VOL-1967 move api-server to separate repository

Change-Id: I21b85be74205805be15f8a85e53a903d16785671
diff --git a/vendor/github.com/DataDog/zstd/.travis.yml b/vendor/github.com/DataDog/zstd/.travis.yml
index c5aa33d..629470c 100644
--- a/vendor/github.com/DataDog/zstd/.travis.yml
+++ b/vendor/github.com/DataDog/zstd/.travis.yml
@@ -1,9 +1,10 @@
+dist: xenial
 language: go
 
 go:
-  - 1.9.x
   - 1.10.x
   - 1.11.x
+  - 1.12.x
 
 os:
   - linux
diff --git a/vendor/github.com/DataDog/zstd/README.md b/vendor/github.com/DataDog/zstd/README.md
index 6c02e16..b32c3e7 100644
--- a/vendor/github.com/DataDog/zstd/README.md
+++ b/vendor/github.com/DataDog/zstd/README.md
@@ -2,8 +2,8 @@
 
 [C Zstd Homepage](https://github.com/Cyan4973/zstd)
 
-The current headers and C files are from *v1.3.4* (Commit
-[2555975](https://github.com/facebook/zstd/releases/tag/v1.3.4)).
+The current headers and C files are from *v1.4.1* (Commit
+[52181f8](https://github.com/facebook/zstd/releases/tag/v1.4.1)).
 
 ## Usage
 
diff --git a/vendor/github.com/DataDog/zstd/bitstream.h b/vendor/github.com/DataDog/zstd/bitstream.h
index f7f389f..d955bd6 100644
--- a/vendor/github.com/DataDog/zstd/bitstream.h
+++ b/vendor/github.com/DataDog/zstd/bitstream.h
@@ -1,8 +1,7 @@
 /* ******************************************************************
    bitstream
    Part of FSE library
-   header file (to include)
-   Copyright (C) 2013-2017, Yann Collet.
+   Copyright (C) 2013-present, Yann Collet.
 
    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 
@@ -49,21 +48,10 @@
 *  Dependencies
 ******************************************/
 #include "mem.h"            /* unaligned access routines */
+#include "debug.h"          /* assert(), DEBUGLOG(), RAWLOG() */
 #include "error_private.h"  /* error codes and messages */
 
 
-/*-*************************************
-*  Debug
-***************************************/
-#if defined(BIT_DEBUG) && (BIT_DEBUG>=1)
-#  include <assert.h>
-#else
-#  ifndef assert
-#    define assert(condition) ((void)0)
-#  endif
-#endif
-
-
 /*=========================================
 *  Target specific
 =========================================*/
@@ -83,8 +71,7 @@
  * A critical property of these streams is that they encode and decode in **reverse** direction.
  * So the first bit sequence you add will be the last to be read, like a LIFO stack.
  */
-typedef struct
-{
+typedef struct {
     size_t bitContainer;
     unsigned bitPos;
     char*  startPtr;
@@ -118,8 +105,7 @@
 /*-********************************************
 *  bitStream decoding API (read backward)
 **********************************************/
-typedef struct
-{
+typedef struct {
     size_t   bitContainer;
     unsigned bitsConsumed;
     const char* ptr;
@@ -236,7 +222,8 @@
 }
 
 /*! BIT_addBitsFast() :
- *  works only if `value` is _clean_, meaning all high bits above nbBits are 0 */
+ *  works only if `value` is _clean_,
+ *  meaning all high bits above nbBits are 0 */
 MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
                                 size_t value, unsigned nbBits)
 {
@@ -352,17 +339,10 @@
 
 MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
 {
-#if defined(__BMI__) && defined(__GNUC__) && __GNUC__*1000+__GNUC_MINOR__ >= 4008  /* experimental */
-#  if defined(__x86_64__)
-    if (sizeof(bitContainer)==8)
-        return _bextr_u64(bitContainer, start, nbBits);
-    else
-#  endif
-        return _bextr_u32(bitContainer, start, nbBits);
-#else
+    U32 const regMask = sizeof(bitContainer)*8 - 1;
+    /* if start > regMask, bitstream is corrupted, and result is undefined */
     assert(nbBits < BIT_MASK_SIZE);
-    return (bitContainer >> start) & BIT_mask[nbBits];
-#endif
+    return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
 }
 
 MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
@@ -379,9 +359,13 @@
  * @return : value extracted */
 MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
 {
-#if defined(__BMI__) && defined(__GNUC__)   /* experimental; fails if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8 */
+    /* arbitrate between double-shift and shift+mask */
+#if 1
+    /* if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8,
+     * bitstream is likely corrupted, and result is undefined */
     return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
 #else
+    /* this code path is slower on my os-x laptop */
     U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
     return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
 #endif
@@ -405,7 +389,7 @@
  *  Read (consume) next n bits from local register and update.
 *  Take care not to read more bits than the local register contains.
  * @return : extracted value. */
-MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
 {
     size_t const value = BIT_lookBits(bitD, nbBits);
     BIT_skipBits(bitD, nbBits);
@@ -414,7 +398,7 @@
 
 /*! BIT_readBitsFast() :
 *  unsafe version; only works if nbBits >= 1 */
-MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
 {
     size_t const value = BIT_lookBitsFast(bitD, nbBits);
     assert(nbBits >= 1);
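For context on the bitstream.h hunks above: BIT_CStream_t writes forward while BIT_DStream_t reads backward, so values come back in reverse (LIFO) order. A minimal round-trip sketch, using only entry points declared in bitstream.h; the buffer size and the two values are illustrative:

    #include <stdio.h>
    #include "bitstream.h"

    static void bitstream_roundtrip(void)
    {
        char buf[16];
        BIT_CStream_t cs;
        BIT_DStream_t ds;
        size_t written;

        BIT_initCStream(&cs, buf, sizeof(buf));
        BIT_addBits(&cs, 5, 4);            /* push 4 bits, value 5   */
        BIT_addBits(&cs, 300, 9);          /* push 9 bits, value 300 */
        BIT_flushBits(&cs);
        written = BIT_closeCStream(&cs);   /* 0 would signal overflow */

        BIT_initDStream(&ds, buf, written);
        /* LIFO: the last value added is the first value read */
        {   unsigned const v1 = (unsigned)BIT_readBits(&ds, 9);   /* 300 */
            unsigned const v2 = (unsigned)BIT_readBits(&ds, 4);   /* 5   */
            printf("%u %u\n", v1, v2);
        }
    }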
diff --git a/vendor/github.com/DataDog/zstd/compiler.h b/vendor/github.com/DataDog/zstd/compiler.h
index e90a3bc..87bf51a 100644
--- a/vendor/github.com/DataDog/zstd/compiler.h
+++ b/vendor/github.com/DataDog/zstd/compiler.h
@@ -15,6 +15,8 @@
 *  Compiler specifics
 *********************************************************/
 /* force inlining */
+
+#if !defined(ZSTD_NO_INLINE)
 #if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
 #  define INLINE_KEYWORD inline
 #else
@@ -29,9 +31,16 @@
 #  define FORCE_INLINE_ATTR
 #endif
 
+#else
+
+#define INLINE_KEYWORD
+#define FORCE_INLINE_ATTR
+
+#endif
+
 /**
  * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
- * parameters. They must be inlined for the compiler to elimininate the constant
+ * parameters. They must be inlined for the compiler to eliminate the constant
  * branches.
  */
 #define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
@@ -77,9 +86,9 @@
  * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
  */
 #ifndef DYNAMIC_BMI2
-  #if (defined(__clang__) && __has_attribute(__target__)) \
+  #if ((defined(__clang__) && __has_attribute(__target__)) \
       || (defined(__GNUC__) \
-          && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) \
+          && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
       && (defined(__x86_64__) || defined(_M_X86)) \
       && !defined(__BMI2__)
   #  define DYNAMIC_BMI2 1
@@ -88,14 +97,41 @@
   #endif
 #endif
 
-/* prefetch */
-#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))  /* _mm_prefetch() is not defined outside of x86/x64 */
-#  include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
-#  define PREFETCH(ptr)   _mm_prefetch((const char*)ptr, _MM_HINT_T0)
-#elif defined(__GNUC__)
-#  define PREFETCH(ptr)   __builtin_prefetch(ptr, 0, 0)
+/* prefetch
+ * can be disabled, by declaring NO_PREFETCH build macro */
+#if defined(NO_PREFETCH)
+#  define PREFETCH_L1(ptr)  (void)(ptr)  /* disabled */
+#  define PREFETCH_L2(ptr)  (void)(ptr)  /* disabled */
 #else
-#  define PREFETCH(ptr)   /* disabled */
+#  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))  /* _mm_prefetch() is not defined outside of x86/x64 */
+#    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
+#    define PREFETCH_L1(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
+#    define PREFETCH_L2(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T1)
+#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
+#    define PREFETCH_L1(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
+#    define PREFETCH_L2(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
+#  else
+#    define PREFETCH_L1(ptr) (void)(ptr)  /* disabled */
+#    define PREFETCH_L2(ptr) (void)(ptr)  /* disabled */
+#  endif
+#endif  /* NO_PREFETCH */
+
+#define CACHELINE_SIZE 64
+
+#define PREFETCH_AREA(p, s)  {            \
+    const char* const _ptr = (const char*)(p);  \
+    size_t const _size = (size_t)(s);     \
+    size_t _pos;                          \
+    for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) {  \
+        PREFETCH_L2(_ptr + _pos);         \
+    }                                     \
+}
+
+/* vectorization */
+#if !defined(__clang__) && defined(__GNUC__)
+#  define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
+#else
+#  define DONT_VECTORIZE
 #endif
 
 /* disable warnings */
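A short usage sketch for the prefetch macros introduced above; the byte-sum loop and table are illustrative, while the macros are the ones defined in this hunk:

    #include <stddef.h>
    #include "compiler.h"

    static unsigned sum_bytes(const unsigned char* table, size_t size)
    {
        unsigned total = 0;
        size_t i;
        PREFETCH_AREA(table, size);   /* one PREFETCH_L2 per 64-byte cache line */
        for (i = 0; i < size; i++) {
            total += table[i];
        }
        return total;
    }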
diff --git a/vendor/github.com/DataDog/zstd/cover.c b/vendor/github.com/DataDog/zstd/cover.c
index b5a3957..6219967 100644
--- a/vendor/github.com/DataDog/zstd/cover.c
+++ b/vendor/github.com/DataDog/zstd/cover.c
@@ -29,6 +29,7 @@
 #include "mem.h" /* read */
 #include "pool.h"
 #include "threading.h"
+#include "cover.h"
 #include "zstd_internal.h" /* includes zstd.h */
 #ifndef ZDICT_STATIC_LINKING_ONLY
 #define ZDICT_STATIC_LINKING_ONLY
@@ -38,7 +39,8 @@
 /*-*************************************
 *  Constants
 ***************************************/
-#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((U32)-1) : ((U32)1 GB))
+#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
+#define DEFAULT_SPLITPOINT 1.0
 
 /*-*************************************
 *  Console display
@@ -184,7 +186,7 @@
 }
 
 /**
- * Destroyes a map that is inited with COVER_map_init().
+ * Destroys a map that was initialized with COVER_map_init().
  */
 static void COVER_map_destroy(COVER_map_t *map) {
   if (map->data) {
@@ -203,6 +205,8 @@
   size_t *offsets;
   const size_t *samplesSizes;
   size_t nbSamples;
+  size_t nbTrainSamples;
+  size_t nbTestSamples;
   U32 *suffix;
   size_t suffixSize;
   U32 *freqs;
@@ -220,9 +224,9 @@
 /**
  * Returns the sum of the sample sizes.
  */
-static size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
+size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
   size_t sum = 0;
-  size_t i;
+  unsigned i;
   for (i = 0; i < nbSamples; ++i) {
     sum += samplesSizes[i];
   }
@@ -377,14 +381,6 @@
   ctx->suffix[dmerId] = freq;
 }
 
-/**
- * A segment is a range in the source as well as the score of the segment.
- */
-typedef struct {
-  U32 begin;
-  U32 end;
-  U32 score;
-} COVER_segment_t;
 
 /**
  * Selects the best segment in an epoch.
@@ -395,7 +391,7 @@
  *
  *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
  *
- * Once the dmer d is in the dictionay we set F(d) = 0.
+ * Once the dmer d is in the dictionary we set F(d) = 0.
  */
 static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
                                            COVER_map_t *activeDmers, U32 begin,
@@ -439,7 +435,7 @@
       U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer);
       activeSegment.begin += 1;
       *delDmerOcc -= 1;
-      /* If this is the last occurence of the dmer, subtract its score */
+      /* If this is the last occurrence of the dmer, subtract its score */
       if (*delDmerOcc == 0) {
         COVER_map_remove(activeDmers, delDmer);
         activeSegment.score -= freqs[delDmer];
@@ -494,6 +490,10 @@
   if (parameters.d > parameters.k) {
     return 0;
   }
+  /* 0 < splitPoint <= 1 */
+  if (parameters.splitPoint <= 0 || parameters.splitPoint > 1){
+    return 0;
+  }
   return 1;
 }
 
@@ -526,30 +526,49 @@
  * Prepare a context for dictionary building.
 * The context is only dependent on the parameter `d` and can be used multiple
  * times.
- * Returns 1 on success or zero on error.
+ * Returns 0 on success, or an error code on failure.
  * The context must be destroyed with `COVER_ctx_destroy()`.
  */
-static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
+static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
                           const size_t *samplesSizes, unsigned nbSamples,
-                          unsigned d) {
+                          unsigned d, double splitPoint) {
   const BYTE *const samples = (const BYTE *)samplesBuffer;
   const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
+  /* Split samples into testing and training sets */
+  const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
+  const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
+  const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
+  const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;
   /* Checks */
   if (totalSamplesSize < MAX(d, sizeof(U64)) ||
       totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
     DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
-                 (U32)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20));
-    return 0;
+                 (unsigned)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20));
+    return ERROR(srcSize_wrong);
+  }
+  /* Check if there are at least 5 training samples */
+  if (nbTrainSamples < 5) {
+    DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid.", nbTrainSamples);
+    return ERROR(srcSize_wrong);
+  }
+  /* Check that there's at least one testing sample */
+  if (nbTestSamples < 1) {
+    DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.", nbTestSamples);
+    return ERROR(srcSize_wrong);
   }
   /* Zero the context */
   memset(ctx, 0, sizeof(*ctx));
-  DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbSamples,
-               (U32)totalSamplesSize);
+  DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
+               (unsigned)trainingSamplesSize);
+  DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
+               (unsigned)testSamplesSize);
   ctx->samples = samples;
   ctx->samplesSizes = samplesSizes;
   ctx->nbSamples = nbSamples;
+  ctx->nbTrainSamples = nbTrainSamples;
+  ctx->nbTestSamples = nbTestSamples;
   /* Partial suffix array */
-  ctx->suffixSize = totalSamplesSize - MAX(d, sizeof(U64)) + 1;
+  ctx->suffixSize = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;
   ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
   /* Maps index to the dmerID */
   ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
@@ -558,12 +577,12 @@
   if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) {
     DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n");
     COVER_ctx_destroy(ctx);
-    return 0;
+    return ERROR(memory_allocation);
   }
   ctx->freqs = NULL;
   ctx->d = d;
 
-  /* Fill offsets from the samlesSizes */
+  /* Fill offsets from the samplesSizes */
   {
     U32 i;
     ctx->offsets[0] = 0;
@@ -581,10 +600,17 @@
     for (i = 0; i < ctx->suffixSize; ++i) {
       ctx->suffix[i] = i;
     }
-    /* qsort doesn't take an opaque pointer, so pass as a global */
+    /* qsort doesn't take an opaque pointer, so pass as a global.
+     * On OpenBSD qsort() is not guaranteed to be stable, their mergesort() is.
+     */
     g_ctx = ctx;
+#if defined(__OpenBSD__)
+    mergesort(ctx->suffix, ctx->suffixSize, sizeof(U32),
+          (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
+#else
     qsort(ctx->suffix, ctx->suffixSize, sizeof(U32),
           (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
+#endif
   }
   DISPLAYLEVEL(2, "Computing frequencies\n");
   /* For each dmer group (group of positions with the same first d bytes):
@@ -598,7 +624,40 @@
                 (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group);
   ctx->freqs = ctx->suffix;
   ctx->suffix = NULL;
-  return 1;
+  return 0;
+}
+
+void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel)
+{
+  const double ratio = (double)nbDmers / maxDictSize;
+  if (ratio >= 10) {
+      return;
+  }
+  LOCALDISPLAYLEVEL(displayLevel, 1,
+                    "WARNING: The maximum dictionary size %u is too large "
+                    "compared to the source size %u! "
+                    "size(source)/size(dictionary) = %f, but it should be >= "
+                    "10! This may lead to a subpar dictionary! We recommend "
+                    "training on sources at least 10x, and up to 100x the "
+                    "size of the dictionary!\n", (U32)maxDictSize,
+                    (U32)nbDmers, ratio);
+}
+
+COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize,
+                                       U32 nbDmers, U32 k, U32 passes)
+{
+  const U32 minEpochSize = k * 10;
+  COVER_epoch_info_t epochs;
+  epochs.num = MAX(1, maxDictSize / k / passes);
+  epochs.size = nbDmers / epochs.num;
+  if (epochs.size >= minEpochSize) {
+      assert(epochs.size * epochs.num <= nbDmers);
+      return epochs;
+  }
+  epochs.size = MIN(minEpochSize, nbDmers);
+  epochs.num = nbDmers / epochs.size;
+  assert(epochs.size * epochs.num <= nbDmers);
+  return epochs;
 }
 
 /**
@@ -610,28 +669,34 @@
                                     ZDICT_cover_params_t parameters) {
   BYTE *const dict = (BYTE *)dictBuffer;
   size_t tail = dictBufferCapacity;
-  /* Divide the data up into epochs of equal size.
-   * We will select at least one segment from each epoch.
-   */
-  const U32 epochs = (U32)(dictBufferCapacity / parameters.k);
-  const U32 epochSize = (U32)(ctx->suffixSize / epochs);
+  /* Divide the data into epochs. We will select one segment from each epoch. */
+  const COVER_epoch_info_t epochs = COVER_computeEpochs(
+      (U32)dictBufferCapacity, (U32)ctx->suffixSize, parameters.k, 4);
+  const size_t maxZeroScoreRun = MAX(10, MIN(100, epochs.num >> 3));
+  size_t zeroScoreRun = 0;
   size_t epoch;
-  DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", epochs,
-               epochSize);
+  DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
+                (U32)epochs.num, (U32)epochs.size);
   /* Loop through the epochs until there are no more segments or the dictionary
    * is full.
    */
-  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs) {
-    const U32 epochBegin = (U32)(epoch * epochSize);
-    const U32 epochEnd = epochBegin + epochSize;
+  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) {
+    const U32 epochBegin = (U32)(epoch * epochs.size);
+    const U32 epochEnd = epochBegin + epochs.size;
     size_t segmentSize;
     /* Select a segment */
     COVER_segment_t segment = COVER_selectSegment(
         ctx, freqs, activeDmers, epochBegin, epochEnd, parameters);
-    /* If the segment covers no dmers, then we are out of content */
+    /* If the segment covers no dmers, then we are out of content.
+     * There may be new content in other epochs, so continue for some time.
+     */
     if (segment.score == 0) {
-      break;
+      if (++zeroScoreRun >= maxZeroScoreRun) {
+          break;
+      }
+      continue;
     }
+    zeroScoreRun = 0;
     /* Trim the segment if necessary and if it is too small then we are done */
     segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
     if (segmentSize < parameters.d) {
@@ -644,7 +709,7 @@
     memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
     DISPLAYUPDATE(
         2, "\r%u%%       ",
-        (U32)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
+        (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
   }
   DISPLAYLEVEL(2, "\r%79s\r", "");
   return tail;
@@ -658,17 +723,17 @@
   BYTE* const dict = (BYTE*)dictBuffer;
   COVER_ctx_t ctx;
   COVER_map_t activeDmers;
-
+  parameters.splitPoint = 1.0;
   /* Initialize global data */
   g_displayLevel = parameters.zParams.notificationLevel;
   /* Checks */
   if (!COVER_checkParameters(parameters, dictBufferCapacity)) {
     DISPLAYLEVEL(1, "Cover parameters incorrect\n");
-    return ERROR(GENERIC);
+    return ERROR(parameter_outOfBound);
   }
   if (nbSamples == 0) {
     DISPLAYLEVEL(1, "Cover must have at least one input file\n");
-    return ERROR(GENERIC);
+    return ERROR(srcSize_wrong);
   }
   if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
     DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
@@ -676,14 +741,18 @@
     return ERROR(dstSize_tooSmall);
   }
   /* Initialize context and activeDmers */
-  if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
-                      parameters.d)) {
-    return ERROR(GENERIC);
+  {
+    size_t const initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
+                      parameters.d, parameters.splitPoint);
+    if (ZSTD_isError(initVal)) {
+      return initVal;
+    }
   }
+  COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, g_displayLevel);
   if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
     DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
     COVER_ctx_destroy(&ctx);
-    return ERROR(GENERIC);
+    return ERROR(memory_allocation);
   }
 
   DISPLAYLEVEL(2, "Building dictionary\n");
@@ -696,7 +765,7 @@
         samplesBuffer, samplesSizes, nbSamples, parameters.zParams);
     if (!ZSTD_isError(dictionarySize)) {
       DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
-                   (U32)dictionarySize);
+                   (unsigned)dictionarySize);
     }
     COVER_ctx_destroy(&ctx);
     COVER_map_destroy(&activeDmers);
@@ -704,28 +773,65 @@
   }
 }
 
-/**
- * COVER_best_t is used for two purposes:
- * 1. Synchronizing threads.
- * 2. Saving the best parameters and dictionary.
- *
- * All of the methods except COVER_best_init() are thread safe if zstd is
- * compiled with multithreaded support.
- */
-typedef struct COVER_best_s {
-  ZSTD_pthread_mutex_t mutex;
-  ZSTD_pthread_cond_t cond;
-  size_t liveJobs;
-  void *dict;
-  size_t dictSize;
-  ZDICT_cover_params_t parameters;
-  size_t compressedSize;
-} COVER_best_t;
+
+
+size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters,
+                                    const size_t *samplesSizes, const BYTE *samples,
+                                    size_t *offsets,
+                                    size_t nbTrainSamples, size_t nbSamples,
+                                    BYTE *const dict, size_t dictBufferCapacity) {
+  size_t totalCompressedSize = ERROR(GENERIC);
+  /* Pointers */
+  ZSTD_CCtx *cctx;
+  ZSTD_CDict *cdict;
+  void *dst;
+  /* Local variables */
+  size_t dstCapacity;
+  size_t i;
+  /* Allocate dst with enough space to compress the maximum sized sample */
+  {
+    size_t maxSampleSize = 0;
+    i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
+    for (; i < nbSamples; ++i) {
+      maxSampleSize = MAX(samplesSizes[i], maxSampleSize);
+    }
+    dstCapacity = ZSTD_compressBound(maxSampleSize);
+    dst = malloc(dstCapacity);
+  }
+  /* Create the cctx and cdict */
+  cctx = ZSTD_createCCtx();
+  cdict = ZSTD_createCDict(dict, dictBufferCapacity,
+                           parameters.zParams.compressionLevel);
+  if (!dst || !cctx || !cdict) {
+    goto _compressCleanup;
+  }
+  /* Compress each sample and sum their sizes (or error) */
+  totalCompressedSize = dictBufferCapacity;
+  i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
+  for (; i < nbSamples; ++i) {
+    const size_t size = ZSTD_compress_usingCDict(
+        cctx, dst, dstCapacity, samples + offsets[i],
+        samplesSizes[i], cdict);
+    if (ZSTD_isError(size)) {
+      totalCompressedSize = size;
+      goto _compressCleanup;
+    }
+    totalCompressedSize += size;
+  }
+_compressCleanup:
+  ZSTD_freeCCtx(cctx);
+  ZSTD_freeCDict(cdict);
+  if (dst) {
+    free(dst);
+  }
+  return totalCompressedSize;
+}
+
 
 /**
  * Initialize the `COVER_best_t`.
  */
-static void COVER_best_init(COVER_best_t *best) {
+void COVER_best_init(COVER_best_t *best) {
   if (best==NULL) return; /* compatible with init on NULL */
   (void)ZSTD_pthread_mutex_init(&best->mutex, NULL);
   (void)ZSTD_pthread_cond_init(&best->cond, NULL);
@@ -739,7 +845,7 @@
 /**
  * Wait until liveJobs == 0.
  */
-static void COVER_best_wait(COVER_best_t *best) {
+void COVER_best_wait(COVER_best_t *best) {
   if (!best) {
     return;
   }
@@ -753,7 +859,7 @@
 /**
  * Call COVER_best_wait() and then destroy the COVER_best_t.
  */
-static void COVER_best_destroy(COVER_best_t *best) {
+void COVER_best_destroy(COVER_best_t *best) {
   if (!best) {
     return;
   }
@@ -769,7 +875,7 @@
  * Called when a thread is about to be launched.
  * Increments liveJobs.
  */
-static void COVER_best_start(COVER_best_t *best) {
+void COVER_best_start(COVER_best_t *best) {
   if (!best) {
     return;
   }
@@ -783,9 +889,11 @@
  * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
 * If this dictionary is the best so far, save it and its parameters.
  */
-static void COVER_best_finish(COVER_best_t *best, size_t compressedSize,
-                              ZDICT_cover_params_t parameters, void *dict,
-                              size_t dictSize) {
+void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters,
+                              COVER_dictSelection_t selection) {
+  void* dict = selection.dictContent;
+  size_t compressedSize = selection.totalCompressedSize;
+  size_t dictSize = selection.dictSize;
   if (!best) {
     return;
   }
@@ -805,19 +913,129 @@
         if (!best->dict) {
           best->compressedSize = ERROR(GENERIC);
           best->dictSize = 0;
+          ZSTD_pthread_cond_signal(&best->cond);
+          ZSTD_pthread_mutex_unlock(&best->mutex);
           return;
         }
       }
       /* Save the dictionary, parameters, and size */
+      if (!dict) {
+        return;
+      }
       memcpy(best->dict, dict, dictSize);
       best->dictSize = dictSize;
       best->parameters = parameters;
       best->compressedSize = compressedSize;
     }
-    ZSTD_pthread_mutex_unlock(&best->mutex);
     if (liveJobs == 0) {
       ZSTD_pthread_cond_broadcast(&best->cond);
     }
+    ZSTD_pthread_mutex_unlock(&best->mutex);
+  }
+}
+
+COVER_dictSelection_t COVER_dictSelectionError(size_t error) {
+    COVER_dictSelection_t selection = { NULL, 0, error };
+    return selection;
+}
+
+unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection) {
+  return (ZSTD_isError(selection.totalCompressedSize) || !selection.dictContent);
+}
+
+void COVER_dictSelectionFree(COVER_dictSelection_t selection){
+  free(selection.dictContent);
+}
+
+COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent,
+        size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples,
+        size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize) {
+
+  size_t largestDict = 0;
+  size_t largestCompressed = 0;
+  BYTE* customDictContentEnd = customDictContent + dictContentSize;
+
+  BYTE * largestDictbuffer = (BYTE *)malloc(dictContentSize);
+  BYTE * candidateDictBuffer = (BYTE *)malloc(dictContentSize);
+  double regressionTolerance = ((double)params.shrinkDictMaxRegression / 100.0) + 1.00;
+
+  if (!largestDictbuffer || !candidateDictBuffer) {
+    free(largestDictbuffer);
+    free(candidateDictBuffer);
+    return COVER_dictSelectionError(dictContentSize);
+  }
+
+  /* Initial dictionary size and compressed size */
+  memcpy(largestDictbuffer, customDictContent, dictContentSize);
+  dictContentSize = ZDICT_finalizeDictionary(
+    largestDictbuffer, dictContentSize, customDictContent, dictContentSize,
+    samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);
+
+  if (ZDICT_isError(dictContentSize)) {
+    free(largestDictbuffer);
+    free(candidateDictBuffer);
+    return COVER_dictSelectionError(dictContentSize);
+  }
+
+  totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes,
+                                                       samplesBuffer, offsets,
+                                                       nbCheckSamples, nbSamples,
+                                                       largestDictbuffer, dictContentSize);
+
+  if (ZSTD_isError(totalCompressedSize)) {
+    free(largestDictbuffer);
+    free(candidateDictBuffer);
+    return COVER_dictSelectionError(totalCompressedSize);
+  }
+
+  if (params.shrinkDict == 0) {
+    COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize };
+    free(candidateDictBuffer);
+    return selection;
+  }
+
+  largestDict = dictContentSize;
+  largestCompressed = totalCompressedSize;
+  dictContentSize = ZDICT_DICTSIZE_MIN;
+
+  /* Largest dict is initially at least ZDICT_DICTSIZE_MIN */
+  while (dictContentSize < largestDict) {
+    memcpy(candidateDictBuffer, largestDictbuffer, largestDict);
+    dictContentSize = ZDICT_finalizeDictionary(
+      candidateDictBuffer, dictContentSize, customDictContentEnd - dictContentSize, dictContentSize,
+      samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);
+
+    if (ZDICT_isError(dictContentSize)) {
+      free(largestDictbuffer);
+      free(candidateDictBuffer);
+      return COVER_dictSelectionError(dictContentSize);
+
+    }
+
+    totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes,
+                                                         samplesBuffer, offsets,
+                                                         nbCheckSamples, nbSamples,
+                                                         candidateDictBuffer, dictContentSize);
+
+    if (ZSTD_isError(totalCompressedSize)) {
+      free(largestDictbuffer);
+      free(candidateDictBuffer);
+      return COVER_dictSelectionError(totalCompressedSize);
+    }
+
+    if (totalCompressedSize <= largestCompressed * regressionTolerance) {
+      COVER_dictSelection_t selection = { candidateDictBuffer, dictContentSize, totalCompressedSize };
+      free(largestDictbuffer);
+      return selection;
+    }
+    dictContentSize *= 2;
+  }
+  dictContentSize = largestDict;
+  totalCompressedSize = largestCompressed;
+  {
+    COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize };
+    free(candidateDictBuffer);
+    return selection;
   }
 }
 
@@ -832,7 +1050,7 @@
 } COVER_tryParameters_data_t;
 
 /**
- * Tries a set of parameters and upates the COVER_best_t with the results.
+ * Tries a set of parameters and updates the COVER_best_t with the results.
  * This function is thread safe if zstd is compiled with multithreaded support.
  * It takes its parameters as an *OWNING* opaque pointer to support threading.
  */
@@ -846,6 +1064,7 @@
   /* Allocate space for hash table, dict, and freqs */
   COVER_map_t activeDmers;
   BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity);
+  COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC));
   U32 *freqs = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
   if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
     DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
@@ -861,68 +1080,21 @@
   {
     const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
                                               dictBufferCapacity, parameters);
-    dictBufferCapacity = ZDICT_finalizeDictionary(
-        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
-        ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbSamples,
-        parameters.zParams);
-    if (ZDICT_isError(dictBufferCapacity)) {
-      DISPLAYLEVEL(1, "Failed to finalize dictionary\n");
+    selection = COVER_selectDict(dict + tail, dictBufferCapacity - tail,
+        ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets,
+        totalCompressedSize);
+
+    if (COVER_dictSelectionIsError(selection)) {
+      DISPLAYLEVEL(1, "Failed to select dictionary\n");
       goto _cleanup;
     }
   }
-  /* Check total compressed size */
-  {
-    /* Pointers */
-    ZSTD_CCtx *cctx;
-    ZSTD_CDict *cdict;
-    void *dst;
-    /* Local variables */
-    size_t dstCapacity;
-    size_t i;
-    /* Allocate dst with enough space to compress the maximum sized sample */
-    {
-      size_t maxSampleSize = 0;
-      for (i = 0; i < ctx->nbSamples; ++i) {
-        maxSampleSize = MAX(ctx->samplesSizes[i], maxSampleSize);
-      }
-      dstCapacity = ZSTD_compressBound(maxSampleSize);
-      dst = malloc(dstCapacity);
-    }
-    /* Create the cctx and cdict */
-    cctx = ZSTD_createCCtx();
-    cdict = ZSTD_createCDict(dict, dictBufferCapacity,
-                             parameters.zParams.compressionLevel);
-    if (!dst || !cctx || !cdict) {
-      goto _compressCleanup;
-    }
-    /* Compress each sample and sum their sizes (or error) */
-    totalCompressedSize = dictBufferCapacity;
-    for (i = 0; i < ctx->nbSamples; ++i) {
-      const size_t size = ZSTD_compress_usingCDict(
-          cctx, dst, dstCapacity, ctx->samples + ctx->offsets[i],
-          ctx->samplesSizes[i], cdict);
-      if (ZSTD_isError(size)) {
-        totalCompressedSize = ERROR(GENERIC);
-        goto _compressCleanup;
-      }
-      totalCompressedSize += size;
-    }
-  _compressCleanup:
-    ZSTD_freeCCtx(cctx);
-    ZSTD_freeCDict(cdict);
-    if (dst) {
-      free(dst);
-    }
-  }
-
 _cleanup:
-  COVER_best_finish(data->best, totalCompressedSize, parameters, dict,
-                    dictBufferCapacity);
+  free(dict);
+  COVER_best_finish(data->best, parameters, selection);
   free(data);
   COVER_map_destroy(&activeDmers);
-  if (dict) {
-    free(dict);
-  }
+  COVER_dictSelectionFree(selection);
   if (freqs) {
     free(freqs);
   }
@@ -934,6 +1106,8 @@
     ZDICT_cover_params_t *parameters) {
   /* constants */
   const unsigned nbThreads = parameters->nbThreads;
+  const double splitPoint =
+      parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint;
   const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
   const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
   const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
@@ -942,6 +1116,7 @@
   const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
   const unsigned kIterations =
       (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
+  const unsigned shrinkDict = 0;
   /* Local variables */
   const int displayLevel = parameters->zParams.notificationLevel;
   unsigned iteration = 1;
@@ -949,15 +1124,20 @@
   unsigned k;
   COVER_best_t best;
   POOL_ctx *pool = NULL;
+  int warned = 0;
 
   /* Checks */
+  if (splitPoint <= 0 || splitPoint > 1) {
+    LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
+    return ERROR(parameter_outOfBound);
+  }
   if (kMinK < kMaxD || kMaxK < kMinK) {
     LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
-    return ERROR(GENERIC);
+    return ERROR(parameter_outOfBound);
   }
   if (nbSamples == 0) {
     DISPLAYLEVEL(1, "Cover must have at least one input file\n");
-    return ERROR(GENERIC);
+    return ERROR(srcSize_wrong);
   }
   if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
     DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
@@ -981,11 +1161,18 @@
     /* Initialize the context for this value of d */
     COVER_ctx_t ctx;
     LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
-    if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d)) {
-      LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
-      COVER_best_destroy(&best);
-      POOL_free(pool);
-      return ERROR(GENERIC);
+    {
+      const size_t initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint);
+      if (ZSTD_isError(initVal)) {
+        LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
+        COVER_best_destroy(&best);
+        POOL_free(pool);
+        return initVal;
+      }
+    }
+    if (!warned) {
+      COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, displayLevel);
+      warned = 1;
     }
     /* Loop through k reusing the same context */
     for (k = kMinK; k <= kMaxK; k += kStepSize) {
@@ -998,7 +1185,7 @@
         COVER_best_destroy(&best);
         COVER_ctx_destroy(&ctx);
         POOL_free(pool);
-        return ERROR(GENERIC);
+        return ERROR(memory_allocation);
       }
       data->ctx = &ctx;
       data->best = &best;
@@ -1006,7 +1193,9 @@
       data->parameters = *parameters;
       data->parameters.k = k;
       data->parameters.d = d;
+      data->parameters.splitPoint = splitPoint;
       data->parameters.steps = kSteps;
+      data->parameters.shrinkDict = shrinkDict;
       data->parameters.zParams.notificationLevel = g_displayLevel;
       /* Check the parameters */
       if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) {
@@ -1023,7 +1212,7 @@
       }
       /* Print status */
       LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%%       ",
-                         (U32)((iteration * 100) / kIterations));
+                         (unsigned)((iteration * 100) / kIterations));
       ++iteration;
     }
     COVER_best_wait(&best);
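Taken together, the cover.c changes above thread a train/test split (splitPoint) and specific error codes through the trainer. A hedged sketch of driving it via the public ZDICT_optimizeTrainFromBuffer_cover() entry point; the 0.8 split and the caller-prepared sample buffers are assumptions, and note that the optimizer in this snapshot pins shrinkDict to 0:

    #define ZDICT_STATIC_LINKING_ONLY
    #include <string.h>
    #include "zdict.h"

    static size_t train_cover_dict(void* dictBuffer, size_t dictCapacity,
                                   const void* samplesBuffer,
                                   const size_t* samplesSizes, unsigned nbSamples)
    {
        ZDICT_cover_params_t params;
        memset(&params, 0, sizeof(params));
        params.splitPoint = 0.8;   /* train on 80% of the samples, test on 20% */
        /* k == d == 0 lets the optimizer search d in {6,8} and k in [50,2000];
         * shrinkDict stays 0 here (forced by `const unsigned shrinkDict = 0;`) */
        return ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, dictCapacity,
                                                   samplesBuffer, samplesSizes,
                                                   nbSamples, &params);
    }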
diff --git a/vendor/github.com/DataDog/zstd/cover.h b/vendor/github.com/DataDog/zstd/cover.h
new file mode 100644
index 0000000..d9e0636
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/cover.h
@@ -0,0 +1,147 @@
+#include <stdio.h>  /* fprintf */
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memset */
+#include <time.h>   /* clock */
+#include "mem.h" /* read */
+#include "pool.h"
+#include "threading.h"
+#include "zstd_internal.h" /* includes zstd.h */
+#ifndef ZDICT_STATIC_LINKING_ONLY
+#define ZDICT_STATIC_LINKING_ONLY
+#endif
+#include "zdict.h"
+
+/**
+ * COVER_best_t is used for two purposes:
+ * 1. Synchronizing threads.
+ * 2. Saving the best parameters and dictionary.
+ *
+ * All of the methods except COVER_best_init() are thread safe if zstd is
+ * compiled with multithreaded support.
+ */
+typedef struct COVER_best_s {
+  ZSTD_pthread_mutex_t mutex;
+  ZSTD_pthread_cond_t cond;
+  size_t liveJobs;
+  void *dict;
+  size_t dictSize;
+  ZDICT_cover_params_t parameters;
+  size_t compressedSize;
+} COVER_best_t;
+
+/**
+ * A segment is a range in the source as well as the score of the segment.
+ */
+typedef struct {
+  U32 begin;
+  U32 end;
+  U32 score;
+} COVER_segment_t;
+
+/**
+ * Number of epochs and size of each epoch.
+ */
+typedef struct {
+  U32 num;
+  U32 size;
+} COVER_epoch_info_t;
+
+/**
+ * Struct used for the dictionary selection function.
+ */
+typedef struct COVER_dictSelection {
+  BYTE* dictContent;
+  size_t dictSize;
+  size_t totalCompressedSize;
+} COVER_dictSelection_t;
+
+/**
+ * Computes the number of epochs and the size of each epoch.
+ * We will make sure that each epoch gets at least 10 * k bytes.
+ *
+ * The COVER algorithms divide the data up into epochs of equal size and
+ * select one segment from each epoch.
+ *
+ * @param maxDictSize The maximum allowed dictionary size.
+ * @param nbDmers     The number of dmers we are training on.
+ * @param k           The parameter k (segment size).
+ * @param passes      The target number of passes over the dmer corpus.
+ *                    More passes means a better dictionary.
+ */
+COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize, U32 nbDmers,
+                                       U32 k, U32 passes);
+
+/**
+ * Warns the user when their corpus is too small.
+ */
+void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel);
+
+/**
+ * Checks the total compressed size of a dictionary.
+ */
+size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters,
+                                      const size_t *samplesSizes, const BYTE *samples,
+                                      size_t *offsets,
+                                      size_t nbTrainSamples, size_t nbSamples,
+                                      BYTE *const dict, size_t dictBufferCapacity);
+
+/**
+ * Returns the sum of the sample sizes.
+ */
+size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples);
+
+/**
+ * Initialize the `COVER_best_t`.
+ */
+void COVER_best_init(COVER_best_t *best);
+
+/**
+ * Wait until liveJobs == 0.
+ */
+void COVER_best_wait(COVER_best_t *best);
+
+/**
+ * Call COVER_best_wait() and then destroy the COVER_best_t.
+ */
+void COVER_best_destroy(COVER_best_t *best);
+
+/**
+ * Called when a thread is about to be launched.
+ * Increments liveJobs.
+ */
+void COVER_best_start(COVER_best_t *best);
+
+/**
+ * Called when a thread finishes executing, whether on error or success.
+ * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
+ * If this dictionary is the best so far save it and its parameters.
+ */
+void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters,
+                       COVER_dictSelection_t selection);
+/**
+ * Error check for COVER_selectDict(). Checks whether the returned
+ * selection is an error.
+ */
+unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection);
+
+/**
+ * Error constructor for COVER_selectDict(). Returns a selection whose
+ * totalCompressedSize field holds a ZSTD error code.
+ */
+COVER_dictSelection_t COVER_dictSelectionError(size_t error);
+
+/**
+ * Always call this after COVER_selectDict() to free the memory of the
+ * newly created dictionary.
+ */
+void COVER_dictSelectionFree(COVER_dictSelection_t selection);
+
+/**
+ * Called to finalize the dictionary and select one based on whether or not
+ * the shrink-dict flag was enabled. If enabled the dictionary used is the
+ * smallest dictionary within a specified regression of the compressed size
+ * from the largest dictionary.
+ */
+COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent,
+                       size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples,
+                       size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize);
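A worked example of the COVER_computeEpochs() arithmetic declared above, with illustrative numbers (100 KB dictionary budget, 10M dmers, k = 1024, 4 passes):

    #include <stdio.h>
    #include "cover.h"

    int main(void)
    {
        /* num  = MAX(1, 102400 / 1024 / 4) = 25
         * size = 10000000 / 25 = 400000, well above minEpochSize = 10 * k */
        COVER_epoch_info_t const e =
            COVER_computeEpochs(100 * 1024, 10000000, 1024, 4);
        printf("epochs: num=%u size=%u\n", e.num, e.size);
        return 0;
    }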
diff --git a/vendor/github.com/DataDog/zstd/cpu.h b/vendor/github.com/DataDog/zstd/cpu.h
old mode 100755
new mode 100644
index 4eb48e3..5f0923f
--- a/vendor/github.com/DataDog/zstd/cpu.h
+++ b/vendor/github.com/DataDog/zstd/cpu.h
@@ -36,7 +36,7 @@
     U32 f1d = 0;
     U32 f7b = 0;
     U32 f7c = 0;
-#ifdef _MSC_VER
+#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
     int reg[4];
     __cpuid((int*)reg, 0);
     {
@@ -72,14 +72,13 @@
           "cpuid\n\t"
           "popl %%ebx\n\t"
           : "=a"(f1a), "=c"(f1c), "=d"(f1d)
-          : "a"(1)
-          :);
+          : "a"(1));
     }
     if (n >= 7) {
       __asm__(
           "pushl %%ebx\n\t"
           "cpuid\n\t"
-          "movl %%ebx, %%eax\n\r"
+          "movl %%ebx, %%eax\n\t"
           "popl %%ebx"
           : "=a"(f7b), "=c"(f7c)
           : "a"(7), "c"(0)
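The cpu.h fixes above restrict the MSVC __cpuid() path to x86/x64 and correct the inline-asm line ending. A minimal sketch of how the cached feature bits are consumed; ZSTD_cpuid() and ZSTD_cpuid_bmi2() are helpers cpu.h already provides:

    #include "cpu.h"

    /* non-zero when BMI2 is available; this is what the DYNAMIC_BMI2
     * dispatch in compiler.h ultimately keys off */
    static int has_bmi2(void)
    {
        ZSTD_cpuid_t const cpuid = ZSTD_cpuid();
        return ZSTD_cpuid_bmi2(cpuid);
    }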
diff --git a/vendor/github.com/DataDog/zstd/debug.c b/vendor/github.com/DataDog/zstd/debug.c
new file mode 100644
index 0000000..3ebdd1c
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/debug.c
@@ -0,0 +1,44 @@
+/* ******************************************************************
+   debug
+   Part of FSE library
+   Copyright (C) 2013-present, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+
+
+/*
+ * This module only hosts one global variable
+ * which can be used to dynamically influence the verbosity of traces,
+ * such as DEBUGLOG and RAWLOG
+ */
+
+#include "debug.h"
+
+int g_debuglevel = DEBUGLEVEL;
diff --git a/vendor/github.com/DataDog/zstd/debug.h b/vendor/github.com/DataDog/zstd/debug.h
new file mode 100644
index 0000000..b4fc89d
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/debug.h
@@ -0,0 +1,134 @@
+/* ******************************************************************
+   debug
+   Part of FSE library
+   Copyright (C) 2013-present, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+
+
+/*
+ * The purpose of this header is to enable debug functions.
+ * They regroup assert(), DEBUGLOG() and RAWLOG() for run-time,
+ * and DEBUG_STATIC_ASSERT() for compile-time.
+ *
+ * By default, DEBUGLEVEL==0, which means run-time debug is disabled.
+ *
+ * Level 1 enables assert() only.
+ * Starting level 2, traces can be generated and pushed to stderr.
+ * The higher the level, the more verbose the traces.
+ *
+ * It's possible to dynamically adjust the level using the variable g_debuglevel,
+ * which is only declared if DEBUGLEVEL>=2,
+ * and is a global variable, not multi-thread protected (use with care)
+ */
+
+#ifndef DEBUG_H_12987983217
+#define DEBUG_H_12987983217
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* static assert is triggered at compile time, leaving no runtime artefact.
+ * static assert only works with compile-time constants.
+ * Also, this variant can only be used inside a function. */
+#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1])
+
+
+/* DEBUGLEVEL is expected to be defined externally,
+ * typically through compiler command line.
+ * Value must be a number. */
+#ifndef DEBUGLEVEL
+#  define DEBUGLEVEL 0
+#endif
+
+
+/* DEBUGFILE can be defined externally,
+ * typically through compiler command line.
+ * note : currently useless.
+ * Value must be stderr or stdout */
+#ifndef DEBUGFILE
+#  define DEBUGFILE stderr
+#endif
+
+
+/* recommended values for DEBUGLEVEL :
+ * 0 : release mode, no debug, all run-time checks disabled
+ * 1 : enables assert() only, no display
+ * 2 : reserved, for currently active debug path
+ * 3 : events once per object lifetime (CCtx, CDict, etc.)
+ * 4 : events once per frame
+ * 5 : events once per block
+ * 6 : events once per sequence (verbose)
+ * 7+: events at every position (*very* verbose)
+ *
+ * It's generally inconvenient to output traces > 5;
+ * in that case, it's possible to selectively trigger high verbosity levels
+ * by modifying g_debuglevel.
+ */
+
+#if (DEBUGLEVEL>=1)
+#  include <assert.h>
+#else
+#  ifndef assert   /* assert may be already defined, due to prior #include <assert.h> */
+#    define assert(condition) ((void)0)   /* disable assert (default) */
+#  endif
+#endif
+
+#if (DEBUGLEVEL>=2)
+#  include <stdio.h>
+extern int g_debuglevel; /* the variable is only declared,
+                            it actually lives in debug.c,
+                            and is shared by the whole process.
+                            It's not thread-safe.
+                            It's useful when enabling very verbose levels
+                            on selective conditions (such as position in src) */
+
+#  define RAWLOG(l, ...) {                                      \
+                if (l<=g_debuglevel) {                          \
+                    fprintf(stderr, __VA_ARGS__);               \
+            }   }
+#  define DEBUGLOG(l, ...) {                                    \
+                if (l<=g_debuglevel) {                          \
+                    fprintf(stderr, __FILE__ ": " __VA_ARGS__); \
+                    fprintf(stderr, " \n");                     \
+            }   }
+#else
+#  define RAWLOG(l, ...)      {}    /* disabled */
+#  define DEBUGLOG(l, ...)    {}    /* disabled */
+#endif
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* DEBUG_H_12987983217 */
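A hedged usage sketch for the new debug.h; the function and messages are illustrative. Build with -DDEBUGLEVEL=2 or higher to compile the traces in, then raise g_debuglevel at run time for more verbosity:

    #include "debug.h"

    static size_t decode_block(const void* src, size_t srcSize)
    {
    #if DEBUGLEVEL >= 2
        g_debuglevel = 6;        /* run-time knob; only declared at level >= 2 */
    #endif
        assert(src != NULL);     /* compiled in when DEBUGLEVEL >= 1 */
        DEBUGLOG(4, "decode_block: srcSize=%u", (unsigned)srcSize);
        RAWLOG(6, "per-sequence trace\n");
        (void)src;
        return srcSize;
    }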
diff --git a/vendor/github.com/DataDog/zstd/divsufsort.c b/vendor/github.com/DataDog/zstd/divsufsort.c
index 60cceb0..ead9220 100644
--- a/vendor/github.com/DataDog/zstd/divsufsort.c
+++ b/vendor/github.com/DataDog/zstd/divsufsort.c
@@ -1637,7 +1637,7 @@
             if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
             k = SA + BUCKET_B(c2 = c0, c1);
           }
-          assert(k < j);
+          assert(k < j); assert(k != NULL);
           *k-- = s;
         } else {
           assert(((s == 0) && (T[s] == c1)) || (s < 0));
@@ -1701,7 +1701,7 @@
             if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
             k = SA + BUCKET_B(c2 = c0, c1);
           }
-          assert(k < j);
+          assert(k < j); assert(k != NULL);
           *k-- = s;
         } else if(s != 0) {
           *j = ~s;
@@ -1785,7 +1785,7 @@
             if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
             k = SA + BUCKET_B(c2 = c0, c1);
           }
-          assert(k < j);
+          assert(k < j); assert(k != NULL);
           *k-- = s;
         } else if(s != 0) {
           *j = ~s;
diff --git a/vendor/github.com/DataDog/zstd/entropy_common.c b/vendor/github.com/DataDog/zstd/entropy_common.c
index b37a082..b12944e 100644
--- a/vendor/github.com/DataDog/zstd/entropy_common.c
+++ b/vendor/github.com/DataDog/zstd/entropy_common.c
@@ -72,7 +72,21 @@
     unsigned charnum = 0;
     int previous0 = 0;
 
-    if (hbSize < 4) return ERROR(srcSize_wrong);
+    if (hbSize < 4) {
+        /* This function only works when hbSize >= 4 */
+        char buffer[4];
+        memset(buffer, 0, sizeof(buffer));
+        memcpy(buffer, headerBuffer, hbSize);
+        {   size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
+                                                    buffer, sizeof(buffer));
+            if (FSE_isError(countSize)) return countSize;
+            if (countSize > hbSize) return ERROR(corruption_detected);
+            return countSize;
+    }   }
+    assert(hbSize >= 4);
+
+    /* init */
+    memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0]));   /* all symbols not present in NCount have a frequency of 0 */
     bitStream = MEM_readLE32(ip);
     nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
     if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
@@ -105,6 +119,7 @@
             if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
             while (charnum < n0) normalizedCounter[charnum++] = 0;
             if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+                assert((bitCount >> 3) <= 3); /* For first condition to work */
                 ip += bitCount>>3;
                 bitCount &= 7;
                 bitStream = MEM_readLE32(ip) >> bitCount;
diff --git a/vendor/github.com/DataDog/zstd/error_private.c b/vendor/github.com/DataDog/zstd/error_private.c
index d004ee6..7c1bb67 100644
--- a/vendor/github.com/DataDog/zstd/error_private.c
+++ b/vendor/github.com/DataDog/zstd/error_private.c
@@ -14,6 +14,10 @@
 
 const char* ERR_getErrorString(ERR_enum code)
 {
+#ifdef ZSTD_STRIP_ERROR_STRINGS
+    (void)code;
+    return "Error strings stripped";
+#else
     static const char* const notErrorCode = "Unspecified error code";
     switch( code )
     {
@@ -39,10 +43,12 @@
     case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
     case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
     case PREFIX(srcSize_wrong): return "Src size is incorrect";
+    case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer";
         /* following error codes are not stable and may be removed or changed in a future version */
     case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
     case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
     case PREFIX(maxCode):
     default: return notErrorCode;
     }
+#endif
 }
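The ZSTD_STRIP_ERROR_STRINGS knob added above trades diagnostics for binary size: every code collapses to the single "Error strings stripped" message. Callers observe it through the public wrapper, as in this illustrative helper:

    #include <stdio.h>
    #include "zstd.h"

    static void report(size_t result)
    {
        if (ZSTD_isError(result)) {
            /* prints the real message, or "Error strings stripped" when the
             * library was built with -DZSTD_STRIP_ERROR_STRINGS */
            fprintf(stderr, "zstd: %s\n", ZSTD_getErrorName(result));
        }
    }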
diff --git a/vendor/github.com/DataDog/zstd/fastcover.c b/vendor/github.com/DataDog/zstd/fastcover.c
new file mode 100644
index 0000000..941bb5a
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/fastcover.c
@@ -0,0 +1,747 @@
+/*-*************************************
+*  Dependencies
+***************************************/
+#include <stdio.h>  /* fprintf */
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memset */
+#include <time.h>   /* clock */
+
+#include "mem.h" /* read */
+#include "pool.h"
+#include "threading.h"
+#include "cover.h"
+#include "zstd_internal.h" /* includes zstd.h */
+#ifndef ZDICT_STATIC_LINKING_ONLY
+#define ZDICT_STATIC_LINKING_ONLY
+#endif
+#include "zdict.h"
+
+
+/*-*************************************
+*  Constants
+***************************************/
+#define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
+#define FASTCOVER_MAX_F 31
+#define FASTCOVER_MAX_ACCEL 10
+#define DEFAULT_SPLITPOINT 0.75
+#define DEFAULT_F 20
+#define DEFAULT_ACCEL 1
+
+
+/*-*************************************
+*  Console display
+***************************************/
+static int g_displayLevel = 2;
+#define DISPLAY(...)                                                           \
+  {                                                                            \
+    fprintf(stderr, __VA_ARGS__);                                              \
+    fflush(stderr);                                                            \
+  }
+#define LOCALDISPLAYLEVEL(displayLevel, l, ...)                                \
+  if (displayLevel >= l) {                                                     \
+    DISPLAY(__VA_ARGS__);                                                      \
+  } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
+#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)
+
+#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                               \
+  if (displayLevel >= l) {                                                     \
+    if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) {             \
+      g_time = clock();                                                        \
+      DISPLAY(__VA_ARGS__);                                                    \
+    }                                                                          \
+  }
+#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)
+static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
+static clock_t g_time = 0;
+
+
+/*-*************************************
+* Hash Functions
+***************************************/
+static const U64 prime6bytes = 227718039650203ULL;
+static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u  << (64-48)) * prime6bytes) >> (64-h)) ; }
+static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
+
+static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
+static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
+static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
+
+
+/**
+ * Hash the d-byte value pointed to by p and reduce it mod 2^f (f is passed as h)
+ */
+static size_t FASTCOVER_hashPtrToIndex(const void* p, U32 h, unsigned d) {
+  if (d == 6) {
+    return ZSTD_hash6Ptr(p, h) & (((size_t)1 << h) - 1);   /* (size_t)1 keeps the shift defined for h up to FASTCOVER_MAX_F == 31 */
+  }
+  return ZSTD_hash8Ptr(p, h) & (((size_t)1 << h) - 1);
+}
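+
+/* Illustrative sketch (not upstream zstd code): how a dmer is mapped to a
+ * frequency-table slot. The buffer contents and the value of f are hypothetical. */
+#if 0
+static void FASTCOVER_hashIndexExample(void)
+{
+    static const char sample[8] = { 'a','b','c','d','e','f','g','h' };
+    U32 const f = 20;                                          /* 2^20 table slots */
+    size_t const i6 = FASTCOVER_hashPtrToIndex(sample, f, 6);  /* d == 6 */
+    size_t const i8 = FASTCOVER_hashPtrToIndex(sample, f, 8);  /* d == 8 */
+    assert(i6 < ((size_t)1 << f));
+    assert(i8 < ((size_t)1 << f));
+}
+#endif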
+
+
+/*-*************************************
+* Acceleration
+***************************************/
+typedef struct {
+  unsigned finalize;    /* Percentage of training samples used for ZDICT_finalizeDictionary */
+  unsigned skip;        /* Number of dmers skipped between each dmer counted in computeFrequency */
+} FASTCOVER_accel_t;
+
+
+static const FASTCOVER_accel_t FASTCOVER_defaultAccelParameters[FASTCOVER_MAX_ACCEL+1] = {
+  { 100, 0 },   /* accel = 0, should not happen because accel = 0 defaults to accel = 1 */
+  { 100, 0 },   /* accel = 1 */
+  { 50, 1 },   /* accel = 2 */
+  { 34, 2 },   /* accel = 3 */
+  { 25, 3 },   /* accel = 4 */
+  { 20, 4 },   /* accel = 5 */
+  { 17, 5 },   /* accel = 6 */
+  { 14, 6 },   /* accel = 7 */
+  { 13, 7 },   /* accel = 8 */
+  { 11, 8 },   /* accel = 9 */
+  { 10, 9 },   /* accel = 10 */
+};
+
+
+/*-*************************************
+* Context
+***************************************/
+typedef struct {
+  const BYTE *samples;
+  size_t *offsets;
+  const size_t *samplesSizes;
+  size_t nbSamples;
+  size_t nbTrainSamples;
+  size_t nbTestSamples;
+  size_t nbDmers;
+  U32 *freqs;
+  unsigned d;
+  unsigned f;
+  FASTCOVER_accel_t accelParams;
+} FASTCOVER_ctx_t;
+
+
+/*-*************************************
+*  Helper functions
+***************************************/
+/**
+ * Selects the best segment in an epoch.
+ * Segments are scored according to the following function:
+ *
+ * Let F(d) be the frequency of all dmers with hash value d.
+ * Let S_i be the hash value of the dmer at position i of segment S which has length k.
+ *
+ *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
+ *
+ * Once the dmer with hash value d is in the dictionary we set F(d) = 0.
+ */
+static COVER_segment_t FASTCOVER_selectSegment(const FASTCOVER_ctx_t *ctx,
+                                              U32 *freqs, U32 begin, U32 end,
+                                              ZDICT_cover_params_t parameters,
+                                              U16* segmentFreqs) {
+  /* Constants */
+  const U32 k = parameters.k;
+  const U32 d = parameters.d;
+  const U32 f = ctx->f;
+  const U32 dmersInK = k - d + 1;
+
+  /* Try each segment (activeSegment) and save the best (bestSegment) */
+  COVER_segment_t bestSegment = {0, 0, 0};
+  COVER_segment_t activeSegment;
+
+  /* Reset the activeDmers in the segment */
+  /* The activeSegment starts at the beginning of the epoch. */
+  activeSegment.begin = begin;
+  activeSegment.end = begin;
+  activeSegment.score = 0;
+
+  /* Slide the activeSegment through the whole epoch.
+   * Save the best segment in bestSegment.
+   */
+  while (activeSegment.end < end) {
+    /* Get hash value of current dmer */
+    const size_t idx = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d);
+
+    /* Add frequency of this index to score if this is the first occurrence of index in active segment */
+    if (segmentFreqs[idx] == 0) {
+      activeSegment.score += freqs[idx];
+    }
+    /* Increment end of segment and segmentFreqs */
+    activeSegment.end += 1;
+    segmentFreqs[idx] += 1;
+    /* If the window is now too large, drop the first position */
+    if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
+      /* Get hash value of the dmer to be eliminated from active segment */
+      const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);
+      segmentFreqs[delIndex] -= 1;
+      /* Subtract frequency of this index from score if this is the last occurrence of this index in active segment */
+      if (segmentFreqs[delIndex] == 0) {
+        activeSegment.score -= freqs[delIndex];
+      }
+      /* Increment start of segment */
+      activeSegment.begin += 1;
+    }
+
+    /* If this segment is the best so far save it */
+    if (activeSegment.score > bestSegment.score) {
+      bestSegment = activeSegment;
+    }
+  }
+
+  /* Zero out rest of segmentFreqs array */
+  while (activeSegment.begin < end) {
+    const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);
+    segmentFreqs[delIndex] -= 1;
+    activeSegment.begin += 1;
+  }
+
+  {
+    /* Zero the frequency of the hash value of each dmer covered by the chosen segment. */
+    U32 pos;
+    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
+      const size_t i = FASTCOVER_hashPtrToIndex(ctx->samples + pos, f, d);
+      freqs[i] = 0;
+    }
+  }
+
+  return bestSegment;
+}
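+
+/* Illustrative walk-through (hypothetical numbers): with k = 9 and d = 8, a
+ * window spans dmersInK = k - d + 1 = 2 dmers. Advancing the window by one
+ * position adds freqs[] of the incoming dmer only if segmentFreqs[] for its
+ * hash was 0 (first occurrence inside the window), and subtracts freqs[] of
+ * the outgoing dmer only if its segmentFreqs[] count drops back to 0, so each
+ * distinct hash value contributes to the score at most once per window. */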
+
+
+static int FASTCOVER_checkParameters(ZDICT_cover_params_t parameters,
+                                     size_t maxDictSize, unsigned f,
+                                     unsigned accel) {
+  /* k, d, and f are required parameters */
+  if (parameters.d == 0 || parameters.k == 0) {
+    return 0;
+  }
+  /* d has to be 6 or 8 */
+  if (parameters.d != 6 && parameters.d != 8) {
+    return 0;
+  }
+  /* k <= maxDictSize */
+  if (parameters.k > maxDictSize) {
+    return 0;
+  }
+  /* d <= k */
+  if (parameters.d > parameters.k) {
+    return 0;
+  }
+  /* 0 < f <= FASTCOVER_MAX_F */
+  if (f > FASTCOVER_MAX_F || f == 0) {
+    return 0;
+  }
+  /* 0 < splitPoint <= 1 */
+  if (parameters.splitPoint <= 0 || parameters.splitPoint > 1) {
+    return 0;
+  }
+  /* 0 < accel <= 10 */
+  if (accel > 10 || accel == 0) {
+    return 0;
+  }
+  return 1;
+}
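+
+/* Example of a parameter set accepted by the checks above (hypothetical
+ * values): d = 8, k = 200, f = 20, accel = 1, splitPoint = 0.75, with
+ * maxDictSize >= 200. */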
+
+
+/**
+ * Clean up a context initialized with `FASTCOVER_ctx_init()`.
+ */
+static void
+FASTCOVER_ctx_destroy(FASTCOVER_ctx_t* ctx)
+{
+    if (!ctx) return;
+
+    free(ctx->freqs);
+    ctx->freqs = NULL;
+
+    free(ctx->offsets);
+    ctx->offsets = NULL;
+}
+
+
+/**
+ * Calculate the frequency of the hash value of each dmer in ctx->samples
+ */
+static void
+FASTCOVER_computeFrequency(U32* freqs, const FASTCOVER_ctx_t* ctx)
+{
+    const unsigned f = ctx->f;
+    const unsigned d = ctx->d;
+    const unsigned skip = ctx->accelParams.skip;
+    const unsigned readLength = MAX(d, 8);
+    size_t i;
+    assert(ctx->nbTrainSamples >= 5);
+    assert(ctx->nbTrainSamples <= ctx->nbSamples);
+    for (i = 0; i < ctx->nbTrainSamples; i++) {
+        size_t start = ctx->offsets[i];  /* start of current dmer */
+        size_t const currSampleEnd = ctx->offsets[i+1];
+        while (start + readLength <= currSampleEnd) {
+            const size_t dmerIndex = FASTCOVER_hashPtrToIndex(ctx->samples + start, f, d);
+            freqs[dmerIndex]++;
+            start = start + skip + 1;
+        }
+    }
+}
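+
+/* Worked example (hypothetical accel value): accel = 3 maps to skip = 2 in
+ * FASTCOVER_defaultAccelParameters, so `start` advances by skip + 1 = 3 after
+ * each counted dmer and roughly one dmer in three contributes to freqs[],
+ * trading histogram precision for speed. */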
+
+
+/**
+ * Prepare a context for dictionary building.
+ * The context depends only on the parameter `d` and can be used multiple
+ * times.
+ * Returns 0 on success or error code on error.
+ * The context must be destroyed with `FASTCOVER_ctx_destroy()`.
+ */
+static size_t
+FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx,
+                   const void* samplesBuffer,
+                   const size_t* samplesSizes, unsigned nbSamples,
+                   unsigned d, double splitPoint, unsigned f,
+                   FASTCOVER_accel_t accelParams)
+{
+    const BYTE* const samples = (const BYTE*)samplesBuffer;
+    const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
+    /* Split samples into testing and training sets */
+    const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
+    const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
+    const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
+    const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;
+
+    /* Checks */
+    if (totalSamplesSize < MAX(d, sizeof(U64)) ||
+        totalSamplesSize >= (size_t)FASTCOVER_MAX_SAMPLES_SIZE) {
+        DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
+                    (unsigned)(totalSamplesSize >> 20), (FASTCOVER_MAX_SAMPLES_SIZE >> 20));
+        return ERROR(srcSize_wrong);
+    }
+
+    /* Check if there are at least 5 training samples */
+    if (nbTrainSamples < 5) {
+        DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid\n", nbTrainSamples);
+        return ERROR(srcSize_wrong);
+    }
+
+    /* Check that there is at least one testing sample */
+    if (nbTestSamples < 1) {
+        DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.\n", nbTestSamples);
+        return ERROR(srcSize_wrong);
+    }
+
+    /* Zero the context */
+    memset(ctx, 0, sizeof(*ctx));
+    DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
+                    (unsigned)trainingSamplesSize);
+    DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
+                    (unsigned)testSamplesSize);
+
+    ctx->samples = samples;
+    ctx->samplesSizes = samplesSizes;
+    ctx->nbSamples = nbSamples;
+    ctx->nbTrainSamples = nbTrainSamples;
+    ctx->nbTestSamples = nbTestSamples;
+    ctx->nbDmers = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;
+    ctx->d = d;
+    ctx->f = f;
+    ctx->accelParams = accelParams;
+
+    /* The offsets of each file */
+    ctx->offsets = (size_t*)calloc((nbSamples + 1), sizeof(size_t));
+    if (ctx->offsets == NULL) {
+        DISPLAYLEVEL(1, "Failed to allocate scratch buffers \n");
+        FASTCOVER_ctx_destroy(ctx);
+        return ERROR(memory_allocation);
+    }
+
+    /* Fill offsets from the samplesSizes */
+    {   U32 i;
+        ctx->offsets[0] = 0;
+        assert(nbSamples >= 5);
+        for (i = 1; i <= nbSamples; ++i) {
+            ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
+        }
+    }
+
+    /* Initialize frequency array of size 2^f */
+    ctx->freqs = (U32*)calloc(((U64)1 << f), sizeof(U32));
+    if (ctx->freqs == NULL) {
+        DISPLAYLEVEL(1, "Failed to allocate frequency table \n");
+        FASTCOVER_ctx_destroy(ctx);
+        return ERROR(memory_allocation);
+    }
+
+    DISPLAYLEVEL(2, "Computing frequencies\n");
+    FASTCOVER_computeFrequency(ctx->freqs, ctx);
+
+    return 0;
+}
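+
+/* Worked example (hypothetical sizes): five training samples of 10 bytes each
+ * give offsets = {0, 10, 20, 30, 40, 50}; with d = 8 and splitPoint = 1.0,
+ * trainingSamplesSize = 50 and nbDmers = 50 - MAX(8, 8) + 1 = 43. */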
+
+
+/**
+ * Given the prepared context build the dictionary.
+ */
+static size_t
+FASTCOVER_buildDictionary(const FASTCOVER_ctx_t* ctx,
+                          U32* freqs,
+                          void* dictBuffer, size_t dictBufferCapacity,
+                          ZDICT_cover_params_t parameters,
+                          U16* segmentFreqs)
+{
+  BYTE *const dict = (BYTE *)dictBuffer;
+  size_t tail = dictBufferCapacity;
+  /* Divide the data into epochs. We will select one segment from each epoch. */
+  const COVER_epoch_info_t epochs = COVER_computeEpochs(
+      (U32)dictBufferCapacity, (U32)ctx->nbDmers, parameters.k, 1);
+  const size_t maxZeroScoreRun = 10;
+  size_t zeroScoreRun = 0;
+  size_t epoch;
+  DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
+                (U32)epochs.num, (U32)epochs.size);
+  /* Loop through the epochs until there are no more segments or the dictionary
+   * is full.
+   */
+  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) {
+    const U32 epochBegin = (U32)(epoch * epochs.size);
+    const U32 epochEnd = epochBegin + epochs.size;
+    size_t segmentSize;
+    /* Select a segment */
+    COVER_segment_t segment = FASTCOVER_selectSegment(
+        ctx, freqs, epochBegin, epochEnd, parameters, segmentFreqs);
+
+    /* If the segment covers no dmers, then we are out of content.
+     * There may be new content in other epochs, so continue for some time.
+     */
+    if (segment.score == 0) {
+      if (++zeroScoreRun >= maxZeroScoreRun) {
+          break;
+      }
+      continue;
+    }
+    zeroScoreRun = 0;
+
+    /* Trim the segment if necessary and if it is too small then we are done */
+    segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
+    if (segmentSize < parameters.d) {
+      break;
+    }
+
+    /* We fill the dictionary from the back to allow the best segments to be
+     * referenced with the smallest offsets.
+     */
+    tail -= segmentSize;
+    memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
+    DISPLAYUPDATE(
+        2, "\r%u%%       ",
+        (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
+  }
+  DISPLAYLEVEL(2, "\r%79s\r", "");
+  return tail;
+}
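+
+/* Illustrative fill order (hypothetical sizes): with dictBufferCapacity = 4096
+ * and a first selected segment of segmentSize = 512, tail drops to 3584 and
+ * that segment occupies dict[3584..4095]; each subsequent segment is copied
+ * just below the previous one, so the earliest picks sit at the back of the
+ * dictionary where they can be referenced with the smallest offsets. */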
+
+/**
+ * Parameters for FASTCOVER_tryParameters().
+ */
+typedef struct FASTCOVER_tryParameters_data_s {
+    const FASTCOVER_ctx_t* ctx;
+    COVER_best_t* best;
+    size_t dictBufferCapacity;
+    ZDICT_cover_params_t parameters;
+} FASTCOVER_tryParameters_data_t;
+
+
+/**
+ * Tries a set of parameters and updates the COVER_best_t with the results.
+ * This function is thread safe if zstd is compiled with multithreaded support.
+ * It takes its parameters as an *OWNING* opaque pointer to support threading.
+ */
+static void FASTCOVER_tryParameters(void *opaque)
+{
+  /* Save parameters as local variables */
+  FASTCOVER_tryParameters_data_t *const data = (FASTCOVER_tryParameters_data_t *)opaque;
+  const FASTCOVER_ctx_t *const ctx = data->ctx;
+  const ZDICT_cover_params_t parameters = data->parameters;
+  size_t dictBufferCapacity = data->dictBufferCapacity;
+  size_t totalCompressedSize = ERROR(GENERIC);
+  /* Initialize array to keep track of frequency of dmer within activeSegment */
+  U16* segmentFreqs = (U16 *)calloc(((U64)1 << ctx->f), sizeof(U16));
+  /* Allocate space for hash table, dict, and freqs */
+  BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity);
+  COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC));
+  U32 *freqs = (U32*) malloc(((U64)1 << ctx->f) * sizeof(U32));
+  if (!segmentFreqs || !dict || !freqs) {
+    DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
+    goto _cleanup;
+  }
+  /* Copy the frequencies because we need to modify them */
+  memcpy(freqs, ctx->freqs, ((U64)1 << ctx->f) * sizeof(U32));
+  /* Build the dictionary */
+  { const size_t tail = FASTCOVER_buildDictionary(ctx, freqs, dict, dictBufferCapacity,
+                                                    parameters, segmentFreqs);
+
+    const unsigned nbFinalizeSamples = (unsigned)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100);
+    selection = COVER_selectDict(dict + tail, dictBufferCapacity - tail,
+         ctx->samples, ctx->samplesSizes, nbFinalizeSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets,
+         totalCompressedSize);
+
+    if (COVER_dictSelectionIsError(selection)) {
+      DISPLAYLEVEL(1, "Failed to select dictionary\n");
+      goto _cleanup;
+    }
+  }
+_cleanup:
+  free(dict);
+  COVER_best_finish(data->best, parameters, selection);
+  free(data);
+  free(segmentFreqs);
+  COVER_dictSelectionFree(selection);
+  free(freqs);
+}
+
+
+static void
+FASTCOVER_convertToCoverParams(ZDICT_fastCover_params_t fastCoverParams,
+                               ZDICT_cover_params_t* coverParams)
+{
+    coverParams->k = fastCoverParams.k;
+    coverParams->d = fastCoverParams.d;
+    coverParams->steps = fastCoverParams.steps;
+    coverParams->nbThreads = fastCoverParams.nbThreads;
+    coverParams->splitPoint = fastCoverParams.splitPoint;
+    coverParams->zParams = fastCoverParams.zParams;
+    coverParams->shrinkDict = fastCoverParams.shrinkDict;
+}
+
+
+static void
+FASTCOVER_convertToFastCoverParams(ZDICT_cover_params_t coverParams,
+                                   ZDICT_fastCover_params_t* fastCoverParams,
+                                   unsigned f, unsigned accel)
+{
+    fastCoverParams->k = coverParams.k;
+    fastCoverParams->d = coverParams.d;
+    fastCoverParams->steps = coverParams.steps;
+    fastCoverParams->nbThreads = coverParams.nbThreads;
+    fastCoverParams->splitPoint = coverParams.splitPoint;
+    fastCoverParams->f = f;
+    fastCoverParams->accel = accel;
+    fastCoverParams->zParams = coverParams.zParams;
+    fastCoverParams->shrinkDict = coverParams.shrinkDict;
+}
+
+
+ZDICTLIB_API size_t
+ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity,
+                                const void* samplesBuffer,
+                                const size_t* samplesSizes, unsigned nbSamples,
+                                ZDICT_fastCover_params_t parameters)
+{
+    BYTE* const dict = (BYTE*)dictBuffer;
+    FASTCOVER_ctx_t ctx;
+    ZDICT_cover_params_t coverParams;
+    FASTCOVER_accel_t accelParams;
+    /* Initialize global data */
+    g_displayLevel = parameters.zParams.notificationLevel;
+    /* Assign splitPoint and f if not provided */
+    parameters.splitPoint = 1.0;
+    parameters.f = parameters.f == 0 ? DEFAULT_F : parameters.f;
+    parameters.accel = parameters.accel == 0 ? DEFAULT_ACCEL : parameters.accel;
+    /* Convert to cover parameter */
+    memset(&coverParams, 0 , sizeof(coverParams));
+    FASTCOVER_convertToCoverParams(parameters, &coverParams);
+    /* Checks */
+    if (!FASTCOVER_checkParameters(coverParams, dictBufferCapacity, parameters.f,
+                                   parameters.accel)) {
+      DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n");
+      return ERROR(parameter_outOfBound);
+    }
+    if (nbSamples == 0) {
+      DISPLAYLEVEL(1, "FASTCOVER must have at least one input file\n");
+      return ERROR(srcSize_wrong);
+    }
+    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
+      DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
+                   ZDICT_DICTSIZE_MIN);
+      return ERROR(dstSize_tooSmall);
+    }
+    /* Assign the corresponding FASTCOVER_accel_t to accelParams */
+    accelParams = FASTCOVER_defaultAccelParameters[parameters.accel];
+    /* Initialize context */
+    {
+      size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
+                            coverParams.d, parameters.splitPoint, parameters.f,
+                            accelParams);
+      if (ZSTD_isError(initVal)) {
+        DISPLAYLEVEL(1, "Failed to initialize context\n");
+        return initVal;
+      }
+    }
+    COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel);
+    /* Build the dictionary */
+    DISPLAYLEVEL(2, "Building dictionary\n");
+    {
+      /* Initialize array to keep track of frequency of dmer within activeSegment */
+      U16* segmentFreqs = (U16 *)calloc(((U64)1 << parameters.f), sizeof(U16));
+      const size_t tail = FASTCOVER_buildDictionary(&ctx, ctx.freqs, dictBuffer,
+                                                dictBufferCapacity, coverParams, segmentFreqs);
+      const unsigned nbFinalizeSamples = (unsigned)(ctx.nbTrainSamples * ctx.accelParams.finalize / 100);
+      const size_t dictionarySize = ZDICT_finalizeDictionary(
+          dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
+          samplesBuffer, samplesSizes, nbFinalizeSamples, coverParams.zParams);
+      if (!ZSTD_isError(dictionarySize)) {
+          DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
+                      (unsigned)dictionarySize);
+      }
+      FASTCOVER_ctx_destroy(&ctx);
+      free(segmentFreqs);
+      return dictionarySize;
+    }
+}
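+
+/* Illustrative sketch (not upstream zstd code): a minimal call into the
+ * trainer above. The parameter values are hypothetical; unset fields keep
+ * their zeroed defaults. The call returns a dictionary size, or an error code
+ * testable with ZDICT_isError(). */
+#if 0
+static size_t exampleTrainFastCover(void* dictBuffer, size_t dictCapacity,
+                                    const void* samplesBuffer,
+                                    const size_t* samplesSizes, unsigned nbSamples)
+{
+    ZDICT_fastCover_params_t params;
+    memset(&params, 0, sizeof(params));
+    params.d = 8;          /* dmer size: must be 6 or 8 */
+    params.k = 200;        /* segment size, must be >= d */
+    params.f = DEFAULT_F;  /* log2 of the frequency-table size */
+    params.accel = DEFAULT_ACCEL;
+    return ZDICT_trainFromBuffer_fastCover(dictBuffer, dictCapacity,
+                                           samplesBuffer, samplesSizes,
+                                           nbSamples, params);
+}
+#endif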
+
+
+ZDICTLIB_API size_t
+ZDICT_optimizeTrainFromBuffer_fastCover(
+                    void* dictBuffer, size_t dictBufferCapacity,
+                    const void* samplesBuffer,
+                    const size_t* samplesSizes, unsigned nbSamples,
+                    ZDICT_fastCover_params_t* parameters)
+{
+    ZDICT_cover_params_t coverParams;
+    FASTCOVER_accel_t accelParams;
+    /* constants */
+    const unsigned nbThreads = parameters->nbThreads;
+    const double splitPoint =
+        parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint;
+    const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
+    const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
+    const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
+    const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
+    const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
+    const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
+    const unsigned kIterations =
+        (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
+    const unsigned f = parameters->f == 0 ? DEFAULT_F : parameters->f;
+    const unsigned accel = parameters->accel == 0 ? DEFAULT_ACCEL : parameters->accel;
+    const unsigned shrinkDict = 0;
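+    /* Grid-search arithmetic (worked example with the defaults above): with d
+     * and k unset, d sweeps {6, 8} and k sweeps 50..2000 with
+     * kStepSize = MAX(1950/40, 1) = 48, so kIterations = 2 * (1 + 1950/48)
+     * = 2 * 41 = 82 parameter sets are tried. */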
+    /* Local variables */
+    const int displayLevel = parameters->zParams.notificationLevel;
+    unsigned iteration = 1;
+    unsigned d;
+    unsigned k;
+    COVER_best_t best;
+    POOL_ctx *pool = NULL;
+    int warned = 0;
+    /* Checks */
+    if (splitPoint <= 0 || splitPoint > 1) {
+      LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect splitPoint\n");
+      return ERROR(parameter_outOfBound);
+    }
+    if (accel == 0 || accel > FASTCOVER_MAX_ACCEL) {
+      LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect accel\n");
+      return ERROR(parameter_outOfBound);
+    }
+    if (kMinK < kMaxD || kMaxK < kMinK) {
+      LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect k\n");
+      return ERROR(parameter_outOfBound);
+    }
+    if (nbSamples == 0) {
+      LOCALDISPLAYLEVEL(displayLevel, 1, "FASTCOVER must have at least one input file\n");
+      return ERROR(srcSize_wrong);
+    }
+    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
+      LOCALDISPLAYLEVEL(displayLevel, 1, "dictBufferCapacity must be at least %u\n",
+                   ZDICT_DICTSIZE_MIN);
+      return ERROR(dstSize_tooSmall);
+    }
+    if (nbThreads > 1) {
+      pool = POOL_create(nbThreads, 1);
+      if (!pool) {
+        return ERROR(memory_allocation);
+      }
+    }
+    /* Initialization */
+    COVER_best_init(&best);
+    memset(&coverParams, 0 , sizeof(coverParams));
+    FASTCOVER_convertToCoverParams(*parameters, &coverParams);
+    accelParams = FASTCOVER_defaultAccelParameters[accel];
+    /* Turn down global display level to clean up display at level 2 and below */
+    g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
+    /* Loop through d first because each new value needs a new context */
+    LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
+                      kIterations);
+    for (d = kMinD; d <= kMaxD; d += 2) {
+      /* Initialize the context for this value of d */
+      FASTCOVER_ctx_t ctx;
+      LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
+      {
+        size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams);
+        if (ZSTD_isError(initVal)) {
+          LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
+          COVER_best_destroy(&best);
+          POOL_free(pool);
+          return initVal;
+        }
+      }
+      if (!warned) {
+        COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, displayLevel);
+        warned = 1;
+      }
+      /* Loop through k reusing the same context */
+      for (k = kMinK; k <= kMaxK; k += kStepSize) {
+        /* Prepare the arguments */
+        FASTCOVER_tryParameters_data_t *data = (FASTCOVER_tryParameters_data_t *)malloc(
+            sizeof(FASTCOVER_tryParameters_data_t));
+        LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
+        if (!data) {
+          LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
+          COVER_best_destroy(&best);
+          FASTCOVER_ctx_destroy(&ctx);
+          POOL_free(pool);
+          return ERROR(memory_allocation);
+        }
+        data->ctx = &ctx;
+        data->best = &best;
+        data->dictBufferCapacity = dictBufferCapacity;
+        data->parameters = coverParams;
+        data->parameters.k = k;
+        data->parameters.d = d;
+        data->parameters.splitPoint = splitPoint;
+        data->parameters.steps = kSteps;
+        data->parameters.shrinkDict = shrinkDict;
+        data->parameters.zParams.notificationLevel = g_displayLevel;
+        /* Check the parameters */
+        if (!FASTCOVER_checkParameters(data->parameters, dictBufferCapacity,
+                                       data->ctx->f, accel)) {
+          DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n");
+          free(data);
+          continue;
+        }
+        /* Call the function and pass ownership of data to it */
+        COVER_best_start(&best);
+        if (pool) {
+          POOL_add(pool, &FASTCOVER_tryParameters, data);
+        } else {
+          FASTCOVER_tryParameters(data);
+        }
+        /* Print status */
+        LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%%       ",
+                           (unsigned)((iteration * 100) / kIterations));
+        ++iteration;
+      }
+      COVER_best_wait(&best);
+      FASTCOVER_ctx_destroy(&ctx);
+    }
+    LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
+    /* Fill the output buffer and parameters with output of the best parameters */
+    {
+      const size_t dictSize = best.dictSize;
+      if (ZSTD_isError(best.compressedSize)) {
+        const size_t compressedSize = best.compressedSize;
+        COVER_best_destroy(&best);
+        POOL_free(pool);
+        return compressedSize;
+      }
+      FASTCOVER_convertToFastCoverParams(best.parameters, parameters, f, accel);
+      memcpy(dictBuffer, best.dict, dictSize);
+      COVER_best_destroy(&best);
+      POOL_free(pool);
+      return dictSize;
+    }
+
+}
diff --git a/vendor/github.com/DataDog/zstd/fse.h b/vendor/github.com/DataDog/zstd/fse.h
index 6a1d272..811c670 100644
--- a/vendor/github.com/DataDog/zstd/fse.h
+++ b/vendor/github.com/DataDog/zstd/fse.h
@@ -72,6 +72,7 @@
 #define FSE_VERSION_NUMBER  (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
 FSE_PUBLIC_API unsigned FSE_versionNumber(void);   /**< library version number; to be used when checking dll version */
 
+
 /*-****************************************
 *  FSE simple functions
 ******************************************/
@@ -129,7 +130,7 @@
 ******************************************/
 /*!
 FSE_compress() does the following:
-1. count symbol occurrence from source[] into table count[]
+1. count symbol occurrence from source[] into table count[] (see hist.h)
 2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
 3. save normalized counters to memory buffer using writeNCount()
 4. build encoding table 'CTable' from normalized counters
@@ -147,15 +148,6 @@
 
 /* *** COMPRESSION *** */
 
-/*! FSE_count():
-    Provides the precise count of each byte within a table 'count'.
-    'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
-    *maxSymbolValuePtr will be updated if detected smaller than initial value.
-    @return : the count of the most frequent symbol (which is not identified).
-              if return == srcSize, there is only one symbol.
-              Can also return an error code, which can be tested with FSE_isError(). */
-FSE_PUBLIC_API size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
-
 /*! FSE_optimalTableLog():
     dynamically downsize 'tableLog' when conditions are met.
     It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
@@ -167,7 +159,8 @@
     'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
     @return : tableLog,
               or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, const unsigned* count, size_t srcSize, unsigned maxSymbolValue);
+FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog,
+                    const unsigned* count, size_t srcSize, unsigned maxSymbolValue);
 
 /*! FSE_NCountWriteBound():
     Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
@@ -178,8 +171,9 @@
     Compactly save 'normalizedCounter' into 'buffer'.
     @return : size of the compressed table,
               or an errorCode, which can be tested using FSE_isError(). */
-FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
-
+FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize,
+                                 const short* normalizedCounter,
+                                 unsigned maxSymbolValue, unsigned tableLog);
 
 /*! Constructor and Destructor of FSE_CTable.
     Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
@@ -250,7 +244,9 @@
     @return : size read from 'rBuffer',
               or an errorCode, which can be tested using FSE_isError().
               maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
-FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
+FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter,
+                           unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
+                           const void* rBuffer, size_t rBuffSize);
 
 /*! Constructor and Destructor of FSE_DTable.
     Note that its size depends on 'tableLog' */
@@ -325,33 +321,8 @@
 
 
 /* *****************************************
-*  FSE advanced API
-*******************************************/
-/* FSE_count_wksp() :
- * Same as FSE_count(), but using an externally provided scratch buffer.
- * `workSpace` size must be table of >= `1024` unsigned
- */
-size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
-                 const void* source, size_t sourceSize, unsigned* workSpace);
-
-/** FSE_countFast() :
- *  same as FSE_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr
- */
-size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
-
-/* FSE_countFast_wksp() :
- * Same as FSE_countFast(), but using an externally provided scratch buffer.
- * `workSpace` must be a table of minimum `1024` unsigned
- */
-size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* workSpace);
-
-/*! FSE_count_simple() :
- * Same as FSE_countFast(), but does not use any additional memory (not even on stack).
- * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`).
-*/
-size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
-
-
+ *  FSE advanced API
+ ***************************************** */
 
 unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
 /**< same as FSE_optimalTableLog(), which used `minus==2` */
@@ -387,7 +358,7 @@
 typedef enum {
    FSE_repeat_none,  /**< Cannot use the previous table */
    FSE_repeat_check, /**< Can use the previous table but it must be checked */
-   FSE_repeat_valid  /**< Can use the previous table and it is asumed to be valid */
+   FSE_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
  } FSE_repeat;
 
 /* *****************************************
@@ -541,7 +512,7 @@
     const U32 tableLog = MEM_read16(ptr);
     statePtr->value = (ptrdiff_t)1<<tableLog;
     statePtr->stateTable = u16ptr+2;
-    statePtr->symbolTT = ((const U32*)ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1));
+    statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1);
     statePtr->stateLog = tableLog;
 }
 
@@ -560,7 +531,7 @@
     }
 }
 
-MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, U32 symbol)
+MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol)
 {
     FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
     const U16* const stateTable = (const U16*)(statePtr->stateTable);
@@ -576,6 +547,39 @@
 }
 
 
+/* FSE_getMaxNbBits() :
+ * Approximate maximum cost of a symbol, in bits.
+ * Fractional costs are rounded up (i.e.: a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
+ * note 1 : assume symbolValue is valid (<= maxSymbolValue)
+ * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
+MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)
+{
+    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
+    return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16;
+}
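+
+/* Worked example: for a symbol with normalized frequency 0, FSE_buildCTable_wksp
+ * stores deltaNbBits = ((tableLog+1) << 16) - (1 << tableLog); since
+ * 1 << tableLog <= 2^15, adding (1<<16)-1 and shifting right by 16 yields
+ * exactly tableLog+1, the "fake cost" of note 2 above. */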
+
+/* FSE_bitCost() :
+ * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
+ * note 1 : assume symbolValue is valid (<= maxSymbolValue)
+ * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
+MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog)
+{
+    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
+    U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;
+    U32 const threshold = (minNbBits+1) << 16;
+    assert(tableLog < 16);
+    assert(accuracyLog < 31-tableLog);  /* ensure enough room for renormalization double shift */
+    {   U32 const tableSize = 1 << tableLog;
+        U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);
+        U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog;   /* linear interpolation (very approximate) */
+        U32 const bitMultiplier = 1 << accuracyLog;
+        assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);
+        assert(normalizedDeltaFromThreshold <= bitMultiplier);
+        return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold;
+    }
+}
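+
+/* Reading the fixed-point result (hypothetical values): with accuracyLog = 8,
+ * a return value of 640 encodes 640/256 = 2.5 bits; the result always lies
+ * between minNbBits and minNbBits+1 whole bits. */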
+
+
 /* ======    Decompression    ====== */
 
 typedef struct {
diff --git a/vendor/github.com/DataDog/zstd/fse_compress.c b/vendor/github.com/DataDog/zstd/fse_compress.c
index cb8f1fa..68b47e1 100644
--- a/vendor/github.com/DataDog/zstd/fse_compress.c
+++ b/vendor/github.com/DataDog/zstd/fse_compress.c
@@ -1,6 +1,6 @@
 /* ******************************************************************
    FSE : Finite State Entropy encoder
-   Copyright (C) 2013-2015, Yann Collet.
+   Copyright (C) 2013-present, Yann Collet.
 
    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 
@@ -37,9 +37,11 @@
 ****************************************************************/
 #include <stdlib.h>     /* malloc, free, qsort */
 #include <string.h>     /* memcpy, memset */
-#include <stdio.h>      /* printf (debug) */
-#include "bitstream.h"
 #include "compiler.h"
+#include "mem.h"        /* U32, U16, etc. */
+#include "debug.h"      /* assert, DEBUGLOG */
+#include "hist.h"       /* HIST_count_wksp */
+#include "bitstream.h"
 #define FSE_STATIC_LINKING_ONLY
 #include "fse.h"
 #include "error_private.h"
@@ -49,7 +51,6 @@
 *  Error Management
 ****************************************************************/
 #define FSE_isError ERR_isError
-#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
 
 
 /* **************************************************************
@@ -82,7 +83,9 @@
  * wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
  * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
  */
-size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+size_t FSE_buildCTable_wksp(FSE_CTable* ct,
+                      const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
+                            void* workSpace, size_t wkspSize)
 {
     U32 const tableSize = 1 << tableLog;
     U32 const tableMask = tableSize - 1;
@@ -100,14 +103,19 @@
     if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge);
     tableU16[-2] = (U16) tableLog;
     tableU16[-1] = (U16) maxSymbolValue;
+    assert(tableLog < 16);   /* required for threshold strategy to work */
 
     /* For explanations on how to distribute symbol values over the table :
-    *  http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
+     * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
+
+     #ifdef __clang_analyzer__
+     memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize);   /* useless initialization, just to keep scan-build happy */
+     #endif
 
     /* symbol start positions */
     {   U32 u;
         cumul[0] = 0;
-        for (u=1; u<=maxSymbolValue+1; u++) {
+        for (u=1; u <= maxSymbolValue+1; u++) {
             if (normalizedCounter[u-1]==-1) {  /* Low proba symbol */
                 cumul[u] = cumul[u-1] + 1;
                 tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
@@ -121,14 +129,16 @@
     {   U32 position = 0;
         U32 symbol;
         for (symbol=0; symbol<=maxSymbolValue; symbol++) {
-            int nbOccurences;
-            for (nbOccurences=0; nbOccurences<normalizedCounter[symbol]; nbOccurences++) {
+            int nbOccurrences;
+            int const freq = normalizedCounter[symbol];
+            for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
                 tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
                 position = (position + step) & tableMask;
-                while (position > highThreshold) position = (position + step) & tableMask;   /* Low proba area */
+                while (position > highThreshold)
+                    position = (position + step) & tableMask;   /* Low proba area */
         }   }
 
-        if (position!=0) return ERROR(GENERIC);   /* Must have gone through all positions */
+        assert(position==0);  /* Must have initialized all positions */
     }
 
     /* Build table */
@@ -143,7 +153,10 @@
         for (s=0; s<=maxSymbolValue; s++) {
             switch (normalizedCounter[s])
             {
-            case  0: break;
+            case  0:
+                /* filling nonetheless, for compatibility with FSE_getMaxNbBits() */
+                symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<<tableLog);
+                break;
 
             case -1:
             case  1:
@@ -160,6 +173,18 @@
                     total +=  normalizedCounter[s];
     }   }   }   }
 
+#if 0  /* debug : symbol costs */
+    DEBUGLOG(5, "\n --- table statistics : ");
+    {   U32 symbol;
+        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
+            DEBUGLOG(5, "%3u: w=%3i,   maxBits=%u, fracBits=%.2f",
+                symbol, normalizedCounter[symbol],
+                FSE_getMaxNbBits(symbolTT, symbol),
+                (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
+        }
+    }
+#endif
+
     return 0;
 }
 
@@ -174,8 +199,9 @@
 
 #ifndef FSE_COMMONDEFS_ONLY
 
+
 /*-**************************************************************
-*  FSE NCount encoding-decoding
+*  FSE NCount encoding
 ****************************************************************/
 size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
 {
@@ -183,9 +209,10 @@
     return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND;  /* maxSymbolValue==0 ? use default */
 }
 
-static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize,
-                                       const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
-                                       unsigned writeIsSafe)
+static size_t
+FSE_writeNCount_generic (void* header, size_t headerBufferSize,
+                   const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
+                         unsigned writeIsSafe)
 {
     BYTE* const ostart = (BYTE*) header;
     BYTE* out = ostart;
@@ -194,13 +221,12 @@
     const int tableSize = 1 << tableLog;
     int remaining;
     int threshold;
-    U32 bitStream;
-    int bitCount;
-    unsigned charnum = 0;
-    int previous0 = 0;
+    U32 bitStream = 0;
+    int bitCount = 0;
+    unsigned symbol = 0;
+    unsigned const alphabetSize = maxSymbolValue + 1;
+    int previousIs0 = 0;
 
-    bitStream = 0;
-    bitCount  = 0;
     /* Table Size */
     bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
     bitCount  += 4;
@@ -210,48 +236,53 @@
     threshold = tableSize;
     nbBits = tableLog+1;
 
-    while (remaining>1) {  /* stops at 1 */
-        if (previous0) {
-            unsigned start = charnum;
-            while (!normalizedCounter[charnum]) charnum++;
-            while (charnum >= start+24) {
+    while ((symbol < alphabetSize) && (remaining>1)) {  /* stops at 1 */
+        if (previousIs0) {
+            unsigned start = symbol;
+            while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++;
+            if (symbol == alphabetSize) break;   /* incorrect distribution */
+            while (symbol >= start+24) {
                 start+=24;
                 bitStream += 0xFFFFU << bitCount;
-                if ((!writeIsSafe) && (out > oend-2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
+                if ((!writeIsSafe) && (out > oend-2))
+                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */
                 out[0] = (BYTE) bitStream;
                 out[1] = (BYTE)(bitStream>>8);
                 out+=2;
                 bitStream>>=16;
             }
-            while (charnum >= start+3) {
+            while (symbol >= start+3) {
                 start+=3;
                 bitStream += 3 << bitCount;
                 bitCount += 2;
             }
-            bitStream += (charnum-start) << bitCount;
+            bitStream += (symbol-start) << bitCount;
             bitCount += 2;
             if (bitCount>16) {
-                if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
+                if ((!writeIsSafe) && (out > oend - 2))
+                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */
                 out[0] = (BYTE)bitStream;
                 out[1] = (BYTE)(bitStream>>8);
                 out += 2;
                 bitStream >>= 16;
                 bitCount -= 16;
         }   }
-        {   int count = normalizedCounter[charnum++];
-            int const max = (2*threshold-1)-remaining;
+        {   int count = normalizedCounter[symbol++];
+            int const max = (2*threshold-1) - remaining;
             remaining -= count < 0 ? -count : count;
             count++;   /* +1 for extra accuracy */
-            if (count>=threshold) count += max;   /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
+            if (count>=threshold)
+                count += max;   /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
             bitStream += count << bitCount;
             bitCount  += nbBits;
             bitCount  -= (count<max);
-            previous0  = (count==1);
+            previousIs0  = (count==1);
             if (remaining<1) return ERROR(GENERIC);
             while (remaining<threshold) { nbBits--; threshold>>=1; }
         }
         if (bitCount>16) {
-            if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
+            if ((!writeIsSafe) && (out > oend - 2))
+                return ERROR(dstSize_tooSmall);   /* Buffer overflow */
             out[0] = (BYTE)bitStream;
             out[1] = (BYTE)(bitStream>>8);
             out += 2;
@@ -259,19 +290,23 @@
             bitCount -= 16;
     }   }
 
+    if (remaining != 1)
+        return ERROR(GENERIC);  /* incorrect normalized distribution */
+    assert(symbol <= alphabetSize);
+
     /* flush remaining bitStream */
-    if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
+    if ((!writeIsSafe) && (out > oend - 2))
+        return ERROR(dstSize_tooSmall);   /* Buffer overflow */
     out[0] = (BYTE)bitStream;
     out[1] = (BYTE)(bitStream>>8);
     out+= (bitCount+7) /8;
 
-    if (charnum > maxSymbolValue + 1) return ERROR(GENERIC);
-
     return (out-ostart);
 }
 
 
-size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+size_t FSE_writeNCount (void* buffer, size_t bufferSize,
+                  const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
 {
     if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported */
     if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported */
@@ -279,179 +314,13 @@
     if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
         return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
 
-    return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1);
+    return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */);
 }
 
 
-
-/*-**************************************************************
-*  Counting histogram
-****************************************************************/
-/*! FSE_count_simple
-    This function counts byte values within `src`, and store the histogram into table `count`.
-    It doesn't use any additional memory.
-    But this function is unsafe : it doesn't check that all values within `src` can fit into `count`.
-    For this reason, prefer using a table `count` with 256 elements.
-    @return : count of most numerous element.
-*/
-size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
-                        const void* src, size_t srcSize)
-{
-    const BYTE* ip = (const BYTE*)src;
-    const BYTE* const end = ip + srcSize;
-    unsigned maxSymbolValue = *maxSymbolValuePtr;
-    unsigned max=0;
-
-    memset(count, 0, (maxSymbolValue+1)*sizeof(*count));
-    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
-
-    while (ip<end) {
-        assert(*ip <= maxSymbolValue);
-        count[*ip++]++;
-    }
-
-    while (!count[maxSymbolValue]) maxSymbolValue--;
-    *maxSymbolValuePtr = maxSymbolValue;
-
-    { U32 s; for (s=0; s<=maxSymbolValue; s++) if (count[s] > max) max = count[s]; }
-
-    return (size_t)max;
-}
-
-
-/* FSE_count_parallel_wksp() :
- * Same as FSE_count_parallel(), but using an externally provided scratch buffer.
- * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`.
- * @return : largest histogram frequency, or an error code (notably when histogram would be larger than *maxSymbolValuePtr). */
-static size_t FSE_count_parallel_wksp(
-                                unsigned* count, unsigned* maxSymbolValuePtr,
-                                const void* source, size_t sourceSize,
-                                unsigned checkMax, unsigned* const workSpace)
-{
-    const BYTE* ip = (const BYTE*)source;
-    const BYTE* const iend = ip+sourceSize;
-    unsigned maxSymbolValue = *maxSymbolValuePtr;
-    unsigned max=0;
-    U32* const Counting1 = workSpace;
-    U32* const Counting2 = Counting1 + 256;
-    U32* const Counting3 = Counting2 + 256;
-    U32* const Counting4 = Counting3 + 256;
-
-    memset(workSpace, 0, 4*256*sizeof(unsigned));
-
-    /* safety checks */
-    if (!sourceSize) {
-        memset(count, 0, maxSymbolValue + 1);
-        *maxSymbolValuePtr = 0;
-        return 0;
-    }
-    if (!maxSymbolValue) maxSymbolValue = 255;            /* 0 == default */
-
-    /* by stripes of 16 bytes */
-    {   U32 cached = MEM_read32(ip); ip += 4;
-        while (ip < iend-15) {
-            U32 c = cached; cached = MEM_read32(ip); ip += 4;
-            Counting1[(BYTE) c     ]++;
-            Counting2[(BYTE)(c>>8) ]++;
-            Counting3[(BYTE)(c>>16)]++;
-            Counting4[       c>>24 ]++;
-            c = cached; cached = MEM_read32(ip); ip += 4;
-            Counting1[(BYTE) c     ]++;
-            Counting2[(BYTE)(c>>8) ]++;
-            Counting3[(BYTE)(c>>16)]++;
-            Counting4[       c>>24 ]++;
-            c = cached; cached = MEM_read32(ip); ip += 4;
-            Counting1[(BYTE) c     ]++;
-            Counting2[(BYTE)(c>>8) ]++;
-            Counting3[(BYTE)(c>>16)]++;
-            Counting4[       c>>24 ]++;
-            c = cached; cached = MEM_read32(ip); ip += 4;
-            Counting1[(BYTE) c     ]++;
-            Counting2[(BYTE)(c>>8) ]++;
-            Counting3[(BYTE)(c>>16)]++;
-            Counting4[       c>>24 ]++;
-        }
-        ip-=4;
-    }
-
-    /* finish last symbols */
-    while (ip<iend) Counting1[*ip++]++;
-
-    if (checkMax) {   /* verify stats will fit into destination table */
-        U32 s; for (s=255; s>maxSymbolValue; s--) {
-            Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
-            if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
-    }   }
-
-    {   U32 s;
-        if (maxSymbolValue > 255) maxSymbolValue = 255;
-        for (s=0; s<=maxSymbolValue; s++) {
-            count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
-            if (count[s] > max) max = count[s];
-    }   }
-
-    while (!count[maxSymbolValue]) maxSymbolValue--;
-    *maxSymbolValuePtr = maxSymbolValue;
-    return (size_t)max;
-}
-
-/* FSE_countFast_wksp() :
- * Same as FSE_countFast(), but using an externally provided scratch buffer.
- * `workSpace` size must be table of >= `1024` unsigned */
-size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
-                          const void* source, size_t sourceSize,
-                          unsigned* workSpace)
-{
-    if (sourceSize < 1500) /* heuristic threshold */
-        return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize);
-    return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace);
-}
-
-/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
-size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
-                     const void* source, size_t sourceSize)
-{
-    unsigned tmpCounters[1024];
-    return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters);
-}
-
-/* FSE_count_wksp() :
- * Same as FSE_count(), but using an externally provided scratch buffer.
- * `workSpace` size must be table of >= `1024` unsigned */
-size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
-                 const void* source, size_t sourceSize, unsigned* workSpace)
-{
-    if (*maxSymbolValuePtr < 255)
-        return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace);
-    *maxSymbolValuePtr = 255;
-    return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace);
-}
-
-size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr,
-                 const void* src, size_t srcSize)
-{
-    unsigned tmpCounters[1024];
-    return FSE_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters);
-}
-
-
-
 /*-**************************************************************
 *  FSE Compression Code
 ****************************************************************/
-/*! FSE_sizeof_CTable() :
-    FSE_CTable is a variable size structure which contains :
-    `U16 tableLog;`
-    `U16 maxSymbolValue;`
-    `U16 nextStateNumber[1 << tableLog];`                         // This size is variable
-    `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];`  // This size is variable
-Allocation is manual (C standard does not support variable-size structures).
-*/
-size_t FSE_sizeof_CTable (unsigned maxSymbolValue, unsigned tableLog)
-{
-    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
-    return FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
-}
 
 FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
 {
@@ -466,7 +335,7 @@
 /* provides the minimum logSize to safely represent a distribution */
 static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
 {
-    U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1;
+    U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1;
     U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
     U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
     assert(srcSize > 1); /* Not supported, RLE should be used instead */
@@ -529,6 +398,9 @@
     }
     ToDistribute = (1 << tableLog) - distributed;
 
+    if (ToDistribute == 0)
+        return 0;
+
     if ((total / ToDistribute) > lowOne) {
         /* risk of rounding to zero */
         lowOne = (U32)((total * 3) / (ToDistribute * 2));
@@ -629,11 +501,11 @@
         U32 s;
         U32 nTotal = 0;
         for (s=0; s<=maxSymbolValue; s++)
-            printf("%3i: %4i \n", s, normalizedCounter[s]);
+            RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]);
         for (s=0; s<=maxSymbolValue; s++)
             nTotal += abs(normalizedCounter[s]);
         if (nTotal != (1U<<tableLog))
-            printf("Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
+            RAWLOG(2, "Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
         getchar();
     }
 #endif
@@ -786,7 +658,7 @@
     BYTE* op = ostart;
     BYTE* const oend = ostart + dstSize;
 
-    U32   count[FSE_MAX_SYMBOL_VALUE+1];
+    unsigned count[FSE_MAX_SYMBOL_VALUE+1];
     S16   norm[FSE_MAX_SYMBOL_VALUE+1];
     FSE_CTable* CTable = (FSE_CTable*)workSpace;
     size_t const CTableSize = FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue);
@@ -800,7 +672,7 @@
     if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;
 
     /* Scan input and build symbol stats */
-    {   CHECK_V_F(maxCount, FSE_count_wksp(count, &maxSymbolValue, src, srcSize, (unsigned*)scratchBuffer) );
+    {   CHECK_V_F(maxCount, HIST_count_wksp(count, &maxSymbolValue, src, srcSize, scratchBuffer, scratchBufferSize) );
         if (maxCount == srcSize) return 1;   /* only a single symbol in src : rle */
         if (maxCount == 1) return 0;         /* each symbol present maximum once => not compressible */
         if (maxCount < (srcSize >> 7)) return 0;   /* Heuristic : not compressible enough */
@@ -835,7 +707,7 @@
 size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog)
 {
     fseWkspMax_t scratchBuffer;
-    FSE_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE));   /* compilation failures here means scratchBuffer is not large enough */
+    DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE));   /* a compilation failure here means scratchBuffer is not large enough */
     if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
     return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer));
 }
diff --git a/vendor/github.com/DataDog/zstd/fse_decompress.c b/vendor/github.com/DataDog/zstd/fse_decompress.c
index 4c66c3b..72bbead 100644
--- a/vendor/github.com/DataDog/zstd/fse_decompress.c
+++ b/vendor/github.com/DataDog/zstd/fse_decompress.c
@@ -49,7 +49,7 @@
 *  Error Management
 ****************************************************************/
 #define FSE_isError ERR_isError
-#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */
 
 /* check and forward error code */
 #define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; }
diff --git a/vendor/github.com/DataDog/zstd/hist.c b/vendor/github.com/DataDog/zstd/hist.c
new file mode 100644
index 0000000..45b7bab
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/hist.c
@@ -0,0 +1,203 @@
+/* ******************************************************************
+   hist : Histogram functions
+   part of Finite State Entropy project
+   Copyright (C) 2013-present, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* --- dependencies --- */
+#include "mem.h"             /* U32, BYTE, etc. */
+#include "debug.h"           /* assert, DEBUGLOG */
+#include "error_private.h"   /* ERROR */
+#include "hist.h"
+
+
+/* --- Error management --- */
+unsigned HIST_isError(size_t code) { return ERR_isError(code); }
+
+/*-**************************************************************
+ *  Histogram functions
+ ****************************************************************/
+unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
+                           const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+    const BYTE* const end = ip + srcSize;
+    unsigned maxSymbolValue = *maxSymbolValuePtr;
+    unsigned largestCount=0;
+
+    memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
+    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
+
+    while (ip<end) {
+        assert(*ip <= maxSymbolValue);
+        count[*ip++]++;
+    }
+
+    while (!count[maxSymbolValue]) maxSymbolValue--;
+    *maxSymbolValuePtr = maxSymbolValue;
+
+    {   U32 s;
+        for (s=0; s<=maxSymbolValue; s++)
+            if (count[s] > largestCount) largestCount = count[s];
+    }
+
+    return largestCount;
+}
+
+typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;
+
+/* HIST_count_parallel_wksp() :
+ * store histogram into 4 intermediate tables, recombined at the end.
+ * this design makes better use of OoO cpus,
+ * and is noticeably faster when some values are heavily repeated.
+ * But it needs some additional workspace for intermediate tables.
+ * `workSpace` must be a table of size >= HIST_WKSP_SIZE_U32.
+ * @return : largest histogram frequency,
+ *           or an error code (notably when a symbol value within `source` is larger than *maxSymbolValuePtr). */
+static size_t HIST_count_parallel_wksp(
+                                unsigned* count, unsigned* maxSymbolValuePtr,
+                                const void* source, size_t sourceSize,
+                                HIST_checkInput_e check,
+                                U32* const workSpace)
+{
+    const BYTE* ip = (const BYTE*)source;
+    const BYTE* const iend = ip+sourceSize;
+    unsigned maxSymbolValue = *maxSymbolValuePtr;
+    unsigned max=0;
+    U32* const Counting1 = workSpace;
+    U32* const Counting2 = Counting1 + 256;
+    U32* const Counting3 = Counting2 + 256;
+    U32* const Counting4 = Counting3 + 256;
+
+    memset(workSpace, 0, 4*256*sizeof(unsigned));
+
+    /* safety checks */
+    if (!sourceSize) {
+        memset(count, 0, (maxSymbolValue + 1) * sizeof(*count));   /* size in bytes : count[] is a table of unsigned */
+        *maxSymbolValuePtr = 0;
+        return 0;
+    }
+    if (!maxSymbolValue) maxSymbolValue = 255;            /* 0 == default */
+
+    /* by stripes of 16 bytes */
+    {   U32 cached = MEM_read32(ip); ip += 4;
+        while (ip < iend-15) {
+            U32 c = cached; cached = MEM_read32(ip); ip += 4;
+            Counting1[(BYTE) c     ]++;
+            Counting2[(BYTE)(c>>8) ]++;
+            Counting3[(BYTE)(c>>16)]++;
+            Counting4[       c>>24 ]++;
+            c = cached; cached = MEM_read32(ip); ip += 4;
+            Counting1[(BYTE) c     ]++;
+            Counting2[(BYTE)(c>>8) ]++;
+            Counting3[(BYTE)(c>>16)]++;
+            Counting4[       c>>24 ]++;
+            c = cached; cached = MEM_read32(ip); ip += 4;
+            Counting1[(BYTE) c     ]++;
+            Counting2[(BYTE)(c>>8) ]++;
+            Counting3[(BYTE)(c>>16)]++;
+            Counting4[       c>>24 ]++;
+            c = cached; cached = MEM_read32(ip); ip += 4;
+            Counting1[(BYTE) c     ]++;
+            Counting2[(BYTE)(c>>8) ]++;
+            Counting3[(BYTE)(c>>16)]++;
+            Counting4[       c>>24 ]++;
+        }
+        ip-=4;
+    }
+
+    /* finish last symbols */
+    while (ip<iend) Counting1[*ip++]++;
+
+    if (check) {   /* verify stats will fit into destination table */
+        U32 s; for (s=255; s>maxSymbolValue; s--) {
+            Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
+            if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
+    }   }
+
+    {   U32 s;
+        if (maxSymbolValue > 255) maxSymbolValue = 255;
+        for (s=0; s<=maxSymbolValue; s++) {
+            count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
+            if (count[s] > max) max = count[s];
+    }   }
+
+    while (!count[maxSymbolValue]) maxSymbolValue--;
+    *maxSymbolValuePtr = maxSymbolValue;
+    return (size_t)max;
+}
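+
+/* Illustration (not part of the library) : the multi-table idea above,
+ * reduced to a minimal sketch. Four independent tables avoid serializing
+ * the read-modify-write of a single counter when the same byte value
+ * repeats, which is what helps out-of-order cpus :
+ *
+ *     unsigned h1[256] = {0}, h2[256] = {0}, h3[256] = {0}, h4[256] = {0};
+ *     size_t i;
+ *     for (i = 0; i + 4 <= srcSize; i += 4) {
+ *         h1[src[i  ]]++;
+ *         h2[src[i+1]]++;
+ *         h3[src[i+2]]++;
+ *         h4[src[i+3]]++;
+ *     }
+ *     for (; i < srcSize; i++) h1[src[i]]++;
+ *     for (i = 0; i < 256; i++) count[i] = h1[i] + h2[i] + h3[i] + h4[i];
+ *
+ * (`src` is assumed to be a const BYTE*, `count` an unsigned[256].)
+ */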
+
+/* HIST_countFast_wksp() :
+ * Same as HIST_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` is a writable buffer which must be 4-byte aligned,
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
+ */
+size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+                          const void* source, size_t sourceSize,
+                          void* workSpace, size_t workSpaceSize)
+{
+    if (sourceSize < 1500) /* heuristic threshold */
+        return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
+    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on a 4-byte boundary */
+    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
+    return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
+}
+
+/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
+size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
+                     const void* source, size_t sourceSize)
+{
+    unsigned tmpCounters[HIST_WKSP_SIZE_U32];
+    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters));
+}
+
+/* HIST_count_wksp() :
+ * Same as HIST_count(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of at least HIST_WKSP_SIZE_U32 unsigned */
+size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+                       const void* source, size_t sourceSize,
+                       void* workSpace, size_t workSpaceSize)
+{
+    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on a 4-byte boundary */
+    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
+    if (*maxSymbolValuePtr < 255)
+        return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
+    *maxSymbolValuePtr = 255;
+    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
+}
+
+size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
+                 const void* src, size_t srcSize)
+{
+    unsigned tmpCounters[HIST_WKSP_SIZE_U32];
+    return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(tmpCounters));
+}
diff --git a/vendor/github.com/DataDog/zstd/hist.h b/vendor/github.com/DataDog/zstd/hist.h
new file mode 100644
index 0000000..8b38935
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/hist.h
@@ -0,0 +1,95 @@
+/* ******************************************************************
+   hist : Histogram functions
+   part of Finite State Entropy project
+   Copyright (C) 2013-present, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* --- dependencies --- */
+#include <stddef.h>   /* size_t */
+
+
+/* --- simple histogram functions --- */
+
+/*! HIST_count():
+ *  Provides the precise count of each byte within a table 'count'.
+ * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
+ *  Updates *maxSymbolValuePtr with actual largest symbol value detected.
+ * @return : count of the most frequent symbol (which isn't identified),
+ *           or an error code, which can be tested using HIST_isError().
+ *           note : if return == srcSize, there is only one symbol.
+ */
+size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
+                  const void* src, size_t srcSize);
+
+unsigned HIST_isError(size_t code);  /**< tells if a return value is an error code */
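+
+/* Usage sketch (illustrative; `src` and `srcSize` stand for the caller's
+ * input buffer, and symbols are assumed to fit in a byte) :
+ *
+ *     unsigned count[256];
+ *     unsigned maxSymbolValue = 255;
+ *     size_t const largest = HIST_count(count, &maxSymbolValue, src, srcSize);
+ *     if (HIST_isError(largest)) return largest;
+ *     (count[s] now holds the frequency of each symbol s <= maxSymbolValue)
+ */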
+
+
+/* --- advanced histogram functions --- */
+
+#define HIST_WKSP_SIZE_U32 1024
+#define HIST_WKSP_SIZE    (HIST_WKSP_SIZE_U32 * sizeof(unsigned))
+/** HIST_count_wksp() :
+ *  Same as HIST_count(), but using an externally provided scratch buffer.
+ *  Benefit is this function will use very little stack space.
+ * `workSpace` is a writable buffer which must be 4-byte aligned,
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
+ */
+size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+                       const void* src, size_t srcSize,
+                       void* workSpace, size_t workSpaceSize);
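+
+/* Typical call with a caller-provided workspace (illustrative), avoiding
+ * the ~4 KB of stack the plain HIST_count() would otherwise use :
+ *
+ *     unsigned wksp[HIST_WKSP_SIZE_U32];   (4-byte aligned by construction)
+ *     size_t const largest = HIST_count_wksp(count, &maxSymbolValue,
+ *                                            src, srcSize, wksp, sizeof(wksp));
+ */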
+
+/** HIST_countFast() :
+ *  same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr.
+ *  This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr`
+ */
+size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
+                      const void* src, size_t srcSize);
+
+/** HIST_countFast_wksp() :
+ *  Same as HIST_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` is a writable buffer which must be 4-byte aligned,
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
+ */
+size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+                           const void* src, size_t srcSize,
+                           void* workSpace, size_t workSpaceSize);
+
+/*! HIST_count_simple() :
+ *  Like HIST_countFast(), this function is unsafe,
+ *  and will segfault if any value within `src` is `> *maxSymbolValuePtr`.
+ *  It is also a bit slower for large inputs.
+ *  However, it does not need any additional memory (not even on stack).
+ * @return : count of the most frequent symbol.
+ *  Note : this function cannot fail (it never returns an error code).
+ */
+unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
+                           const void* src, size_t srcSize);
diff --git a/vendor/github.com/DataDog/zstd/huf.h b/vendor/github.com/DataDog/zstd/huf.h
index b4645b4..6b572c4 100644
--- a/vendor/github.com/DataDog/zstd/huf.h
+++ b/vendor/github.com/DataDog/zstd/huf.h
@@ -1,7 +1,7 @@
 /* ******************************************************************
-   Huffman coder, part of New Generation Entropy library
-   header file
-   Copyright (C) 2013-2016, Yann Collet.
+   huff0 huffman codec,
+   part of Finite State Entropy library
+   Copyright (C) 2013-present, Yann Collet.
 
    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 
@@ -163,25 +163,29 @@
 /* static allocation of HUF's DTable */
 typedef U32 HUF_DTable;
 #define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<(maxTableLog)))
-#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \
         HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
-#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
+#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
         HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
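+/* Example declarations using the renamed macros (the names dtX1/dtX2 are
+ * illustrative) :
+ *
+ *     HUF_CREATE_STATIC_DTABLEX1(dtX1, HUF_TABLELOG_MAX);   (single-symbol)
+ *     HUF_CREATE_STATIC_DTABLEX2(dtX2, HUF_TABLELOG_MAX);   (double-symbols)
+ */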
 
 
 /* ****************************************
 *  Advanced decompression functions
 ******************************************/
-size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
-size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
+size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
+#endif
 
 size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< decodes RLE and uncompressed */
 size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
 size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */
-size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
-size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
-size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
-size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
+size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
+size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
+#endif
 
 
 /* ****************************************
@@ -208,7 +212,7 @@
 typedef enum {
    HUF_repeat_none,  /**< Cannot use the previous table */
    HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
-   HUF_repeat_valid  /**< Can use the previous table and it is asumed to be valid */
+   HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
  } HUF_repeat;
 /** HUF_compress4X_repeat() :
  *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
@@ -227,7 +231,9 @@
  */
 #define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
 #define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
-size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize);
+size_t HUF_buildCTable_wksp (HUF_CElt* tree,
+                       const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
+                             void* workSpace, size_t wkspSize);
 
 /*! HUF_readStats() :
  *  Read compact Huffman tree, saved by HUF_writeCTable().
@@ -242,10 +248,15 @@
  *  Loading a CTable saved with HUF_writeCTable() */
 size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
 
+/** HUF_getNbBits() :
+ *  Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
+ *  Note 1 : is not inlined, as HUF_CElt definition is private
+ *  Note 2 : the parameter is a const void*, so that a statically allocated table (which uses type U32) can be passed as argument */
+U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue);
 
 /*
  * HUF_decompress() does the following:
- * 1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
+ * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
  * 2. build Huffman table from save, using HUF_readDTableX?()
  * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
  */
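+
+/* The same three steps performed manually, as an illustrative sketch
+ * (error handling elided; assumes HUF_selectDecoder() returned 0, i.e.
+ * the single-symbol X1 path; the X2 path is symmetrical) :
+ *
+ *     HUF_CREATE_STATIC_DTABLEX1(dt, HUF_TABLELOG_MAX);
+ *     size_t const hSize = HUF_readDTableX1(dt, cSrc, cSrcSize);
+ *     size_t const dSize = HUF_decompress4X_usingDTable(dst, dstSize,
+ *                            (const BYTE*)cSrc + hSize, cSrcSize - hSize, dt);
+ */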
@@ -253,13 +264,13 @@
 /** HUF_selectDecoder() :
  *  Tells which decoder is likely to decode faster,
  *  based on a set of pre-computed metrics.
- * @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
+ * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
  *  Assumption : 0 < dstSize <= 128 KB */
 U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
 
 /**
  *  The minimum workspace size for the `workSpace` used in
- *  HUF_readDTableX2_wksp() and HUF_readDTableX4_wksp().
+ *  HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().
  *
  *  The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
 *  HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15.
@@ -270,14 +281,22 @@
 #define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10)
 #define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
 
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize);
+size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
 size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
 size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
-size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize);
-size_t HUF_readDTableX4_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
+#endif
 
 size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
 size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
 
 
 /* ====================== */
@@ -298,25 +317,37 @@
                        void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
                        HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
 
-size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
-size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
+size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
+#endif
 
 size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
 size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
-size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
-size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
-size_t HUF_decompress1X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
-size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
+size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
+#endif
 
 size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);   /**< automatic selection of single or double symbol decoder, based on DTable */
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
 size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-size_t HUF_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
 
 /* BMI2 variants.
  * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
  */
 size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
-size_t HUF_decompress1X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+#endif
 size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
 size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
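+
+/* Illustrative only : one way for a caller to obtain the `bmi2` flag on
+ * GCC/Clang (an assumption; not how this library detects BMI2 internally) :
+ *
+ *     int const bmi2 = __builtin_cpu_supports("bmi2");
+ */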
 
diff --git a/vendor/github.com/DataDog/zstd/huf_compress.c b/vendor/github.com/DataDog/zstd/huf_compress.c
index 83230b4..f074f1e 100644
--- a/vendor/github.com/DataDog/zstd/huf_compress.c
+++ b/vendor/github.com/DataDog/zstd/huf_compress.c
@@ -45,8 +45,9 @@
 ****************************************************************/
 #include <string.h>     /* memcpy, memset */
 #include <stdio.h>      /* printf (debug) */
-#include "bitstream.h"
 #include "compiler.h"
+#include "bitstream.h"
+#include "hist.h"
 #define FSE_STATIC_LINKING_ONLY   /* FSE_optimalTableLog_internal */
 #include "fse.h"        /* header compression */
 #define HUF_STATIC_LINKING_ONLY
@@ -58,7 +59,7 @@
 *  Error Management
 ****************************************************************/
 #define HUF_isError ERR_isError
-#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */
 #define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
 #define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }
 
@@ -81,28 +82,28 @@
  * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
  */
 #define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
-size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)
+static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)
 {
     BYTE* const ostart = (BYTE*) dst;
     BYTE* op = ostart;
     BYTE* const oend = ostart + dstSize;
 
-    U32 maxSymbolValue = HUF_TABLELOG_MAX;
+    unsigned maxSymbolValue = HUF_TABLELOG_MAX;
     U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
 
     FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
     BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER];
 
-    U32 count[HUF_TABLELOG_MAX+1];
+    unsigned count[HUF_TABLELOG_MAX+1];
     S16 norm[HUF_TABLELOG_MAX+1];
 
     /* init conditions */
     if (wtSize <= 1) return 0;  /* Not compressible */
 
     /* Scan input and build symbol stats */
-    {   CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize) );
+    {   unsigned const maxCount = HIST_count_simple(count, &maxSymbolValue, weightTable, wtSize);   /* never fails */
         if (maxCount == wtSize) return 1;   /* only a single symbol in src : rle */
-        if (maxCount == 1) return 0;         /* each symbol present maximum once => not compressible */
+        if (maxCount == 1) return 0;        /* each symbol present maximum once => not compressible */
     }
 
     tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
@@ -133,7 +134,7 @@
     `CTable` : Huffman tree to save, using huf representation.
     @return : size of saved CTable */
 size_t HUF_writeCTable (void* dst, size_t maxDstSize,
-                        const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog)
+                        const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
 {
     BYTE bitsToWeight[HUF_TABLELOG_MAX + 1];   /* precomputed conversion table */
     BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
@@ -168,7 +169,7 @@
 }
 
 
-size_t HUF_readCTable (HUF_CElt* CTable, U32* maxSymbolValuePtr, const void* src, size_t srcSize)
+size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize)
 {
     BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];   /* init not required, even though some static analyzer may complain */
     U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];   /* large enough for values from 0 to 16 */
@@ -216,6 +217,13 @@
     return readSize;
 }
 
+U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue)
+{
+    const HUF_CElt* table = (const HUF_CElt*)symbolTable;
+    assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
+    return table[symbolValue].nbBits;
+}
+
 
 typedef struct nodeElt_s {
     U32 count;
@@ -307,7 +315,7 @@
     U32 current;
 } rankPos;
 
-static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue)
+static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue)
 {
     rankPos rank[32];
     U32 n;
@@ -339,7 +347,7 @@
  */
 #define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
 typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
-size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
+size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
 {
     nodeElt* const huffNode0 = (nodeElt*)workSpace;
     nodeElt* const huffNode = huffNode0+1;
@@ -413,7 +421,7 @@
  * @return : maxNbBits
  *  Note : count is used before tree is written, so they can safely overlap
  */
-size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits)
+size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits)
 {
     huffNodeTable nodeTable;
     return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable));
@@ -602,13 +610,14 @@
     return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
 }
 
+typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
 
 static size_t HUF_compressCTable_internal(
                 BYTE* const ostart, BYTE* op, BYTE* const oend,
                 const void* src, size_t srcSize,
-                unsigned singleStream, const HUF_CElt* CTable, const int bmi2)
+                HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
 {
-    size_t const cSize = singleStream ?
+    size_t const cSize = (nbStreams==HUF_singleStream) ?
                          HUF_compress1X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2) :
                          HUF_compress4X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2);
     if (HUF_isError(cSize)) { return cSize; }
@@ -620,21 +629,21 @@
 }
 
 typedef struct {
-    U32 count[HUF_SYMBOLVALUE_MAX + 1];
+    unsigned count[HUF_SYMBOLVALUE_MAX + 1];
     HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
     huffNodeTable nodeTable;
 } HUF_compress_tables_t;
 
 /* HUF_compress_internal() :
  * `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
-static size_t HUF_compress_internal (
-                void* dst, size_t dstSize,
-                const void* src, size_t srcSize,
-                unsigned maxSymbolValue, unsigned huffLog,
-                unsigned singleStream,
-                void* workSpace, size_t wkspSize,
-                HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
-                const int bmi2)
+static size_t
+HUF_compress_internal (void* dst, size_t dstSize,
+                 const void* src, size_t srcSize,
+                       unsigned maxSymbolValue, unsigned huffLog,
+                       HUF_nbStreams_e nbStreams,
+                       void* workSpace, size_t wkspSize,
+                       HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
+                 const int bmi2)
 {
     HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace;
     BYTE* const ostart = (BYTE*)dst;
@@ -643,7 +652,7 @@
 
     /* checks & inits */
     if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
-    if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall);
+    if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall);
     if (!srcSize) return 0;  /* Uncompressed */
     if (!dstSize) return 0;  /* cannot fit anything within dst budget */
     if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);   /* current block size limit */
@@ -656,13 +665,13 @@
     if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
         return HUF_compressCTable_internal(ostart, op, oend,
                                            src, srcSize,
-                                           singleStream, oldHufTable, bmi2);
+                                           nbStreams, oldHufTable, bmi2);
     }
 
     /* Scan input and build symbol stats */
-    {   CHECK_V_F(largest, FSE_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->count) );
+    {   CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace, wkspSize) );
         if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; }   /* single symbol, rle */
-        if (largest <= (srcSize >> 7)+1) return 0;   /* heuristic : probably not compressible enough */
+        if (largest <= (srcSize >> 7)+4) return 0;   /* heuristic : probably not compressible enough */
     }
 
     /* Check validity of previous table */
@@ -675,14 +684,15 @@
     if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
         return HUF_compressCTable_internal(ostart, op, oend,
                                            src, srcSize,
-                                           singleStream, oldHufTable, bmi2);
+                                           nbStreams, oldHufTable, bmi2);
     }
 
     /* Build Huffman Tree */
     huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
-    {   CHECK_V_F(maxBits, HUF_buildCTable_wksp(table->CTable, table->count,
-                                                maxSymbolValue, huffLog,
-                                                table->nodeTable, sizeof(table->nodeTable)) );
+    {   size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
+                                            maxSymbolValue, huffLog,
+                                            table->nodeTable, sizeof(table->nodeTable));
+        CHECK_F(maxBits);
         huffLog = (U32)maxBits;
         /* Zero unused symbols in CTable, so we can check it for validity */
         memset(table->CTable + (maxSymbolValue + 1), 0,
@@ -698,7 +708,7 @@
             if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
                 return HUF_compressCTable_internal(ostart, op, oend,
                                                    src, srcSize,
-                                                   singleStream, oldHufTable, bmi2);
+                                                   nbStreams, oldHufTable, bmi2);
         }   }
 
         /* Use the new huffman table */
@@ -710,7 +720,7 @@
     }
     return HUF_compressCTable_internal(ostart, op, oend,
                                        src, srcSize,
-                                       singleStream, table->CTable, bmi2);
+                                       nbStreams, table->CTable, bmi2);
 }
 
 
@@ -720,7 +730,7 @@
                       void* workSpace, size_t wkspSize)
 {
     return HUF_compress_internal(dst, dstSize, src, srcSize,
-                                 maxSymbolValue, huffLog, 1 /*single stream*/,
+                                 maxSymbolValue, huffLog, HUF_singleStream,
                                  workSpace, wkspSize,
                                  NULL, NULL, 0, 0 /*bmi2*/);
 }
@@ -732,7 +742,7 @@
                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
 {
     return HUF_compress_internal(dst, dstSize, src, srcSize,
-                                 maxSymbolValue, huffLog, 1 /*single stream*/,
+                                 maxSymbolValue, huffLog, HUF_singleStream,
                                  workSpace, wkspSize, hufTable,
                                  repeat, preferRepeat, bmi2);
 }
@@ -754,7 +764,7 @@
                       void* workSpace, size_t wkspSize)
 {
     return HUF_compress_internal(dst, dstSize, src, srcSize,
-                                 maxSymbolValue, huffLog, 0 /*4 streams*/,
+                                 maxSymbolValue, huffLog, HUF_fourStreams,
                                  workSpace, wkspSize,
                                  NULL, NULL, 0, 0 /*bmi2*/);
 }
@@ -769,7 +779,7 @@
                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
 {
     return HUF_compress_internal(dst, dstSize, src, srcSize,
-                                 maxSymbolValue, huffLog, 0 /* 4 streams */,
+                                 maxSymbolValue, huffLog, HUF_fourStreams,
                                  workSpace, wkspSize,
                                  hufTable, repeat, preferRepeat, bmi2);
 }
diff --git a/vendor/github.com/DataDog/zstd/huf_decompress.c b/vendor/github.com/DataDog/zstd/huf_decompress.c
index 73f5c46..3f8bd29 100644
--- a/vendor/github.com/DataDog/zstd/huf_decompress.c
+++ b/vendor/github.com/DataDog/zstd/huf_decompress.c
@@ -1,6 +1,7 @@
 /* ******************************************************************
-   Huffman decoder, part of New Generation Entropy library
-   Copyright (C) 2013-2016, Yann Collet.
+   huff0 huffman decoder,
+   part of Finite State Entropy library
+   Copyright (C) 2013-present, Yann Collet.
 
    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 
@@ -29,26 +30,37 @@
 
     You can contact the author at :
     - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
 ****************************************************************** */
 
 /* **************************************************************
 *  Dependencies
 ****************************************************************/
 #include <string.h>     /* memcpy, memset */
-#include "bitstream.h"  /* BIT_* */
 #include "compiler.h"
-#include "fse.h"        /* header compression */
+#include "bitstream.h"  /* BIT_* */
+#include "fse.h"        /* to compress headers */
 #define HUF_STATIC_LINKING_ONLY
 #include "huf.h"
 #include "error_private.h"
 
+/* **************************************************************
+*  Macros
+****************************************************************/
+
+/* Each of these two optional macros forces the use of one of the two
+ * Huffman decompression implementations. They cannot both be defined
+ * at the same time.
+ */
+#if defined(HUF_FORCE_DECOMPRESS_X1) && \
+    defined(HUF_FORCE_DECOMPRESS_X2)
+#error "Cannot force the use of the X1 and X2 decoders at the same time!"
+#endif
+
 
 /* **************************************************************
 *  Error Management
 ****************************************************************/
 #define HUF_isError ERR_isError
-#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
 #define CHECK_F(f) { size_t const err_ = (f); if (HUF_isError(err_)) return err_; }
 
 
@@ -59,6 +71,51 @@
 #define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
 
 
+/* **************************************************************
+*  BMI2 Variant Wrappers
+****************************************************************/
+#if DYNAMIC_BMI2
+
+#define HUF_DGEN(fn)                                                        \
+                                                                            \
+    static size_t fn##_default(                                             \
+                  void* dst,  size_t dstSize,                               \
+            const void* cSrc, size_t cSrcSize,                              \
+            const HUF_DTable* DTable)                                       \
+    {                                                                       \
+        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
+    }                                                                       \
+                                                                            \
+    static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2(                       \
+                  void* dst,  size_t dstSize,                               \
+            const void* cSrc, size_t cSrcSize,                              \
+            const HUF_DTable* DTable)                                       \
+    {                                                                       \
+        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
+    }                                                                       \
+                                                                            \
+    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \
+                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \
+    {                                                                       \
+        if (bmi2) {                                                         \
+            return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);         \
+        }                                                                   \
+        return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable);          \
+    }
+
+#else
+
+#define HUF_DGEN(fn)                                                        \
+    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \
+                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \
+    {                                                                       \
+        (void)bmi2;                                                         \
+        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
+    }
+
+#endif
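+
+/* For instance, HUF_DGEN(HUF_decompress1X1_usingDTable_internal) generates
+ * a dispatcher of that name forwarding to
+ * HUF_decompress1X1_usingDTable_internal_body(), either directly or through
+ * a TARGET_ATTRIBUTE("bmi2") clone selected by the `bmi2` argument. */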
+
+
 /*-***************************/
 /*  generic DTableDesc       */
 /*-***************************/
@@ -72,18 +129,20 @@
 }
 
 
+#ifndef HUF_FORCE_DECOMPRESS_X2
+
 /*-***************************/
 /*  single-symbol decoding   */
 /*-***************************/
-typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2;   /* single-symbol decoding */
+typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1;   /* single-symbol decoding */
 
-size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
+size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
 {
     U32 tableLog = 0;
     U32 nbSymbols = 0;
     size_t iSize;
     void* const dtPtr = DTable + 1;
-    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
+    HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr;
 
     U32* rankVal;
     BYTE* huffWeight;
@@ -96,7 +155,7 @@
 
     if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge);
 
-    HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
+    DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
     /* memset(huffWeight, 0, sizeof(huffWeight)); */   /* is not necessary, even though some analyzer complain ... */
 
     iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
@@ -124,7 +183,7 @@
             U32 const w = huffWeight[n];
             U32 const length = (1 << w) >> 1;
             U32 u;
-            HUF_DEltX2 D;
+            HUF_DEltX1 D;
             D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
             for (u = rankVal[w]; u < rankVal[w] + length; u++)
                 dt[u] = D;
@@ -134,17 +193,15 @@
     return iSize;
 }
 
-size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize)
+size_t HUF_readDTableX1(HUF_DTable* DTable, const void* src, size_t srcSize)
 {
     U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-    return HUF_readDTableX2_wksp(DTable, src, srcSize,
+    return HUF_readDTableX1_wksp(DTable, src, srcSize,
                                  workSpace, sizeof(workSpace));
 }
 
-typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4;  /* double-symbols decoding */
-
 FORCE_INLINE_TEMPLATE BYTE
-HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
+HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
 {
     size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
     BYTE const c = dt[val].byte;
@@ -152,44 +209,44 @@
     return c;
 }
 
-#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
-    *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
+#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
+    *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog)
 
-#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr)  \
+#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr)  \
     if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
-        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
 
-#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
     if (MEM_64bits()) \
-        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
 
 HINT_INLINE size_t
-HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
+HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
 {
     BYTE* const pStart = p;
 
     /* up to 4 symbols at a time */
     while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
-        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
-        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+        HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX1_1(p, bitDPtr);
+        HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
     }
 
     /* [0-3] symbols remaining */
     if (MEM_32bits())
         while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
-            HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+            HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
 
     /* no more data to retrieve from bitstream, no need to reload */
     while (p < pEnd)
-        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+        HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
 
     return pEnd-pStart;
 }
 
 FORCE_INLINE_TEMPLATE size_t
-HUF_decompress1X2_usingDTable_internal_body(
+HUF_decompress1X1_usingDTable_internal_body(
           void* dst,  size_t dstSize,
     const void* cSrc, size_t cSrcSize,
     const HUF_DTable* DTable)
@@ -197,14 +254,14 @@
     BYTE* op = (BYTE*)dst;
     BYTE* const oend = op + dstSize;
     const void* dtPtr = DTable + 1;
-    const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+    const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
     BIT_DStream_t bitD;
     DTableDesc const dtd = HUF_getDTableDesc(DTable);
     U32 const dtLog = dtd.tableLog;
 
     CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
 
-    HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog);
+    HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog);
 
     if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
 
@@ -212,7 +269,7 @@
 }
 
 FORCE_INLINE_TEMPLATE size_t
-HUF_decompress4X2_usingDTable_internal_body(
+HUF_decompress4X1_usingDTable_internal_body(
           void* dst,  size_t dstSize,
     const void* cSrc, size_t cSrcSize,
     const HUF_DTable* DTable)
@@ -224,7 +281,7 @@
         BYTE* const ostart = (BYTE*) dst;
         BYTE* const oend = ostart + dstSize;
         const void* const dtPtr = DTable + 1;
-        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+        const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
 
         /* Init */
         BIT_DStream_t bitD1;
@@ -260,22 +317,22 @@
         /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
         endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
         while ( (endSignal==BIT_DStream_unfinished) && (op4<(oend-3)) ) {
-            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
-            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+            HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX1_1(op1, &bitD1);
+            HUF_DECODE_SYMBOLX1_1(op2, &bitD2);
+            HUF_DECODE_SYMBOLX1_1(op3, &bitD3);
+            HUF_DECODE_SYMBOLX1_1(op4, &bitD4);
+            HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX1_0(op1, &bitD1);
+            HUF_DECODE_SYMBOLX1_0(op2, &bitD2);
+            HUF_DECODE_SYMBOLX1_0(op3, &bitD3);
+            HUF_DECODE_SYMBOLX1_0(op4, &bitD4);
             BIT_reloadDStream(&bitD1);
             BIT_reloadDStream(&bitD2);
             BIT_reloadDStream(&bitD3);
@@ -291,191 +348,10 @@
         /* note : op4 supposed already verified within main loop */
 
         /* finish bitStreams one by one */
-        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
-        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
-        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
-        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
-
-        /* check */
-        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
-          if (!endCheck) return ERROR(corruption_detected); }
-
-        /* decoded size */
-        return dstSize;
-    }
-}
-
-
-FORCE_INLINE_TEMPLATE U32
-HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
-{
-    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    memcpy(op, dt+val, 2);
-    BIT_skipBits(DStream, dt[val].nbBits);
-    return dt[val].length;
-}
-
-FORCE_INLINE_TEMPLATE U32
-HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
-{
-    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    memcpy(op, dt+val, 1);
-    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
-    else {
-        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
-            BIT_skipBits(DStream, dt[val].nbBits);
-            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
-                /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
-                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
-    }   }
-    return 1;
-}
-
-#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
-    ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
-    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
-        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
-    if (MEM_64bits()) \
-        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-HINT_INLINE size_t
-HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
-                const HUF_DEltX4* const dt, const U32 dtLog)
-{
-    BYTE* const pStart = p;
-
-    /* up to 8 symbols at a time */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
-        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
-        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
-    }
-
-    /* closer to end : up to 2 symbols at a time */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
-        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
-
-    while (p <= pEnd-2)
-        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
-
-    if (p < pEnd)
-        p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
-
-    return p-pStart;
-}
-
-FORCE_INLINE_TEMPLATE size_t
-HUF_decompress1X4_usingDTable_internal_body(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUF_DTable* DTable)
-{
-    BIT_DStream_t bitD;
-
-    /* Init */
-    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
-
-    /* decode */
-    {   BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-        const void* const dtPtr = DTable+1;   /* force compiler to not use strict-aliasing */
-        const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr;
-        DTableDesc const dtd = HUF_getDTableDesc(DTable);
-        HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
-    }
-
-    /* check */
-    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
-
-    /* decoded size */
-    return dstSize;
-}
-
-
-FORCE_INLINE_TEMPLATE size_t
-HUF_decompress4X4_usingDTable_internal_body(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUF_DTable* DTable)
-{
-    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
-
-    {   const BYTE* const istart = (const BYTE*) cSrc;
-        BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-        const void* const dtPtr = DTable+1;
-        const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr;
-
-        /* Init */
-        BIT_DStream_t bitD1;
-        BIT_DStream_t bitD2;
-        BIT_DStream_t bitD3;
-        BIT_DStream_t bitD4;
-        size_t const length1 = MEM_readLE16(istart);
-        size_t const length2 = MEM_readLE16(istart+2);
-        size_t const length3 = MEM_readLE16(istart+4);
-        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
-        const BYTE* const istart1 = istart + 6;  /* jumpTable */
-        const BYTE* const istart2 = istart1 + length1;
-        const BYTE* const istart3 = istart2 + length2;
-        const BYTE* const istart4 = istart3 + length3;
-        size_t const segmentSize = (dstSize+3) / 4;
-        BYTE* const opStart2 = ostart + segmentSize;
-        BYTE* const opStart3 = opStart2 + segmentSize;
-        BYTE* const opStart4 = opStart3 + segmentSize;
-        BYTE* op1 = ostart;
-        BYTE* op2 = opStart2;
-        BYTE* op3 = opStart3;
-        BYTE* op4 = opStart4;
-        U32 endSignal;
-        DTableDesc const dtd = HUF_getDTableDesc(DTable);
-        U32 const dtLog = dtd.tableLog;
-
-        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-        CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
-        CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
-        CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
-        CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
-
-        /* 16-32 symbols per loop (4-8 symbols per stream) */
-        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) {
-            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
-            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
-
-            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        }
-
-        /* check corruption */
-        if (op1 > opStart2) return ERROR(corruption_detected);
-        if (op2 > opStart3) return ERROR(corruption_detected);
-        if (op3 > opStart4) return ERROR(corruption_detected);
-        /* note : op4 already verified within main loop */
-
-        /* finish bitStreams one by one */
-        HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
-        HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
-        HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
-        HUF_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);
+        HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog);
+        HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog);
+        HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog);
+        HUF_decodeStreamX1(op4, &bitD4, oend,     dt, dtLog);
 
         /* check */
         { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
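Both 4-stream decoder bodies in this file share the framing checked above: a 6-byte jump table holding three little-endian 16-bit stream lengths, with the fourth length implied by the total compressed size. A minimal standalone sketch of that parse, under the same layout assumptions (readLE16, HUF4Streams and parse4Streams are illustrative names, not part of the patch):

#include <stddef.h>

typedef struct { const unsigned char* start[4]; size_t len[4]; } HUF4Streams;

static unsigned readLE16(const unsigned char* p)
{
    return (unsigned)p[0] | ((unsigned)p[1] << 8);
}

/* @return : 0 on success, -1 if the header is inconsistent */
static int parse4Streams(const unsigned char* src, size_t srcSize, HUF4Streams* out)
{
    size_t l1, l2, l3;
    if (srcSize < 10) return -1;   /* strict minimum : jump table + 1 byte per stream */
    l1 = readLE16(src); l2 = readLE16(src+2); l3 = readLE16(src+4);
    if (l1 + l2 + l3 + 6 > srcSize) return -1;   /* first three streams must fit inside src */
    out->start[0] = src + 6;            out->len[0] = l1;   /* right after the jump table */
    out->start[1] = out->start[0] + l1; out->len[1] = l2;
    out->start[2] = out->start[1] + l2; out->len[2] = l3;
    out->start[3] = out->start[2] + l3;
    out->len[3]   = srcSize - (l1 + l2 + l3 + 6);   /* fourth length is implied */
    return 0;
}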
@@ -491,153 +367,119 @@
                                                const void *cSrc,
                                                size_t cSrcSize,
                                                const HUF_DTable *DTable);
-#if DYNAMIC_BMI2
 
-#define X(fn)                                                               \
-                                                                            \
-    static size_t fn##_default(                                             \
-                  void* dst,  size_t dstSize,                               \
-            const void* cSrc, size_t cSrcSize,                              \
-            const HUF_DTable* DTable)                                       \
-    {                                                                       \
-        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
-    }                                                                       \
-                                                                            \
-    static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2(                       \
-                  void* dst,  size_t dstSize,                               \
-            const void* cSrc, size_t cSrcSize,                              \
-            const HUF_DTable* DTable)                                       \
-    {                                                                       \
-        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
-    }                                                                       \
-                                                                            \
-    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \
-                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \
-    {                                                                       \
-        if (bmi2) {                                                         \
-            return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);         \
-        }                                                                   \
-        return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable);          \
-    }
-
-#else
-
-#define X(fn)                                                               \
-    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \
-                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \
-    {                                                                       \
-        (void)bmi2;                                                         \
-        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
-    }
-
-#endif
-
-X(HUF_decompress1X2_usingDTable_internal)
-X(HUF_decompress4X2_usingDTable_internal)
-X(HUF_decompress1X4_usingDTable_internal)
-X(HUF_decompress4X4_usingDTable_internal)
-
-#undef X
+HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
+HUF_DGEN(HUF_decompress4X1_usingDTable_internal)
 
 
-size_t HUF_decompress1X2_usingDTable(
+
+size_t HUF_decompress1X1_usingDTable(
           void* dst,  size_t dstSize,
     const void* cSrc, size_t cSrcSize,
     const HUF_DTable* DTable)
 {
     DTableDesc dtd = HUF_getDTableDesc(DTable);
     if (dtd.tableType != 0) return ERROR(GENERIC);
-    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
 }
 
-size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
+size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
                                    const void* cSrc, size_t cSrcSize,
                                    void* workSpace, size_t wkspSize)
 {
     const BYTE* ip = (const BYTE*) cSrc;
 
-    size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
+    size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
     if (HUF_isError(hSize)) return hSize;
     if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
     ip += hSize; cSrcSize -= hSize;
 
-    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
+    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
 }
 
 
-size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
+size_t HUF_decompress1X1_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
                               const void* cSrc, size_t cSrcSize)
 {
     U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-    return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
+    return HUF_decompress1X1_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
                                        workSpace, sizeof(workSpace));
 }
 
-size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
 {
-    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
-    return HUF_decompress1X2_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
+    HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX);
+    return HUF_decompress1X1_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
 }
 
-size_t HUF_decompress4X2_usingDTable(
+size_t HUF_decompress4X1_usingDTable(
           void* dst,  size_t dstSize,
     const void* cSrc, size_t cSrcSize,
     const HUF_DTable* DTable)
 {
     DTableDesc dtd = HUF_getDTableDesc(DTable);
     if (dtd.tableType != 0) return ERROR(GENERIC);
-    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+    return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
 }
 
-static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
                                    const void* cSrc, size_t cSrcSize,
                                    void* workSpace, size_t wkspSize, int bmi2)
 {
     const BYTE* ip = (const BYTE*) cSrc;
 
-    size_t const hSize = HUF_readDTableX2_wksp (dctx, cSrc, cSrcSize,
+    size_t const hSize = HUF_readDTableX1_wksp (dctx, cSrc, cSrcSize,
                                                 workSpace, wkspSize);
     if (HUF_isError(hSize)) return hSize;
     if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
     ip += hSize; cSrcSize -= hSize;
 
-    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+    return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
 }
 
-size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
                                    const void* cSrc, size_t cSrcSize,
                                    void* workSpace, size_t wkspSize)
 {
-    return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
+    return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
 }
 
 
-size_t HUF_decompress4X2_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+size_t HUF_decompress4X1_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
 {
     U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-    return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+    return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
                                        workSpace, sizeof(workSpace));
 }
-size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
 {
-    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
-    return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+    HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX);
+    return HUF_decompress4X1_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
 }
 
+#endif /* HUF_FORCE_DECOMPRESS_X2 */
+
+
+#ifndef HUF_FORCE_DECOMPRESS_X1
 
 /* *************************/
 /* double-symbols decoding */
 /* *************************/
-typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
 
-/* HUF_fillDTableX4Level2() :
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2;  /* double-symbols decoding */
+typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
+typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
+typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
+
+
+/* HUF_fillDTableX2Level2() :
  * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
-static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
+static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed,
                            const U32* rankValOrigin, const int minWeight,
                            const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
                            U32 nbBitsBaseline, U16 baseSeq)
 {
-    HUF_DEltX4 DElt;
+    HUF_DEltX2 DElt;
     U32 rankVal[HUF_TABLELOG_MAX + 1];
 
     /* get pre-calculated rankVal */
@@ -672,10 +514,8 @@
     }   }
 }
 
-typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
-typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
 
-static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
+static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
                            const sortedSymbol_t* sortedList, const U32 sortedListSize,
                            const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
                            const U32 nbBitsBaseline)
@@ -700,12 +540,12 @@
             int minWeight = nbBits + scaleLog;
             if (minWeight < 1) minWeight = 1;
             sortedRank = rankStart[minWeight];
-            HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
+            HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits,
                            rankValOrigin[nbBits], minWeight,
                            sortedList+sortedRank, sortedListSize-sortedRank,
                            nbBitsBaseline, symbol);
         } else {
-            HUF_DEltX4 DElt;
+            HUF_DEltX2 DElt;
             MEM_writeLE16(&(DElt.sequence), symbol);
             DElt.nbBits = (BYTE)(nbBits);
             DElt.length = 1;
@@ -717,16 +557,16 @@
     }
 }
 
-size_t HUF_readDTableX4_wksp(HUF_DTable* DTable, const void* src,
-                             size_t srcSize, void* workSpace,
-                             size_t wkspSize)
+size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
+                       const void* src, size_t srcSize,
+                             void* workSpace, size_t wkspSize)
 {
     U32 tableLog, maxW, sizeOfSort, nbSymbols;
     DTableDesc dtd = HUF_getDTableDesc(DTable);
     U32 const maxTableLog = dtd.maxTableLog;
     size_t iSize;
     void* dtPtr = DTable+1;   /* force compiler to avoid strict-aliasing */
-    HUF_DEltX4* const dt = (HUF_DEltX4*)dtPtr;
+    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
     U32 *rankStart;
 
     rankValCol_t* rankVal;
@@ -752,7 +592,7 @@
     rankStart = rankStart0 + 1;
     memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));
 
-    HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable));   /* if compiler fails here, assertion is wrong */
+    DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable));   /* if compiler fails here, assertion is wrong */
     if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
     /* memset(weightList, 0, sizeof(weightList)); */  /* is not necessary, even though some analyzer complain ... */
 
@@ -806,7 +646,7 @@
                     rankValPtr[w] = rankVal0[w] >> consumed;
     }   }   }   }
 
-    HUF_fillDTableX4(dt, maxTableLog,
+    HUF_fillDTableX2(dt, maxTableLog,
                    sortedSymbol, sizeOfSort,
                    rankStart0, rankVal, maxW,
                    tableLog+1);
@@ -817,112 +657,308 @@
     return iSize;
 }
 
-size_t HUF_readDTableX4(HUF_DTable* DTable, const void* src, size_t srcSize)
+size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize)
 {
   U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-  return HUF_readDTableX4_wksp(DTable, src, srcSize,
+  return HUF_readDTableX2_wksp(DTable, src, srcSize,
                                workSpace, sizeof(workSpace));
 }
 
-size_t HUF_decompress1X4_usingDTable(
+
+FORCE_INLINE_TEMPLATE U32
+HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 2);
+    BIT_skipBits(DStream, dt[val].nbBits);
+    return dt[val].length;
+}
+
+FORCE_INLINE_TEMPLATE U32
+HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 1);
+    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
+    else {
+        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
+            BIT_skipBits(DStream, dt[val].nbBits);
+            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+                /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
+    }   }
+    return 1;
+}
+
+#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+    ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+    if (MEM_64bits()) \
+        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+HINT_INLINE size_t
+HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
+                const HUF_DEltX2* const dt, const U32 dtLog)
+{
+    BYTE* const pStart = p;
+
+    /* up to 8 symbols at a time */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
+        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+    }
+
+    /* closer to end : up to 2 symbols at a time */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+    while (p <= pEnd-2)
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
+
+    if (p < pEnd)
+        p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog);
+
+    return p-pStart;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress1X2_usingDTable_internal_body(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    BIT_DStream_t bitD;
+
+    /* Init */
+    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
+
+    /* decode */
+    {   BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable+1;   /* force compiler to not use strict-aliasing */
+        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+        DTableDesc const dtd = HUF_getDTableDesc(DTable);
+        HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog);
+    }
+
+    /* check */
+    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+    /* decoded size */
+    return dstSize;
+}
+
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress4X2_usingDTable_internal_body(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
+
+    {   const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable+1;
+        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+
+        /* Init */
+        BIT_DStream_t bitD1;
+        BIT_DStream_t bitD2;
+        BIT_DStream_t bitD3;
+        BIT_DStream_t bitD4;
+        size_t const length1 = MEM_readLE16(istart);
+        size_t const length2 = MEM_readLE16(istart+2);
+        size_t const length3 = MEM_readLE16(istart+4);
+        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
+        size_t const segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal;
+        DTableDesc const dtd = HUF_getDTableDesc(DTable);
+        U32 const dtLog = dtd.tableLog;
+
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
+        CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
+        CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
+        CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
+
+        /* 16-32 symbols per loop (4-8 symbols per stream) */
+        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) {
+            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+
+            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        }
+
+        /* check corruption */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 already verified within main loop */
+
+        /* finish bitStreams one by one */
+        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
+
+        /* check */
+        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+          if (!endCheck) return ERROR(corruption_detected); }
+
+        /* decoded size */
+        return dstSize;
+    }
+}
+
+HUF_DGEN(HUF_decompress1X2_usingDTable_internal)
+HUF_DGEN(HUF_decompress4X2_usingDTable_internal)
+
+size_t HUF_decompress1X2_usingDTable(
           void* dst,  size_t dstSize,
     const void* cSrc, size_t cSrcSize,
     const HUF_DTable* DTable)
 {
     DTableDesc dtd = HUF_getDTableDesc(DTable);
     if (dtd.tableType != 1) return ERROR(GENERIC);
-    return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
 }
 
-size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
                                    const void* cSrc, size_t cSrcSize,
                                    void* workSpace, size_t wkspSize)
 {
     const BYTE* ip = (const BYTE*) cSrc;
 
-    size_t const hSize = HUF_readDTableX4_wksp(DCtx, cSrc, cSrcSize,
+    size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize,
                                                workSpace, wkspSize);
     if (HUF_isError(hSize)) return hSize;
     if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
     ip += hSize; cSrcSize -= hSize;
 
-    return HUF_decompress1X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
+    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
 }
 
 
-size_t HUF_decompress1X4_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
+size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
                               const void* cSrc, size_t cSrcSize)
 {
     U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-    return HUF_decompress1X4_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
+    return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
                                        workSpace, sizeof(workSpace));
 }
 
-size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
 {
-    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX);
-    return HUF_decompress1X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
+    return HUF_decompress1X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
 }
 
-size_t HUF_decompress4X4_usingDTable(
+size_t HUF_decompress4X2_usingDTable(
           void* dst,  size_t dstSize,
     const void* cSrc, size_t cSrcSize,
     const HUF_DTable* DTable)
 {
     DTableDesc dtd = HUF_getDTableDesc(DTable);
     if (dtd.tableType != 1) return ERROR(GENERIC);
-    return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
 }
 
-static size_t HUF_decompress4X4_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
                                    const void* cSrc, size_t cSrcSize,
                                    void* workSpace, size_t wkspSize, int bmi2)
 {
     const BYTE* ip = (const BYTE*) cSrc;
 
-    size_t hSize = HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize,
+    size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,
                                          workSpace, wkspSize);
     if (HUF_isError(hSize)) return hSize;
     if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
     ip += hSize; cSrcSize -= hSize;
 
-    return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
 }
 
-size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
                                    const void* cSrc, size_t cSrcSize,
                                    void* workSpace, size_t wkspSize)
 {
-    return HUF_decompress4X4_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
+    return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
 }
 
 
-size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
+size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
                               const void* cSrc, size_t cSrcSize)
 {
     U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-    return HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+    return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
                                        workSpace, sizeof(workSpace));
 }
 
-size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
 {
-    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX);
-    return HUF_decompress4X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
+    return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
 }
 
+#endif /* HUF_FORCE_DECOMPRESS_X1 */
 
-/* ********************************/
-/* Generic decompression selector */
-/* ********************************/
+
+/* ***********************************/
+/* Universal decompression selectors */
+/* ***********************************/
 
 size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
                                     const void* cSrc, size_t cSrcSize,
                                     const HUF_DTable* DTable)
 {
     DTableDesc const dtd = HUF_getDTableDesc(DTable);
-    return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
-                           HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+    (void)dtd;
+    assert(dtd.tableType == 0);
+    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+    (void)dtd;
+    assert(dtd.tableType == 1);
+    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#else
+    return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
+                           HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#endif
 }
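HUF_FORCE_DECOMPRESS_X1 and HUF_FORCE_DECOMPRESS_X2 are plain preprocessor macros tested with #if defined(...), so a build that only ever produces one table type can strip the other decoder from this translation unit entirely; the selectors above then collapse into direct calls, and the asserts document that only matching tables may be passed in. A hedged example of such a build line (compiler invocation illustrative):

    cc -DHUF_FORCE_DECOMPRESS_X1 -c huf_decompress.c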
 
 size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
@@ -930,11 +966,22 @@
                                     const HUF_DTable* DTable)
 {
     DTableDesc const dtd = HUF_getDTableDesc(DTable);
-    return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
-                           HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+    (void)dtd;
+    assert(dtd.tableType == 0);
+    return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+    (void)dtd;
+    assert(dtd.tableType == 1);
+    return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#else
+    return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
+                           HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#endif
 }
 
 
+#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
 typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
 static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
 {
@@ -956,16 +1003,26 @@
     {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
     {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
 };
+#endif
 
 /** HUF_selectDecoder() :
  *  Tells which decoder is likely to decode faster,
  *  based on a set of pre-computed metrics.
- * @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
+ * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
  *  Assumption : 0 < dstSize <= 128 KB */
 U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
 {
     assert(dstSize > 0);
-    assert(dstSize <= 128 KB);
+    assert(dstSize <= 128*1024);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+    (void)dstSize;
+    (void)cSrcSize;
+    return 0;
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+    (void)dstSize;
+    (void)cSrcSize;
+    return 1;
+#else
     /* decoder timing evaluation */
     {   U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 */
         U32 const D256 = (U32)(dstSize >> 8);
@@ -973,14 +1030,18 @@
         U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
         DTime1 += DTime1 >> 3;  /* advantage to algorithm using less memory, to reduce cache eviction */
         return DTime1 < DTime0;
-}   }
+    }
+#endif
+}
 
 
 typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
 
 size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
 {
-    static const decompressionAlgo decompress[2] = { HUF_decompress4X2, HUF_decompress4X4 };
+#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
+    static const decompressionAlgo decompress[2] = { HUF_decompress4X1, HUF_decompress4X2 };
+#endif
 
     /* validation checks */
     if (dstSize == 0) return ERROR(dstSize_tooSmall);
@@ -989,7 +1050,17 @@
     if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
 
     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+        (void)algoNb;
+        assert(algoNb == 0);
+        return HUF_decompress4X1(dst, dstSize, cSrc, cSrcSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+        (void)algoNb;
+        assert(algoNb == 1);
+        return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);
+#else
         return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
+#endif
     }
 }
 
@@ -1002,8 +1073,18 @@
     if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
 
     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-        return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
-                        HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+        (void)algoNb;
+        assert(algoNb == 0);
+        return HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+        (void)algoNb;
+        assert(algoNb == 1);
+        return HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);
+#else
+        return algoNb ? HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
+                        HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+#endif
     }
 }
 
@@ -1025,8 +1106,19 @@
     if (cSrcSize == 0) return ERROR(corruption_detected);
 
     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-        return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize):
-                        HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+        (void)algoNb;
+        assert(algoNb == 0);
+        return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+        (void)algoNb;
+        assert(algoNb == 1);
+        return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+#else
+        return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+                            cSrcSize, workSpace, wkspSize) :
+                        HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
+                            cSrcSize, workSpace, wkspSize);
+#endif
     }
 }
 
@@ -1041,10 +1133,22 @@
     if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
 
     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-        return algoNb ? HUF_decompress1X4_DCtx_wksp(dctx, dst, dstSize, cSrc,
-                                cSrcSize, workSpace, wkspSize):
-                        HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+        (void)algoNb;
+        assert(algoNb == 0);
+        return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
                                 cSrcSize, workSpace, wkspSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+        (void)algoNb;
+        assert(algoNb == 1);
+        return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+                                cSrcSize, workSpace, wkspSize);
+#else
+        return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+                                cSrcSize, workSpace, wkspSize):
+                        HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
+                                cSrcSize, workSpace, wkspSize);
+#endif
     }
 }
 
@@ -1060,27 +1164,49 @@
 size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
 {
     DTableDesc const dtd = HUF_getDTableDesc(DTable);
-    return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
-                           HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+    (void)dtd;
+    assert(dtd.tableType == 0);
+    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+    (void)dtd;
+    assert(dtd.tableType == 1);
+    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#else
+    return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
+                           HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#endif
 }
 
-size_t HUF_decompress1X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
 {
     const BYTE* ip = (const BYTE*) cSrc;
 
-    size_t const hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize);
+    size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize);
     if (HUF_isError(hSize)) return hSize;
     if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
     ip += hSize; cSrcSize -= hSize;
 
-    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
 }
+#endif
 
 size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
 {
     DTableDesc const dtd = HUF_getDTableDesc(DTable);
-    return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
-                           HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+    (void)dtd;
+    assert(dtd.tableType == 0);
+    return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+    (void)dtd;
+    assert(dtd.tableType == 1);
+    return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#else
+    return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
+                           HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#endif
 }
 
 size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
@@ -1090,7 +1216,17 @@
     if (cSrcSize == 0) return ERROR(corruption_detected);
 
     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-        return algoNb ? HUF_decompress4X4_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
-                        HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+        (void)algoNb;
+        assert(algoNb == 0);
+        return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+        (void)algoNb;
+        assert(algoNb == 1);
+        return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+#else
+        return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
+                        HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+#endif
     }
 }
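The decoder-selection heuristic near the end of this file is pure integer arithmetic; the following standalone sketch restates it, assuming the same pre-computed algoTime quantization table as in the patch (selectDecoderSketch and algo_time_sketch_t are illustrative names):

#include <stddef.h>

typedef struct { unsigned tableTime; unsigned decode256Time; } algo_time_sketch_t;

/* @return : 0 == prefer the single-symbol (X1) decoder, 1 == prefer X2 */
static unsigned selectDecoderSketch(size_t dstSize, size_t cSrcSize,
                                    const algo_time_sketch_t algoTime[16][3])
{
    unsigned const Q = (cSrcSize >= dstSize) ? 15 : (unsigned)(cSrcSize * 16 / dstSize);   /* ratio bucket, Q < 16 */
    unsigned const D256 = (unsigned)(dstSize >> 8);   /* output size in 256-byte blocks */
    unsigned const DTime0 = algoTime[Q][0].tableTime + algoTime[Q][0].decode256Time * D256;
    unsigned DTime1 = algoTime[Q][1].tableTime + algoTime[Q][1].decode256Time * D256;
    DTime1 += DTime1 >> 3;   /* +12.5% handicap : favour X1's smaller tables (less cache eviction) */
    return DTime1 < DTime0;
}

For example, with dstSize == 64 KiB and cSrcSize == 32 KiB, Q == 8 and D256 == 256, so the per-256-byte decode costs dominate the comparison.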
diff --git a/vendor/github.com/DataDog/zstd/mem.h b/vendor/github.com/DataDog/zstd/mem.h
index 47d2300..5da2487 100644
--- a/vendor/github.com/DataDog/zstd/mem.h
+++ b/vendor/github.com/DataDog/zstd/mem.h
@@ -39,6 +39,10 @@
 #  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
 #endif
 
+#ifndef __has_builtin
+#  define __has_builtin(x) 0  /* compat. with non-clang compilers */
+#endif
+
 /* code only tested on 32 and 64 bits systems */
 #define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
 MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
@@ -57,11 +61,23 @@
   typedef  uint64_t U64;
   typedef   int64_t S64;
 #else
+# include <limits.h>
+#if CHAR_BIT != 8
+#  error "this implementation requires char to be exactly 8-bit type"
+#endif
   typedef unsigned char      BYTE;
+#if USHRT_MAX != 65535
+#  error "this implementation requires short to be exactly 16-bit type"
+#endif
   typedef unsigned short      U16;
   typedef   signed short      S16;
+#if UINT_MAX != 4294967295
+#  error "this implementation requires int to be exactly 32-bit type"
+#endif
   typedef unsigned int        U32;
   typedef   signed int        S32;
+/* note : C90 defines no limits for the long long type.
+ * C99 does define them, but in that case <stdint.h> is preferred anyway */
   typedef unsigned long long  U64;
   typedef   signed long long  S64;
 #endif
@@ -186,7 +202,8 @@
 {
 #if defined(_MSC_VER)     /* Visual Studio */
     return _byteswap_ulong(in);
-#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
+  || (defined(__clang__) && __has_builtin(__builtin_bswap32))
     return __builtin_bswap32(in);
 #else
     return  ((in << 24) & 0xff000000 ) |
@@ -200,7 +217,8 @@
 {
 #if defined(_MSC_VER)     /* Visual Studio */
     return _byteswap_uint64(in);
-#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
+  || (defined(__clang__) && __has_builtin(__builtin_bswap64))
     return __builtin_bswap64(in);
 #else
     return  ((in << 56) & 0xff00000000000000ULL) |
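The __has_builtin shim added to mem.h is a general pattern for probing clang builtins while preserving GCC's version checks; a sketch applying the same pattern to a different builtin (popcount32 is illustrative and not part of mem.h):

#ifndef __has_builtin
#  define __has_builtin(x) 0   /* non-clang compilers see a constant 0 */
#endif

static unsigned popcount32(unsigned v)
{
#if (defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 304)) \
  || (defined(__clang__) && __has_builtin(__builtin_popcount))
    return (unsigned)__builtin_popcount(v);
#else
    unsigned c = 0;
    while (v) { v &= v - 1; c++; }   /* clears the lowest set bit each iteration */
    return c;
#endif
}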
diff --git a/vendor/github.com/DataDog/zstd/pool.c b/vendor/github.com/DataDog/zstd/pool.c
index 773488b..7a82945 100644
--- a/vendor/github.com/DataDog/zstd/pool.c
+++ b/vendor/github.com/DataDog/zstd/pool.c
@@ -10,9 +10,10 @@
 
 
 /* ======   Dependencies   ======= */
-#include <stddef.h>  /* size_t */
-#include "pool.h"
+#include <stddef.h>    /* size_t */
+#include "debug.h"     /* assert */
 #include "zstd_internal.h"  /* ZSTD_malloc, ZSTD_free */
+#include "pool.h"
 
 /* ======   Compiler specifics   ====== */
 #if defined(_MSC_VER)
@@ -33,8 +34,9 @@
 struct POOL_ctx_s {
     ZSTD_customMem customMem;
     /* Keep track of the threads */
-    ZSTD_pthread_t *threads;
-    size_t numThreads;
+    ZSTD_pthread_t* threads;
+    size_t threadCapacity;
+    size_t threadLimit;
 
     /* The queue is a circular buffer */
     POOL_job *queue;
@@ -58,10 +60,10 @@
 };
 
 /* POOL_thread() :
-   Work thread for the thread pool.
-   Waits for jobs and executes them.
-   @returns : NULL on failure else non-null.
-*/
+ * Work thread for the thread pool.
+ * Waits for jobs and executes them.
+ * @return : NULL on failure, non-NULL otherwise.
+ */
 static void* POOL_thread(void* opaque) {
     POOL_ctx* const ctx = (POOL_ctx*)opaque;
     if (!ctx) { return NULL; }
@@ -69,50 +71,55 @@
         /* Lock the mutex and wait for a non-empty queue or until shutdown */
         ZSTD_pthread_mutex_lock(&ctx->queueMutex);
 
-        while (ctx->queueEmpty && !ctx->shutdown) {
+        while ( ctx->queueEmpty
+            || (ctx->numThreadsBusy >= ctx->threadLimit) ) {
+            if (ctx->shutdown) {
+                /* even if !queueEmpty (possible when numThreadsBusy >= threadLimit),
+                 * a few threads will be shut down while !queueEmpty,
+                 * but enough threads will remain active to finish the queue */
+                ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+                return opaque;
+            }
             ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
         }
-        /* empty => shutting down: so stop */
-        if (ctx->queueEmpty) {
-            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
-            return opaque;
-        }
         /* Pop a job off the queue */
         {   POOL_job const job = ctx->queue[ctx->queueHead];
             ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
             ctx->numThreadsBusy++;
             ctx->queueEmpty = ctx->queueHead == ctx->queueTail;
             /* Unlock the mutex, signal a pusher, and run the job */
-            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
             ZSTD_pthread_cond_signal(&ctx->queuePushCond);
+            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
 
             job.function(job.opaque);
 
             /* If the intended queue size was 0, signal after finishing job */
+            ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+            ctx->numThreadsBusy--;
             if (ctx->queueSize == 1) {
-                ZSTD_pthread_mutex_lock(&ctx->queueMutex);
-                ctx->numThreadsBusy--;
-                ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
                 ZSTD_pthread_cond_signal(&ctx->queuePushCond);
-        }   }
+            }
+            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+        }
     }  /* for (;;) */
-    /* Unreachable */
+    assert(0);  /* Unreachable */
 }
 
 POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
     return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
 }
 
-POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
+POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
+                               ZSTD_customMem customMem) {
     POOL_ctx* ctx;
-    /* Check the parameters */
+    /* Check parameters */
     if (!numThreads) { return NULL; }
     /* Allocate the context and zero initialize */
     ctx = (POOL_ctx*)ZSTD_calloc(sizeof(POOL_ctx), customMem);
     if (!ctx) { return NULL; }
     /* Initialize the job queue.
-     * It needs one extra space since one space is wasted to differentiate empty
-     * and full queues.
+     * It needs one extra space since one space is wasted to differentiate
+     * empty and full queues.
      */
     ctx->queueSize = queueSize + 1;
     ctx->queue = (POOL_job*)ZSTD_malloc(ctx->queueSize * sizeof(POOL_job), customMem);
@@ -126,7 +133,7 @@
     ctx->shutdown = 0;
     /* Allocate space for the thread handles */
     ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
-    ctx->numThreads = 0;
+    ctx->threadCapacity = 0;
     ctx->customMem = customMem;
     /* Check for errors */
     if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
@@ -134,11 +141,12 @@
     {   size_t i;
         for (i = 0; i < numThreads; ++i) {
             if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
-                ctx->numThreads = i;
+                ctx->threadCapacity = i;
                 POOL_free(ctx);
                 return NULL;
         }   }
-        ctx->numThreads = numThreads;
+        ctx->threadCapacity = numThreads;
+        ctx->threadLimit = numThreads;
     }
     return ctx;
 }
@@ -156,8 +164,8 @@
     ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
     /* Join all of the threads */
     {   size_t i;
-        for (i = 0; i < ctx->numThreads; ++i) {
-            ZSTD_pthread_join(ctx->threads[i], NULL);
+        for (i = 0; i < ctx->threadCapacity; ++i) {
+            ZSTD_pthread_join(ctx->threads[i], NULL);  /* note : could fail */
     }   }
 }
 
@@ -172,24 +180,68 @@
     ZSTD_free(ctx, ctx->customMem);
 }
 
+
+
 size_t POOL_sizeof(POOL_ctx *ctx) {
     if (ctx==NULL) return 0;  /* supports sizeof NULL */
     return sizeof(*ctx)
         + ctx->queueSize * sizeof(POOL_job)
-        + ctx->numThreads * sizeof(ZSTD_pthread_t);
+        + ctx->threadCapacity * sizeof(ZSTD_pthread_t);
+}
+
+
+/* @return : 0 on success, 1 on error */
+static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
+{
+    if (numThreads <= ctx->threadCapacity) {
+        if (!numThreads) return 1;
+        ctx->threadLimit = numThreads;
+        return 0;
+    }
+    /* numThreads > threadCapacity */
+    {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
+        if (!threadPool) return 1;
+        /* replace existing thread pool */
+        memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
+        ZSTD_free(ctx->threads, ctx->customMem);
+        ctx->threads = threadPool;
+        /* Initialize additional threads */
+        {   size_t threadId;
+            for (threadId = ctx->threadCapacity; threadId < numThreads; ++threadId) {
+                if (ZSTD_pthread_create(&threadPool[threadId], NULL, &POOL_thread, ctx)) {
+                    ctx->threadCapacity = threadId;
+                    return 1;
+            }   }
+    }   }
+    /* successfully expanded */
+    ctx->threadCapacity = numThreads;
+    ctx->threadLimit = numThreads;
+    return 0;
+}
+
+/* @return : 0 on success, 1 on error */
+int POOL_resize(POOL_ctx* ctx, size_t numThreads)
+{
+    int result;
+    if (ctx==NULL) return 1;
+    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+    result = POOL_resize_internal(ctx, numThreads);
+    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
+    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+    return result;
 }
 
 /**
  * Returns 1 if the queue is full and 0 otherwise.
  *
- * If the queueSize is 1 (the pool was created with an intended queueSize of 0),
- * then a queue is empty if there is a thread free and no job is waiting.
+ * When queueSize is 1 (the pool was created with an intended queueSize of 0),
+ * the queue counts as empty only if a thread is free _and_ no job is waiting.
  */
 static int isQueueFull(POOL_ctx const* ctx) {
     if (ctx->queueSize > 1) {
         return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
     } else {
-        return ctx->numThreadsBusy == ctx->numThreads ||
+        return (ctx->numThreadsBusy == ctx->threadLimit) ||
                !ctx->queueEmpty;
     }
 }
@@ -263,6 +315,11 @@
     (void)ctx;
 }
 
+int POOL_resize(POOL_ctx* ctx, size_t numThreads) {
+    (void)ctx; (void)numThreads;
+    return 0;
+}
+
 void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) {
     (void)ctx;
     function(opaque);
diff --git a/vendor/github.com/DataDog/zstd/pool.h b/vendor/github.com/DataDog/zstd/pool.h
index a57e9b4..458d37f 100644
--- a/vendor/github.com/DataDog/zstd/pool.h
+++ b/vendor/github.com/DataDog/zstd/pool.h
@@ -30,40 +30,50 @@
 */
 POOL_ctx* POOL_create(size_t numThreads, size_t queueSize);
 
-POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem);
+POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
+                               ZSTD_customMem customMem);
 
 /*! POOL_free() :
-    Free a thread pool returned by POOL_create().
-*/
+ *  Free a thread pool returned by POOL_create().
+ */
 void POOL_free(POOL_ctx* ctx);
 
+/*! POOL_resize() :
+ *  Expands or shrinks the pool's number of threads.
+ *  This is more efficient than releasing + creating a new context,
+ *  since it tries to preserve and re-use existing threads.
+ * `numThreads` must be at least 1.
+ * @return : 0 when resize was successful,
+ *           !0 (typically 1) if there is an error.
+ *    note : only numThreads can be resized, queueSize remains unchanged.
+ */
+int POOL_resize(POOL_ctx* ctx, size_t numThreads);
+
 /*! POOL_sizeof() :
-    return memory usage of pool returned by POOL_create().
-*/
+ * @return threadpool memory usage
+ *  note : compatible with NULL (returns 0 in this case)
+ */
 size_t POOL_sizeof(POOL_ctx* ctx);
 
 /*! POOL_function :
-    The function type that can be added to a thread pool.
-*/
+ *  The function type that can be added to a thread pool.
+ */
 typedef void (*POOL_function)(void*);
-/*! POOL_add_function :
-    The function type for a generic thread pool add function.
-*/
-typedef void (*POOL_add_function)(void*, POOL_function, void*);
 
 /*! POOL_add() :
-    Add the job `function(opaque)` to the thread pool. `ctx` must be valid.
-    Possibly blocks until there is room in the queue.
-    Note : The function may be executed asynchronously, so `opaque` must live until the function has been completed.
-*/
+ *  Add the job `function(opaque)` to the thread pool. `ctx` must be valid.
+ *  Possibly blocks until there is room in the queue.
+ *  Note : The function may be executed asynchronously,
+ *         therefore, `opaque` must live until the function has completed.
+ */
 void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);
 
 
 /*! POOL_tryAdd() :
-    Add the job `function(opaque)` to the thread pool if a worker is available.
-    return immediately otherwise.
-   @return : 1 if successful, 0 if not.
-*/
+ *  Add the job `function(opaque)` to the thread pool _if_ a worker is available.
+ *  Returns immediately otherwise (does not block).
+ * @return : 1 if successful, 0 if not.
+ */
 int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque);
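A hedged usage sketch of the API declared above (JobArg, doWork and runPoolDemo are illustrative; error handling kept minimal):

#include <stdio.h>
#include "pool.h"

typedef struct { int id; } JobArg;

static void doWork(void* opaque)
{
    JobArg* const arg = (JobArg*)opaque;
    printf("job %d running\n", arg->id);   /* opaque must outlive the job */
}

static int runPoolDemo(void)
{
    JobArg args[8];
    int i;
    POOL_ctx* const pool = POOL_create(2 /* numThreads */, 4 /* queueSize */);
    if (pool == NULL) return 1;
    if (POOL_resize(pool, 4)) { POOL_free(pool); return 1; }   /* grow to 4 threads */
    for (i = 0; i < 8; i++) {
        args[i].id = i;
        POOL_add(pool, doWork, &args[i]);   /* may block until the queue has room */
    }
    POOL_free(pool);   /* shuts down; active threads finish the queued jobs first */
    return 0;
}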
 
 
diff --git a/vendor/github.com/DataDog/zstd/threading.c b/vendor/github.com/DataDog/zstd/threading.c
index 8be8c8d..f3d4fa8 100644
--- a/vendor/github.com/DataDog/zstd/threading.c
+++ b/vendor/github.com/DataDog/zstd/threading.c
@@ -14,8 +14,8 @@
  * This file will hold wrapper for systems, which do not support pthreads
  */
 
-/* create fake symbol to avoid empty trnaslation unit warning */
-int g_ZSTD_threading_useles_symbol;
+/* create fake symbol to avoid empty translation unit warning */
+int g_ZSTD_threading_useless_symbol;
 
 #if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
 
diff --git a/vendor/github.com/DataDog/zstd/xxhash.c b/vendor/github.com/DataDog/zstd/xxhash.c
index 9d9c0e9..30599aa 100644
--- a/vendor/github.com/DataDog/zstd/xxhash.c
+++ b/vendor/github.com/DataDog/zstd/xxhash.c
@@ -66,10 +66,10 @@
 /* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
 
 /*!XXH_FORCE_NATIVE_FORMAT :
- * By default, xxHash library provides endian-independant Hash values, based on little-endian convention.
+ * By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
  * Results are therefore identical for little-endian and big-endian CPU.
  * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
- * Should endian-independance be of no importance for your application, you may set the #define below to 1,
+ * Should endian-independence be of no importance for your application, you may set the #define below to 1,
  * to improve speed for Big-endian CPU.
  * This option has no impact on little-endian CPUs.
  */
@@ -98,6 +98,7 @@
 /* Modify the local functions below should you wish to use some other memory routines */
 /* for malloc(), free() */
 #include <stdlib.h>
+#include <stddef.h>     /* size_t */
 static void* XXH_malloc(size_t s) { return malloc(s); }
 static void  XXH_free  (void* p)  { free(p); }
 /* for memcpy() */
diff --git a/vendor/github.com/DataDog/zstd/zdict.c b/vendor/github.com/DataDog/zstd/zdict.c
index 7d24e49..ee21ee1 100644
--- a/vendor/github.com/DataDog/zstd/zdict.c
+++ b/vendor/github.com/DataDog/zstd/zdict.c
@@ -255,15 +255,15 @@
     }
 
     {   int i;
-        U32 searchLength;
+        U32 mml;
         U32 refinedStart = start;
         U32 refinedEnd = end;
 
         DISPLAYLEVEL(4, "\n");
-        DISPLAYLEVEL(4, "found %3u matches of length >= %i at pos %7u  ", (U32)(end-start), MINMATCHLENGTH, (U32)pos);
+        DISPLAYLEVEL(4, "found %3u matches of length >= %i at pos %7u  ", (unsigned)(end-start), MINMATCHLENGTH, (unsigned)pos);
         DISPLAYLEVEL(4, "\n");
 
-        for (searchLength = MINMATCHLENGTH ; ; searchLength++) {
+        for (mml = MINMATCHLENGTH ; ; mml++) {
             BYTE currentChar = 0;
             U32 currentCount = 0;
             U32 currentID = refinedStart;
@@ -271,13 +271,13 @@
             U32 selectedCount = 0;
             U32 selectedID = currentID;
             for (id =refinedStart; id < refinedEnd; id++) {
-                if (b[suffix[id] + searchLength] != currentChar) {
+                if (b[suffix[id] + mml] != currentChar) {
                     if (currentCount > selectedCount) {
                         selectedCount = currentCount;
                         selectedID = currentID;
                     }
                     currentID = id;
-                    currentChar = b[ suffix[id] + searchLength];
+                    currentChar = b[ suffix[id] + mml];
                     currentCount = 0;
                 }
                 currentCount ++;
@@ -293,7 +293,7 @@
             refinedEnd = refinedStart + selectedCount;
         }
 
-        /* evaluate gain based on new ref */
+        /* evaluate gain based on new dict */
         start = refinedStart;
         pos = suffix[refinedStart];
         end = start;
@@ -341,8 +341,8 @@
         for (i=MINMATCHLENGTH; i<=(int)maxLength; i++)
             savings[i] = savings[i-1] + (lengthList[i] * (i-3));
 
-        DISPLAYLEVEL(4, "Selected ref at position %u, of length %u : saves %u (ratio: %.2f)  \n",
-                     (U32)pos, (U32)maxLength, savings[maxLength], (double)savings[maxLength] / maxLength);
+        DISPLAYLEVEL(4, "Selected dict at position %u, of length %u : saves %u (ratio: %.2f)  \n",
+                     (unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / maxLength);
 
         solution.pos = (U32)pos;
         solution.length = (U32)maxLength;
@@ -497,7 +497,7 @@
 static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,
                             const void* const buffer, size_t bufferSize,   /* buffer must end with noisy guard band */
                             const size_t* fileSizes, unsigned nbFiles,
-                            U32 minRatio, U32 notificationLevel)
+                            unsigned minRatio, U32 notificationLevel)
 {
     int* const suffix0 = (int*)malloc((bufferSize+2)*sizeof(*suffix0));
     int* const suffix = suffix0+1;
@@ -523,11 +523,11 @@
     memset(doneMarks, 0, bufferSize+16);
 
     /* limit sample set size (divsufsort limitation)*/
-    if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u MB ...\n", (U32)(ZDICT_MAX_SAMPLES_SIZE>>20));
+    if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u MB ...\n", (unsigned)(ZDICT_MAX_SAMPLES_SIZE>>20));
     while (bufferSize > ZDICT_MAX_SAMPLES_SIZE) bufferSize -= fileSizes[--nbFiles];
 
     /* sort */
-    DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (U32)(bufferSize>>20));
+    DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (unsigned)(bufferSize>>20));
     {   int const divSuftSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0);
         if (divSuftSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; }
     }
@@ -581,7 +581,7 @@
 
 typedef struct
 {
-    ZSTD_CCtx* ref;    /* contains reference to dictionary */
+    ZSTD_CDict* dict;    /* dictionary */
     ZSTD_CCtx* zc;     /* working context */
     void* workPlace;   /* must be ZSTD_BLOCKSIZE_MAX allocated */
 } EStats_ress_t;
@@ -589,7 +589,7 @@
 #define MAXREPOFFSET 1024
 
 static void ZDICT_countEStats(EStats_ress_t esr, ZSTD_parameters params,
-                              U32* countLit, U32* offsetcodeCount, U32* matchlengthCount, U32* litlengthCount, U32* repOffsets,
+                              unsigned* countLit, unsigned* offsetcodeCount, unsigned* matchlengthCount, unsigned* litlengthCount, U32* repOffsets,
                               const void* src, size_t srcSize,
                               U32 notificationLevel)
 {
@@ -597,11 +597,12 @@
     size_t cSize;
 
     if (srcSize > blockSizeMax) srcSize = blockSizeMax;   /* protection vs large samples */
-    {   size_t const errorCode = ZSTD_copyCCtx(esr.zc, esr.ref, 0);
-        if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_copyCCtx failed \n"); return; }
+    {   size_t const errorCode = ZSTD_compressBegin_usingCDict(esr.zc, esr.dict);
+        if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_compressBegin_usingCDict failed \n"); return; }
+
     }
     cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize);
-    if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (U32)srcSize); return; }
+    if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (unsigned)srcSize); return; }
 
     if (cSize) {  /* if == 0; block is not compressible */
         const seqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc);
@@ -670,7 +671,7 @@
  * rewrite `countLit` to contain a mostly flat but still compressible distribution of literals.
  * necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode.
  */
-static void ZDICT_flatLit(U32* countLit)
+static void ZDICT_flatLit(unsigned* countLit)
 {
     int u;
     for (u=1; u<256; u++) countLit[u] = 2;
@@ -686,18 +687,18 @@
                              const void* dictBuffer, size_t  dictBufferSize,
                                    unsigned notificationLevel)
 {
-    U32 countLit[256];
+    unsigned countLit[256];
     HUF_CREATE_STATIC_CTABLE(hufTable, 255);
-    U32 offcodeCount[OFFCODE_MAX+1];
+    unsigned offcodeCount[OFFCODE_MAX+1];
     short offcodeNCount[OFFCODE_MAX+1];
     U32 offcodeMax = ZSTD_highbit32((U32)(dictBufferSize + 128 KB));
-    U32 matchLengthCount[MaxML+1];
+    unsigned matchLengthCount[MaxML+1];
     short matchLengthNCount[MaxML+1];
-    U32 litLengthCount[MaxLL+1];
+    unsigned litLengthCount[MaxLL+1];
     short litLengthNCount[MaxLL+1];
     U32 repOffset[MAXREPOFFSET];
     offsetCount_t bestRepOffset[ZSTD_REP_NUM+1];
-    EStats_ress_t esr;
+    EStats_ress_t esr = { NULL, NULL, NULL };
     ZSTD_parameters params;
     U32 u, huffLog = 11, Offlog = OffFSELog, mlLog = MLFSELog, llLog = LLFSELog, total;
     size_t pos = 0, errorCode;
@@ -708,14 +709,6 @@
 
     /* init */
     DEBUGLOG(4, "ZDICT_analyzeEntropy");
-    esr.ref = ZSTD_createCCtx();
-    esr.zc = ZSTD_createCCtx();
-    esr.workPlace = malloc(ZSTD_BLOCKSIZE_MAX);
-    if (!esr.ref || !esr.zc || !esr.workPlace) {
-        eSize = ERROR(memory_allocation);
-        DISPLAYLEVEL(1, "Not enough memory \n");
-        goto _cleanup;
-    }
     if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionaryCreation_failed); goto _cleanup; }   /* too large dictionary */
     for (u=0; u<256; u++) countLit[u] = 1;   /* any character must be described */
     for (u=0; u<=offcodeMax; u++) offcodeCount[u] = 1;
@@ -724,14 +717,17 @@
     memset(repOffset, 0, sizeof(repOffset));
     repOffset[1] = repOffset[4] = repOffset[8] = 1;
     memset(bestRepOffset, 0, sizeof(bestRepOffset));
-    if (compressionLevel<=0) compressionLevel = g_compressionLevel_default;
+    if (compressionLevel==0) compressionLevel = g_compressionLevel_default;
     params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize);
-    {   size_t const beginResult = ZSTD_compressBegin_advanced(esr.ref, dictBuffer, dictBufferSize, params, 0);
-        if (ZSTD_isError(beginResult)) {
-            DISPLAYLEVEL(1, "error : ZSTD_compressBegin_advanced() failed : %s \n", ZSTD_getErrorName(beginResult));
-            eSize = ERROR(GENERIC);
-            goto _cleanup;
-    }   }
+
+    esr.dict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dlm_byRef, ZSTD_dct_rawContent, params.cParams, ZSTD_defaultCMem);
+    esr.zc = ZSTD_createCCtx();
+    esr.workPlace = malloc(ZSTD_BLOCKSIZE_MAX);
+    if (!esr.dict || !esr.zc || !esr.workPlace) {
+        eSize = ERROR(memory_allocation);
+        DISPLAYLEVEL(1, "Not enough memory \n");
+        goto _cleanup;
+    }
 
     /* collect stats on all samples */
     for (u=0; u<nbFiles; u++) {
@@ -745,7 +741,7 @@
     /* analyze, build stats, starting with literals */
     {   size_t maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);
         if (HUF_isError(maxNbBits)) {
-            eSize = ERROR(GENERIC);
+            eSize = maxNbBits;
             DISPLAYLEVEL(1, " HUF_buildCTable error \n");
             goto _cleanup;
         }
@@ -768,7 +764,7 @@
     total=0; for (u=0; u<=offcodeMax; u++) total+=offcodeCount[u];
     errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax);
     if (FSE_isError(errorCode)) {
-        eSize = ERROR(GENERIC);
+        eSize = errorCode;
         DISPLAYLEVEL(1, "FSE_normalizeCount error with offcodeCount \n");
         goto _cleanup;
     }
@@ -777,7 +773,7 @@
     total=0; for (u=0; u<=MaxML; u++) total+=matchLengthCount[u];
     errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, MaxML);
     if (FSE_isError(errorCode)) {
-        eSize = ERROR(GENERIC);
+        eSize = errorCode;
         DISPLAYLEVEL(1, "FSE_normalizeCount error with matchLengthCount \n");
         goto _cleanup;
     }
@@ -786,7 +782,7 @@
     total=0; for (u=0; u<=MaxLL; u++) total+=litLengthCount[u];
     errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, MaxLL);
     if (FSE_isError(errorCode)) {
-        eSize = ERROR(GENERIC);
+        eSize = errorCode;
         DISPLAYLEVEL(1, "FSE_normalizeCount error with litLengthCount \n");
         goto _cleanup;
     }
@@ -795,7 +791,7 @@
     /* write result to buffer */
     {   size_t const hhSize = HUF_writeCTable(dstPtr, maxDstSize, hufTable, 255, huffLog);
         if (HUF_isError(hhSize)) {
-            eSize = ERROR(GENERIC);
+            eSize = hhSize;
             DISPLAYLEVEL(1, "HUF_writeCTable error \n");
             goto _cleanup;
         }
@@ -806,7 +802,7 @@
 
     {   size_t const ohSize = FSE_writeNCount(dstPtr, maxDstSize, offcodeNCount, OFFCODE_MAX, Offlog);
         if (FSE_isError(ohSize)) {
-            eSize = ERROR(GENERIC);
+            eSize = ohSize;
             DISPLAYLEVEL(1, "FSE_writeNCount error with offcodeNCount \n");
             goto _cleanup;
         }
@@ -817,7 +813,7 @@
 
     {   size_t const mhSize = FSE_writeNCount(dstPtr, maxDstSize, matchLengthNCount, MaxML, mlLog);
         if (FSE_isError(mhSize)) {
-            eSize = ERROR(GENERIC);
+            eSize = mhSize;
             DISPLAYLEVEL(1, "FSE_writeNCount error with matchLengthNCount \n");
             goto _cleanup;
         }
@@ -828,7 +824,7 @@
 
     {   size_t const lhSize = FSE_writeNCount(dstPtr, maxDstSize, litLengthNCount, MaxLL, llLog);
         if (FSE_isError(lhSize)) {
-            eSize = ERROR(GENERIC);
+            eSize = lhSize;
             DISPLAYLEVEL(1, "FSE_writeNCount error with litlengthNCount \n");
             goto _cleanup;
         }
@@ -838,7 +834,7 @@
     }
 
     if (maxDstSize<12) {
-        eSize = ERROR(GENERIC);
+        eSize = ERROR(dstSize_tooSmall);
         DISPLAYLEVEL(1, "not enough space to write RepOffsets \n");
         goto _cleanup;
     }
@@ -856,7 +852,7 @@
     eSize += 12;
 
 _cleanup:
-    ZSTD_freeCCtx(esr.ref);
+    ZSTD_freeCDict(esr.dict);
     ZSTD_freeCCtx(esr.zc);
     free(esr.workPlace);
 
@@ -867,13 +863,13 @@
 
 size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
                           const void* customDictContent, size_t dictContentSize,
-                          const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
-                          ZDICT_params_t params)
+                          const void* samplesBuffer, const size_t* samplesSizes,
+                          unsigned nbSamples, ZDICT_params_t params)
 {
     size_t hSize;
 #define HBUFFSIZE 256   /* should prove large enough for all entropy headers */
     BYTE header[HBUFFSIZE];
-    int const compressionLevel = (params.compressionLevel <= 0) ? g_compressionLevel_default : params.compressionLevel;
+    int const compressionLevel = (params.compressionLevel == 0) ? g_compressionLevel_default : params.compressionLevel;
     U32 const notificationLevel = params.notificationLevel;
 
     /* check conditions */
@@ -914,11 +910,12 @@
 }
 
 
-size_t ZDICT_addEntropyTablesFromBuffer_advanced(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
-                                                 const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
-                                                 ZDICT_params_t params)
+static size_t ZDICT_addEntropyTablesFromBuffer_advanced(
+        void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
+        const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+        ZDICT_params_t params)
 {
-    int const compressionLevel = (params.compressionLevel <= 0) ? g_compressionLevel_default : params.compressionLevel;
+    int const compressionLevel = (params.compressionLevel == 0) ? g_compressionLevel_default : params.compressionLevel;
     U32 const notificationLevel = params.notificationLevel;
     size_t hSize = 8;
 
@@ -947,7 +944,11 @@
     return MIN(dictBufferCapacity, hSize+dictContentSize);
 }
 
-
+/* Hidden declaration for dbio.c */
+size_t ZDICT_trainFromBuffer_unsafe_legacy(
+                            void* dictBuffer, size_t maxDictSize,
+                            const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+                            ZDICT_legacy_params_t params);
 /*! ZDICT_trainFromBuffer_unsafe_legacy() :
 *   Warning : `samplesBuffer` must be followed by noisy guard band.
 *   @return : size of dictionary, or an error code which can be tested with ZDICT_isError()
@@ -982,31 +983,33 @@
 
     /* display best matches */
     if (params.zParams.notificationLevel>= 3) {
-        U32 const nb = MIN(25, dictList[0].pos);
-        U32 const dictContentSize = ZDICT_dictSize(dictList);
-        U32 u;
-        DISPLAYLEVEL(3, "\n %u segments found, of total size %u \n", dictList[0].pos-1, dictContentSize);
+        unsigned const nb = MIN(25, dictList[0].pos);
+        unsigned const dictContentSize = ZDICT_dictSize(dictList);
+        unsigned u;
+        DISPLAYLEVEL(3, "\n %u segments found, of total size %u \n", (unsigned)dictList[0].pos-1, dictContentSize);
         DISPLAYLEVEL(3, "list %u best segments \n", nb-1);
         for (u=1; u<nb; u++) {
-            U32 const pos = dictList[u].pos;
-            U32 const length = dictList[u].length;
+            unsigned const pos = dictList[u].pos;
+            unsigned const length = dictList[u].length;
             U32 const printedLength = MIN(40, length);
-            if ((pos > samplesBuffSize) || ((pos + length) > samplesBuffSize))
+            if ((pos > samplesBuffSize) || ((pos + length) > samplesBuffSize)) {
+                free(dictList);
                 return ERROR(GENERIC);   /* should never happen */
+            }
             DISPLAYLEVEL(3, "%3u:%3u bytes at pos %8u, savings %7u bytes |",
-                         u, length, pos, dictList[u].savings);
+                         u, length, pos, (unsigned)dictList[u].savings);
             ZDICT_printHex((const char*)samplesBuffer+pos, printedLength);
             DISPLAYLEVEL(3, "| \n");
     }   }
 
 
     /* create dictionary */
-    {   U32 dictContentSize = ZDICT_dictSize(dictList);
+    {   unsigned dictContentSize = ZDICT_dictSize(dictList);
         if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); }   /* dictionary content too small */
         if (dictContentSize < targetDictSize/4) {
-            DISPLAYLEVEL(2, "!  warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (U32)maxDictSize);
+            DISPLAYLEVEL(2, "!  warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (unsigned)maxDictSize);
             if (samplesBuffSize < 10 * targetDictSize)
-                DISPLAYLEVEL(2, "!  consider increasing the number of samples (total size : %u MB)\n", (U32)(samplesBuffSize>>20));
+                DISPLAYLEVEL(2, "!  consider increasing the number of samples (total size : %u MB)\n", (unsigned)(samplesBuffSize>>20));
             if (minRep > MINRATIO) {
                 DISPLAYLEVEL(2, "!  consider increasing selectivity to produce larger dictionary (-s%u) \n", selectivity+1);
                 DISPLAYLEVEL(2, "!  note : larger dictionaries are not necessarily better, test its efficiency on samples \n");
@@ -1014,9 +1017,9 @@
         }
 
         if ((dictContentSize > targetDictSize*3) && (nbSamples > 2*MINRATIO) && (selectivity>1)) {
-            U32 proposedSelectivity = selectivity-1;
+            unsigned proposedSelectivity = selectivity-1;
             while ((nbSamples >> proposedSelectivity) <= MINRATIO) { proposedSelectivity--; }
-            DISPLAYLEVEL(2, "!  note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (U32)maxDictSize);
+            DISPLAYLEVEL(2, "!  note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (unsigned)maxDictSize);
             DISPLAYLEVEL(2, "!  consider increasing dictionary size, or produce denser dictionary (-s%u) \n", proposedSelectivity);
             DISPLAYLEVEL(2, "!  always test dictionary efficiency on real samples \n");
         }
@@ -1082,17 +1085,17 @@
 size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
                              const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
 {
-    ZDICT_cover_params_t params;
+    ZDICT_fastCover_params_t params;
     DEBUGLOG(3, "ZDICT_trainFromBuffer");
     memset(&params, 0, sizeof(params));
     params.d = 8;
     params.steps = 4;
     /* Default to level 3 (ZSTD_CLEVEL_DEFAULT) since no compression level information is available */
-    params.zParams.compressionLevel = 6;
-#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1)
-    params.zParams.notificationLevel = ZSTD_DEBUG;
+    params.zParams.compressionLevel = 3;
+#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=1)
+    params.zParams.notificationLevel = DEBUGLEVEL;
 #endif
-    return ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, dictBufferCapacity,
+    return ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, dictBufferCapacity,
                                                samplesBuffer, samplesSizes, nbSamples,
                                                &params);
 }
diff --git a/vendor/github.com/DataDog/zstd/zdict.h b/vendor/github.com/DataDog/zstd/zdict.h
index ad459c2..37978ec 100644
--- a/vendor/github.com/DataDog/zstd/zdict.h
+++ b/vendor/github.com/DataDog/zstd/zdict.h
@@ -39,20 +39,27 @@
 
 /*! ZDICT_trainFromBuffer():
  *  Train a dictionary from an array of samples.
- *  Redirect towards ZDICT_optimizeTrainFromBuffer_cover() single-threaded, with d=8 and steps=4.
+ *  Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4,
+ *  f=20, and accel=1.
  *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
  *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
  *  The resulting dictionary will be saved into `dictBuffer`.
  * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
  *          or an error code, which can be tested with ZDICT_isError().
- *  Note: ZDICT_trainFromBuffer() requires about 9 bytes of memory for each input byte.
+ *  Note:  Dictionary training will fail if there are not enough samples to construct a
+ *         dictionary, or if most of the samples are too small (< 8 bytes being the lower limit).
+ *         If dictionary training fails, you should use zstd without a dictionary, as the dictionary
+ *         would've been ineffective anyway. If you believe your samples would benefit from a dictionary,
+ *         please open an issue with details, and we can look into it.
+ *  Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB.
  *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
  *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
  *        In general, it's recommended to provide a few thousand samples, though this can vary a lot.
  *        It's recommended that the total size of all samples be about 100x the target size of the dictionary.
  */
 ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
-                                    const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);
+                                    const void* samplesBuffer,
+                                    const size_t* samplesSizes, unsigned nbSamples);
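
A minimal sketch of the call above (not part of this change); ZDICT_isError() and ZDICT_getErrorName() are assumed from the helper section of this header:

#include <stdio.h>
#include "zdict.h"

static void trainExample(const void* samples, const size_t* sampleSizes, unsigned nbSamples)
{
    static char dictBuffer[100 * 1024];   /* ~100 KB is a reasonable dictionary size */
    size_t const dictSize = ZDICT_trainFromBuffer(dictBuffer, sizeof(dictBuffer),
                                                  samples, sampleSizes, nbSamples);
    if (ZDICT_isError(dictSize)) {
        /* training fails when samples are too few or too small :
         * in that case, compress without a dictionary */
        printf("training failed: %s\n", ZDICT_getErrorName(dictSize));
        return;
    }
    printf("dictionary built: %u bytes\n", (unsigned)dictSize);
}
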
 
 
 /*======   Helper functions   ======*/
@@ -84,11 +91,27 @@
 typedef struct {
     unsigned k;                  /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
     unsigned d;                  /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */
-    unsigned steps;              /* Number of steps : Only used for optimization : 0 means default (32) : Higher means more parameters checked */
+    unsigned steps;              /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */
     unsigned nbThreads;          /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
+    double splitPoint;           /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used for training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), 1.0 when all samples are used for both training and testing */
+    unsigned shrinkDict;         /* Train dictionaries to shrink in size, starting from the minimum size, and select the smallest dictionary that is at most shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */
+    unsigned shrinkDictMaxRegression; /* Bounds how much worse (in %) a smaller dictionary may be than the one built at the maximum dictionary size. */
     ZDICT_params_t zParams;
 } ZDICT_cover_params_t;
 
+typedef struct {
+    unsigned k;                  /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
+    unsigned d;                  /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */
+    unsigned f;                  /* log of size of frequency array : constraint: 0 < f <= 31 : 0 means default (20) */
+    unsigned steps;              /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */
+    unsigned nbThreads;          /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
+    double splitPoint;           /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used for training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (0.75), 1.0 when all samples are used for both training and testing */
+    unsigned accel;              /* Acceleration level: constraint: 0 < accel <= 10, higher means faster and less accurate, 0 means default(1) */
+    unsigned shrinkDict;         /* Train dictionaries to shrink in size, starting from the minimum size, and select the smallest dictionary that is at most shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */
+    unsigned shrinkDictMaxRegression; /* Bounds how much worse (in %) a smaller dictionary may be than the one built at the maximum dictionary size. */
+
+    ZDICT_params_t zParams;
+} ZDICT_fastCover_params_t;
 
 /*! ZDICT_trainFromBuffer_cover():
  *  Train a dictionary from an array of samples using the COVER algorithm.
@@ -97,6 +120,7 @@
  *  The resulting dictionary will be saved into `dictBuffer`.
  * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
  *          or an error code, which can be tested with ZDICT_isError().
+ *          See ZDICT_trainFromBuffer() for details on failure modes.
  *  Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte.
  *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
  *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
@@ -115,13 +139,14 @@
  * and the dictionary constructed with those parameters is stored in `dictBuffer`.
  *
  * All of the parameters d, k, steps are optional.
- * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8, 10, 12, 14, 16}.
+ * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.
  * If steps is zero, the default value (40) is used.
- * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [16, 2048].
+ * If k is non-zero then we don't check multiple values of k, otherwise we check `steps` values of k in [50, 2000].
  *
  * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
- *           or an error code, which can be tested with ZDICT_isError().
- *           On success `*parameters` contains the parameters selected.
+ *          or an error code, which can be tested with ZDICT_isError().
+ *          On success `*parameters` contains the parameters selected.
+ *          See ZDICT_trainFromBuffer() for details on failure modes.
  * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte, plus an additional 5 bytes of memory per input byte for each thread.
  */
 ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
@@ -129,6 +154,50 @@
     const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
           ZDICT_cover_params_t* parameters);
 
+/*! ZDICT_trainFromBuffer_fastCover():
+ *  Train a dictionary from an array of samples using a modified version of COVER algorithm.
+ *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
+ *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
+ *  d and k are required.
+ *  All other parameters are optional and will use default values if not provided.
+ *  The resulting dictionary will be saved into `dictBuffer`.
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+ *          or an error code, which can be tested with ZDICT_isError().
+ *          See ZDICT_trainFromBuffer() for details on failure modes.
+ *  Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory.
+ *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
+ *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
+ *        In general, it's recommended to provide a few thousand samples, though this can vary a lot.
+ *        It's recommended that the total size of all samples be about 100x the target size of the dictionary.
+ */
+ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer,
+                    size_t dictBufferCapacity, const void *samplesBuffer,
+                    const size_t *samplesSizes, unsigned nbSamples,
+                    ZDICT_fastCover_params_t parameters);
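
A minimal sketch of a direct call (not part of this change); d and k are the only required fields, zeroed fields keep their documented defaults, and k=200 is just an assumed, corpus-dependent choice:

#include <string.h>
#include "zdict.h"

static size_t fastCoverTrainExample(void* dictBuffer, size_t dictCapacity,
                                    const void* samples, const size_t* sampleSizes,
                                    unsigned nbSamples)
{
    ZDICT_fastCover_params_t params;
    memset(&params, 0, sizeof(params));   /* f, steps, accel, splitPoint : defaults */
    params.d = 8;                         /* dmer size (required) */
    params.k = 200;                       /* segment size (required), tune per corpus */
    return ZDICT_trainFromBuffer_fastCover(dictBuffer, dictCapacity,
                                           samples, sampleSizes, nbSamples, params);
}
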
+
+/*! ZDICT_optimizeTrainFromBuffer_fastCover():
+ * The same requirements as above hold for all the parameters except `parameters`.
+ * This function tries many parameter combinations (specifically, k and d combinations)
+ * and picks the best parameters. `*parameters` is filled with the best parameters found,
+ * and the dictionary constructed with those parameters is stored in `dictBuffer`.
+ * All of the parameters d, k, steps, f, and accel are optional.
+ * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.
+ * If steps is zero, the default value (40) is used.
+ * If k is non-zero then we don't check multiple values of k, otherwise we check `steps` values of k in [50, 2000].
+ * If f is zero, default value of 20 is used.
+ * If accel is zero, default value of 1 is used.
+ *
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+ *          or an error code, which can be tested with ZDICT_isError().
+ *          On success `*parameters` contains the parameters selected.
+ *          See ZDICT_trainFromBuffer() for details on failure modes.
+ * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread.
+ */
+ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer,
+                    size_t dictBufferCapacity, const void* samplesBuffer,
+                    const size_t* samplesSizes, unsigned nbSamples,
+                    ZDICT_fastCover_params_t* parameters);
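
The optimizing variant in sketch form (not part of this change): with k and d left at zero the function searches both, and writes the winning values back through `parameters`:

#include <string.h>
#include "zdict.h"

static size_t fastCoverOptimizeExample(void* dictBuffer, size_t dictCapacity,
                                       const void* samples, const size_t* sampleSizes,
                                       unsigned nbSamples)
{
    ZDICT_fastCover_params_t params;
    size_t dictSize;
    memset(&params, 0, sizeof(params));   /* k = d = 0 : search both; f = 0 : default (20) */
    dictSize = ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, dictCapacity,
                                                       samples, sampleSizes, nbSamples,
                                                       &params);
    /* on success, params.k and params.d hold the selected values */
    return dictSize;
}
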
+
 /*! ZDICT_finalizeDictionary():
  * Given a custom content as a basis for dictionary, and a set of samples,
  * finalize dictionary by adding headers and statistics.
@@ -140,7 +209,7 @@
  * maxDictSize must be >= dictContentSize, and must be >= ZDICT_DICTSIZE_MIN bytes.
  *
  * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`),
- *           or an error code, which can be tested by ZDICT_isError().
+ *          or an error code, which can be tested by ZDICT_isError().
  * Note: ZDICT_finalizeDictionary() will push notifications into stderr if instructed to, using notificationLevel>0.
  * Note 2: dictBuffer and dictContent can overlap
  */
@@ -164,6 +233,7 @@
  * `parameters` is optional and can be provided with values set to 0 to mean "default".
  * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
  *          or an error code, which can be tested with ZDICT_isError().
+ *          See ZDICT_trainFromBuffer() for details on failure modes.
  *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
  *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
  *        In general, it's recommended to provide a few thousand samples, though this can vary a lot.
diff --git a/vendor/github.com/DataDog/zstd/zstd.go b/vendor/github.com/DataDog/zstd/zstd.go
index e9953d6..b6af4eb 100644
--- a/vendor/github.com/DataDog/zstd/zstd.go
+++ b/vendor/github.com/DataDog/zstd/zstd.go
@@ -23,6 +23,7 @@
 	"bytes"
 	"errors"
 	"io/ioutil"
+	"runtime"
 	"unsafe"
 )
 
@@ -84,6 +85,7 @@
 		C.size_t(len(src)),
 		C.int(level))
 
+	runtime.KeepAlive(src)
 	written := int(cWritten)
 	// Check if the return is an Error code
 	if err := getError(written); err != nil {
@@ -107,6 +109,7 @@
 			C.uintptr_t(uintptr(unsafe.Pointer(&src[0]))),
 			C.size_t(len(src)))
 
+		runtime.KeepAlive(src)
 		written := int(cWritten)
 		// Check error
 		if err := getError(written); err != nil {
@@ -115,7 +118,7 @@
 		return dst[:written], nil
 	}
 
-	if dst == nil {
+	if len(dst) == 0 {
 		// Attempt to use zStd to determine decompressed size (may result in error or 0)
 		size := int(C.size_t(C.ZSTD_getDecompressedSize(unsafe.Pointer(&src[0]), C.size_t(len(src)))))
 
diff --git a/vendor/github.com/DataDog/zstd/zstd.h b/vendor/github.com/DataDog/zstd/zstd.h
index 6405da6..a1910ee 100644
--- a/vendor/github.com/DataDog/zstd/zstd.h
+++ b/vendor/github.com/DataDog/zstd/zstd.h
@@ -35,40 +35,73 @@
 #endif
 
 
-/*******************************************************************************************************
+/*******************************************************************************
   Introduction
 
-  zstd, short for Zstandard, is a fast lossless compression algorithm,
-  targeting real-time compression scenarios at zlib-level and better compression ratios.
-  The zstd compression library provides in-memory compression and decompression functions.
-  The library supports compression levels from 1 up to ZSTD_maxCLevel() which is currently 22.
-  Levels >= 20, labeled `--ultra`, should be used with caution, as they require more memory.
+  zstd, short for Zstandard, is a fast lossless compression algorithm, targeting
+  real-time compression scenarios at zlib-level and better compression ratios.
+  The zstd compression library provides in-memory compression and decompression
+  functions.
+
+  The library supports regular compression levels from 1 up to ZSTD_maxCLevel(),
+  which is currently 22. Levels >= 20, labeled `--ultra`, should be used with
+  caution, as they require more memory. The library also offers negative
+  compression levels, which extend the range of speed vs. ratio preferences.
+  The lower the level, the faster the speed (at the cost of compression).
+
   Compression can be done in:
     - a single step (described as Simple API)
     - a single step, reusing a context (described as Explicit context)
     - unbounded multiple steps (described as Streaming compression)
-  The compression ratio achievable on small data can be highly improved using a dictionary in:
-    - a single step (described as Simple dictionary API)
-    - a single step, reusing a dictionary (described as Bulk-processing dictionary API)
 
-  Advanced experimental functions can be accessed using #define ZSTD_STATIC_LINKING_ONLY before including zstd.h.
-  Advanced experimental APIs shall never be used with a dynamic library.
-  They are not "stable", their definition may change in the future. Only static linking is allowed.
-*********************************************************************************************************/
+  The compression ratio achievable on small data can be highly improved using
+  a dictionary. Dictionary compression can be performed in:
+    - a single step (described as Simple dictionary API)
+    - a single step, reusing a dictionary (described as Bulk-processing
+      dictionary API)
+
+  Advanced experimental functions can be accessed using
+  `#define ZSTD_STATIC_LINKING_ONLY` before including zstd.h.
+
+  Advanced experimental APIs should never be used with a dynamically-linked
+  library. They are not "stable"; their definitions or signatures may change in
+  the future. Only static linking is allowed.
+*******************************************************************************/
 
 /*------   Version   ------*/
 #define ZSTD_VERSION_MAJOR    1
-#define ZSTD_VERSION_MINOR    3
-#define ZSTD_VERSION_RELEASE  4
+#define ZSTD_VERSION_MINOR    4
+#define ZSTD_VERSION_RELEASE  1
 
 #define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
-ZSTDLIB_API unsigned ZSTD_versionNumber(void);   /**< useful to check dll version */
+ZSTDLIB_API unsigned ZSTD_versionNumber(void);   /**< to check runtime library version */
 
 #define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
 #define ZSTD_QUOTE(str) #str
 #define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)
 #define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
-ZSTDLIB_API const char* ZSTD_versionString(void);   /* added in v1.3.0 */
+ZSTDLIB_API const char* ZSTD_versionString(void);   /* requires v1.3.0+ */
+
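
The version macros compose as major*10000 + minor*100 + release, so v1.4.1 yields 10401. A small sketch (not part of this change) comparing the compile-time header against the runtime library:

#include <stdio.h>
#include "zstd.h"

static void versionCheck(void)
{
    if (ZSTD_versionNumber() != ZSTD_VERSION_NUMBER)
        printf("warning: compiled against %u but running %u\n",
               (unsigned)ZSTD_VERSION_NUMBER, ZSTD_versionNumber());
    printf("zstd %s\n", ZSTD_versionString());
}
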
+/* *************************************
+ *  Default constant
+ ***************************************/
+#ifndef ZSTD_CLEVEL_DEFAULT
+#  define ZSTD_CLEVEL_DEFAULT 3
+#endif
+
+/* *************************************
+ *  Constants
+ ***************************************/
+
+/* All magic numbers are supposed to be read/written to/from files/memory using little-endian convention */
+#define ZSTD_MAGICNUMBER            0xFD2FB528    /* valid since v0.8.0 */
+#define ZSTD_MAGIC_DICTIONARY       0xEC30A437    /* valid since v0.7.0 */
+#define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50    /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */
+#define ZSTD_MAGIC_SKIPPABLE_MASK   0xFFFFFFF0
+
+#define ZSTD_BLOCKSIZELOG_MAX  17
+#define ZSTD_BLOCKSIZE_MAX     (1<<ZSTD_BLOCKSIZELOG_MAX)
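
A sketch (not part of this change) of how the constants above can classify a buffer; the little-endian read is open-coded so the check works on any host byte order:

#include <stddef.h>
#include "zstd.h"

static unsigned readLE32(const unsigned char* p)
{
    return (unsigned)p[0] | ((unsigned)p[1]<<8) | ((unsigned)p[2]<<16) | ((unsigned)p[3]<<24);
}

static const char* frameKind(const void* src, size_t srcSize)
{
    unsigned magic;
    if (srcSize < 4) return "too small";
    magic = readLE32((const unsigned char*)src);
    if (magic == ZSTD_MAGICNUMBER) return "zstd frame";
    if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return "skippable frame";
    return "not a zstd frame";
}
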
+
 
 
 /***************************************
@@ -92,11 +125,11 @@
 ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
                               const void* src, size_t compressedSize);
 
-/*! ZSTD_getFrameContentSize() : added in v1.3.0
+/*! ZSTD_getFrameContentSize() : requires v1.3.0+
  *  `src` should point to the start of a ZSTD encoded frame.
  *  `srcSize` must be at least as large as the frame header.
  *            hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
- *  @return : - decompressed size of the frame in `src`, if known
+ *  @return : - decompressed size of `src` frame content, if known
  *            - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
  *            - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
  *   note 1 : a 0 return value means the frame is valid but "empty".
@@ -106,7 +139,8 @@
  *            Optionally, application can rely on some implicit limit,
  *            as ZSTD_decompress() only needs an upper bound of decompressed size.
  *            (For example, data could be necessarily cut into blocks <= 16 KB).
- *   note 3 : decompressed size is always present when compression is done with ZSTD_compress()
+ *   note 3 : decompressed size is always present when compression is completed using single-pass functions,
+ *            such as ZSTD_compress(), ZSTD_compressCCtx(), ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
  *   note 4 : decompressed size can be very large (64-bits value),
  *            potentially larger than what local system can handle as a single memory segment.
  *            In which case, it's necessary to use streaming mode to decompress data.
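
A sketch of the intended pattern (not part of this change), assuming ZSTD_CONTENTSIZE_UNKNOWN and ZSTD_CONTENTSIZE_ERROR are defined alongside this declaration: size the destination from the frame header, then decompress in one pass.

#include <stdlib.h>
#include "zstd.h"

static void* decompressWhole(const void* src, size_t srcSize, size_t* dstSizePtr)
{
    unsigned long long const contentSize = ZSTD_getFrameContentSize(src, srcSize);
    void* dst;
    if (contentSize == ZSTD_CONTENTSIZE_ERROR) return NULL;    /* not a valid frame */
    if (contentSize == ZSTD_CONTENTSIZE_UNKNOWN) return NULL;  /* use streaming mode instead */
    /* caution : per note 4, contentSize may exceed what a 32-bit size_t can hold */
    dst = malloc(contentSize ? (size_t)contentSize : 1);       /* 0 = valid but empty frame */
    if (dst == NULL) return NULL;
    *dstSizePtr = ZSTD_decompress(dst, (size_t)contentSize, src, srcSize);
    if (ZSTD_isError(*dstSizePtr)) { free(dst); return NULL; }
    return dst;
}
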
@@ -123,16 +157,24 @@
  *  Both functions work the same way, but ZSTD_getDecompressedSize() blends
  *  "empty", "unknown" and "error" results to the same return value (0),
  *  while ZSTD_getFrameContentSize() gives them separate return values.
- * `src` is the start of a zstd compressed frame.
- * @return : content size to be decompressed, as a 64-bits value _if known and not empty_, 0 otherwise. */
+ * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
 ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
 
+/*! ZSTD_findFrameCompressedSize() :
+ * `src` should point to the start of a ZSTD frame or skippable frame.
+ * `srcSize` must be >= first frame size
+ * @return : the compressed size of the first frame starting at `src`,
+ *           suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
+ *        or an error code if input is invalid */
+ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
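
A sketch of the typical use (not part of this change): walking a buffer that holds several concatenated frames, advancing by each frame's compressed size:

#include <stddef.h>
#include "zstd.h"

static int countFrames(const void* src, size_t srcSize)
{
    const char* ptr = (const char*)src;
    int nbFrames = 0;
    while (srcSize > 0) {
        size_t const frameSize = ZSTD_findFrameCompressedSize(ptr, srcSize);
        if (ZSTD_isError(frameSize)) return -1;   /* invalid or truncated input */
        ptr += frameSize;
        srcSize -= frameSize;
        nbFrames++;
    }
    return nbFrames;
}
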
+
 
 /*======  Helper functions  ======*/
 #define ZSTD_COMPRESSBOUND(srcSize)   ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0))  /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
 ZSTDLIB_API size_t      ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
 ZSTDLIB_API unsigned    ZSTD_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
 ZSTDLIB_API const char* ZSTD_getErrorName(size_t code);     /*!< provides readable string from an error code */
+ZSTDLIB_API int         ZSTD_minCLevel(void);               /*!< minimum negative compression level allowed */
 ZSTDLIB_API int         ZSTD_maxCLevel(void);               /*!< maximum compression level available */
 
 
@@ -141,16 +183,23 @@
 ***************************************/
 /*= Compression context
  *  When compressing many times,
- *  it is recommended to allocate a context just once, and re-use it for each successive compression operation.
+ *  it is recommended to allocate a context just once,
+ *  and re-use it for each successive compression operation.
  *  This will make the workload friendlier to the system's memory.
- *  Use one context per thread for parallel execution in multi-threaded environments. */
+ *  Note : re-using context is just a speed / resource optimization.
+ *         It doesn't change the compression ratio, which remains identical.
+ *  Note 2 : In multi-threaded environments,
+ *         use a separate context per thread for parallel execution.
+ */
 typedef struct ZSTD_CCtx_s ZSTD_CCtx;
 ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
 ZSTDLIB_API size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);
 
 /*! ZSTD_compressCCtx() :
- *  Same as ZSTD_compress(), requires an allocated ZSTD_CCtx (see ZSTD_createCCtx()). */
-ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* ctx,
+ *  Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
+ *  The function will compress at the requested compression level,
+ *  ignoring any other parameters. */
+ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
                                      void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize,
                                      int compressionLevel);
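
A sketch of the recommended reuse pattern (not part of this change): one context allocated up front, then many one-shot compressions through it:

#include <stdlib.h>
#include "zstd.h"

static void compressMany(const void* const* srcs, const size_t* srcSizes, size_t nbSrcs)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();   /* allocated once, reused below */
    size_t i;
    if (cctx == NULL) return;
    for (i = 0; i < nbSrcs; i++) {
        size_t const bound = ZSTD_compressBound(srcSizes[i]);
        void* const dst = malloc(bound);
        if (dst != NULL) {
            size_t const cSize = ZSTD_compressCCtx(cctx, dst, bound,
                                                   srcs[i], srcSizes[i], 3 /*level*/);
            if (!ZSTD_isError(cSize)) { /* consume (dst, cSize) here */ }
            free(dst);
        }
    }
    ZSTD_freeCCtx(cctx);
}
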
@@ -166,82 +215,328 @@
 ZSTDLIB_API size_t     ZSTD_freeDCtx(ZSTD_DCtx* dctx);
 
 /*! ZSTD_decompressDCtx() :
- *  Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx (see ZSTD_createDCtx()) */
-ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* ctx,
+ *  Same as ZSTD_decompress(),
+ *  requires an allocated ZSTD_DCtx.
+ *  Compatible with sticky parameters.
+ */
+ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
                                        void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize);
 
 
-/**************************
-*  Simple dictionary API
-***************************/
-/*! ZSTD_compress_usingDict() :
- *  Compression using a predefined Dictionary (see dictBuilder/zdict.h).
- *  Note : This function loads the dictionary, resulting in significant startup delay.
- *  Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
-ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
-                                           void* dst, size_t dstCapacity,
-                                     const void* src, size_t srcSize,
-                                     const void* dict,size_t dictSize,
-                                           int compressionLevel);
+/***************************************
+*  Advanced compression API
+***************************************/
 
-/*! ZSTD_decompress_usingDict() :
- *  Decompression using a predefined Dictionary (see dictBuilder/zdict.h).
- *  Dictionary must be identical to the one used during compression.
- *  Note : This function loads the dictionary, resulting in significant startup delay.
- *  Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
-ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
-                                             void* dst, size_t dstCapacity,
-                                       const void* src, size_t srcSize,
-                                       const void* dict,size_t dictSize);
+/* API design :
+ *   Parameters are pushed one by one into an existing context,
+ *   using ZSTD_CCtx_set*() functions.
+ *   Pushed parameters are sticky : they are valid for the next compressed frame, and any subsequent frame.
+ *   "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` !
+ *   They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()
+ *
+ *   It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
+ *
+ *   This API supersedes all other "advanced" API entry points in the experimental section.
+ *   In the future, we expect to remove entry points from the experimental API which are redundant with this API.
+ */
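
A sketch of the flow this describes (not part of this change), assuming ZSTD_CCtx_setParameter(), ZSTD_compress2() and the ZSTD_c_* parameter enum declared further down in this header (all part of the same v1.4.x API):

#include "zstd.h"

static size_t advancedCompress(ZSTD_CCtx* cctx,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize)
{
    /* sticky : these settings persist for this frame and all subsequent ones,
     * until changed again or cleared with ZSTD_CCtx_reset() */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
    return ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
}
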
 
 
-/**********************************
- *  Bulk processing dictionary API
- *********************************/
-typedef struct ZSTD_CDict_s ZSTD_CDict;
-
-/*! ZSTD_createCDict() :
- *  When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
- *  ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
- *  ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
- *  `dictBuffer` can be released after ZSTD_CDict creation, since its content is copied within CDict */
-ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
-                                         int compressionLevel);
-
-/*! ZSTD_freeCDict() :
- *  Function frees memory allocated by ZSTD_createCDict(). */
-ZSTDLIB_API size_t      ZSTD_freeCDict(ZSTD_CDict* CDict);
-
-/*! ZSTD_compress_usingCDict() :
- *  Compression using a digested Dictionary.
- *  Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
- *  Note that compression level is decided during dictionary creation.
- *  Frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
-ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
-                                            void* dst, size_t dstCapacity,
-                                      const void* src, size_t srcSize,
-                                      const ZSTD_CDict* cdict);
+/* Compression strategies, listed from fastest to strongest */
+typedef enum { ZSTD_fast=1,
+               ZSTD_dfast=2,
+               ZSTD_greedy=3,
+               ZSTD_lazy=4,
+               ZSTD_lazy2=5,
+               ZSTD_btlazy2=6,
+               ZSTD_btopt=7,
+               ZSTD_btultra=8,
+               ZSTD_btultra2=9
+               /* note : new strategies _might_ be added in the future.
+                         Only the order (from fast to strong) is guaranteed */
+} ZSTD_strategy;
 
 
-typedef struct ZSTD_DDict_s ZSTD_DDict;
+typedef enum {
 
-/*! ZSTD_createDDict() :
- *  Create a digested dictionary, ready to start decompression operation without startup delay.
- *  dictBuffer can be released after DDict creation, as its content is copied inside DDict */
-ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
+    /* compression parameters
+     * Note: When compressing with a ZSTD_CDict these parameters are superseded
+     * by the parameters used to construct the ZSTD_CDict. See ZSTD_CCtx_refCDict()
+     * for more info (superseded-by-cdict). */
+    ZSTD_c_compressionLevel=100, /* Update all compression parameters according to pre-defined cLevel table
+                              * Default level is ZSTD_CLEVEL_DEFAULT==3.
+                              * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
+                              * Note 1 : it's possible to pass a negative compression level.
+                              * Note 2 : setting a level sets all default values of other compression parameters */
+    ZSTD_c_windowLog=101,    /* Maximum allowed back-reference distance, expressed as power of 2.
+                              * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
+                              * Special: value 0 means "use default windowLog".
+                              * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT
+                              *       requires explicitly allowing such window size at decompression stage if using streaming. */
+    ZSTD_c_hashLog=102,      /* Size of the initial probe table, as a power of 2.
+                              * Resulting memory usage is (1 << (hashLog+2)).
+                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
+                              * Larger tables improve compression ratio of strategies <= dFast,
+                              * and improve speed of strategies > dFast.
+                              * Special: value 0 means "use default hashLog". */
+    ZSTD_c_chainLog=103,     /* Size of the multi-probe search table, as a power of 2.
+                              * Resulting memory usage is (1 << (chainLog+2)).
+                              * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.
+                              * Larger tables result in better and slower compression.
+                              * This parameter is useless when using "fast" strategy.
+                              * It's still useful when using "dfast" strategy,
+                              * in which case it defines a secondary probe table.
+                              * Special: value 0 means "use default chainLog". */
+    ZSTD_c_searchLog=104,    /* Number of search attempts, as a power of 2.
+                              * More attempts result in better and slower compression.
+                              * This parameter is useless when using "fast" and "dFast" strategies.
+                              * Special: value 0 means "use default searchLog". */
+    ZSTD_c_minMatch=105,     /* Minimum size of searched matches.
+                              * Note that Zstandard can still find matches of smaller size,
+                              * it just tweaks its search algorithm to look for this size and larger.
+                              * Larger values increase compression and decompression speed, but decrease ratio.
+                              * Must be clamped between ZSTD_MINMATCH_MIN and ZSTD_MINMATCH_MAX.
+                              * Note that currently, for all strategies < btopt, effective minimum is 4,
+                              *                      and for all strategies > fast, effective maximum is 6.
+                              * Special: value 0 means "use default minMatchLength". */
+    ZSTD_c_targetLength=106, /* Impact of this field depends on strategy.
+                              * For strategies btopt, btultra & btultra2:
+                              *     Length of Match considered "good enough" to stop search.
+                              *     Larger values make compression stronger, and slower.
+                              * For strategy fast:
+                              *     Distance between match sampling.
+                              *     Larger values make compression faster, and weaker.
+                              * Special: value 0 means "use default targetLength". */
+    ZSTD_c_strategy=107,     /* See ZSTD_strategy enum definition.
+                              * The higher the value of selected strategy, the more complex it is,
+                              * resulting in stronger and slower compression.
+                              * Special: value 0 means "use default strategy". */
 
-/*! ZSTD_freeDDict() :
- *  Function frees memory allocated with ZSTD_createDDict() */
-ZSTDLIB_API size_t      ZSTD_freeDDict(ZSTD_DDict* ddict);
+    /* LDM mode parameters */
+    ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching.
+                                     * This parameter is designed to improve compression ratio
+                                     * for large inputs, by finding large matches at long distance.
+                                     * It increases memory usage and window size.
+                                     * Note: enabling this parameter increases the default window size to 128 MB (i.e. ZSTD_c_windowLog 27)
+                                     * except when expressly set to a different value. */
+    ZSTD_c_ldmHashLog=161,   /* Size of the table for long distance matching, as a power of 2.
+                              * Larger values increase memory usage and compression ratio,
+                              * but decrease compression speed.
+                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
+                              * default: windowlog - 7.
+                              * Special: value 0 means "automatically determine hashlog". */
+    ZSTD_c_ldmMinMatch=162,  /* Minimum match size for long distance matcher.
+                              * Larger/too small values usually decrease compression ratio.
+                              * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX.
+                              * Special: value 0 means "use default value" (default: 64). */
+    ZSTD_c_ldmBucketSizeLog=163, /* Log size of each bucket in the LDM hash table for collision resolution.
+                              * Larger values improve collision resolution but decrease compression speed.
+                              * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX.
+                              * Special: value 0 means "use default value" (default: 3). */
+    ZSTD_c_ldmHashRateLog=164, /* Frequency of inserting/looking up entries into the LDM hash table.
+                              * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN).
+                              * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage.
+                              * Larger values improve compression speed.
+                              * Deviating far from default value will likely result in a compression ratio decrease.
+                              * Special: value 0 means "automatically determine hashRateLog". */
 
-/*! ZSTD_decompress_usingDDict() :
- *  Decompression using a digested Dictionary.
- *  Faster startup than ZSTD_decompress_usingDict(), recommended when same dictionary is used multiple times. */
-ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
-                                              void* dst, size_t dstCapacity,
-                                        const void* src, size_t srcSize,
-                                        const ZSTD_DDict* ddict);
+    /* frame parameters */
+    ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
+                              * Content size must be known at the beginning of compression.
+                              * This is automatically the case when using ZSTD_compress2().
+                              * For streaming variants, content size must be provided with ZSTD_CCtx_setPledgedSrcSize(). */
+    ZSTD_c_checksumFlag=201, /* A 32-bit checksum of content is written at the end of the frame (default:0) */
+    ZSTD_c_dictIDFlag=202,   /* When applicable, dictionary's ID is written into frame header (default:1) */
+
+    /* multi-threading parameters */
+    /* These parameters are only useful if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD).
+     * They return an error otherwise. A usage sketch follows this enum. */
+    ZSTD_c_nbWorkers=400,    /* Select how many threads will be spawned to compress in parallel.
+                              * When nbWorkers >= 1, triggers asynchronous mode when used with ZSTD_compressStream*() :
+                              * ZSTD_compressStream*() consumes input and flushes output if possible, but immediately gives back control to the caller,
+                              * while compression work is performed in parallel, within worker threads.
+                              * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end :
+                              *  in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call).
+                              * More workers improve speed, but also increase memory usage.
+                              * Default value is `0`, aka "single-threaded mode" : no worker is spawned, compression is performed inside the caller's thread, and all invocations are blocking */
+    ZSTD_c_jobSize=401,      /* Size of a compression job. This value is enforced only when nbWorkers >= 1.
+                              * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.
+                              * 0 means default, which is dynamically determined based on compression parameters.
+                              * Job size must be at least the overlap size, or 1 MB, whichever is larger.
+                              * The minimum size is automatically and transparently enforced */
+    ZSTD_c_overlapLog=402,   /* Control the overlap size, as a fraction of window size.
+                              * The overlap size is an amount of data reloaded from previous job at the beginning of a new job.
+                              * It helps preserve compression ratio, while each job is compressed in parallel.
+                              * This value is enforced only when nbWorkers >= 1.
+                              * Larger values increase compression ratio, but decrease speed.
+                              * Possible values range from 0 to 9 :
+                              * - 0 means "default" : value will be determined by the library, depending on strategy
+                              * - 1 means "no overlap"
+                              * - 9 means "full overlap", using a full window size.
+                              * Each intermediate rank increases/decreases load size by a factor 2 :
+                              * 9: full window;  8: w/2;  7: w/4;  6: w/8;  5:w/16;  4: w/32;  3:w/64;  2:w/128;  1:no overlap;  0:default
+                              * default value varies between 6 and 9, depending on strategy */
+
+    /* note : additional experimental parameters are also available
+     * within the experimental section of the API.
+     * At the time of this writing, they include :
+     * ZSTD_c_rsyncable
+     * ZSTD_c_format
+     * ZSTD_c_forceMaxWindow
+     * ZSTD_c_forceAttachDict
+     * ZSTD_c_literalCompressionMode
+     * ZSTD_c_targetCBlockSize
+     * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
+     * note : never ever use experimentalParam? names directly;
+     *        also, the enum values themselves are unstable and can still change.
+     */
+     ZSTD_c_experimentalParam1=500,
+     ZSTD_c_experimentalParam2=10,
+     ZSTD_c_experimentalParam3=1000,
+     ZSTD_c_experimentalParam4=1001,
+     ZSTD_c_experimentalParam5=1002,
+     ZSTD_c_experimentalParam6=1003,
+} ZSTD_cParameter;
+
+typedef struct {
+    size_t error;
+    int lowerBound;
+    int upperBound;
+} ZSTD_bounds;
+
+/*! ZSTD_cParam_getBounds() :
+ *  All parameters must belong to an interval with lower and upper bounds,
+ *  otherwise they will either trigger an error or be automatically clamped.
+ * @return : a structure, ZSTD_bounds, which contains
+ *         - an error status field, which must be tested using ZSTD_isError()
+ *         - lower and upper bounds, both inclusive
+ */
+ZSTDLIB_API ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter cParam);
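+
+/* Example (illustrative sketch) : clamping a requested compression level
+ * into the valid range before setting it. Assumes `cctx` is a valid
+ * ZSTD_CCtx* and `requested` is an application-chosen int;
+ * error handling is elided.
+ *
+ *     ZSTD_bounds const b = ZSTD_cParam_getBounds(ZSTD_c_compressionLevel);
+ *     int level = requested;
+ *     if (!ZSTD_isError(b.error)) {
+ *         if (level < b.lowerBound) level = b.lowerBound;
+ *         if (level > b.upperBound) level = b.upperBound;
+ *     }
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
+ */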
+
+/*! ZSTD_CCtx_setParameter() :
+ *  Set one compression parameter, selected by enum ZSTD_cParameter.
+ *  All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds().
+ *  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
+ *  Setting a parameter is generally only possible during frame initialization (before starting compression).
+ *  Exception : when using multi-threading mode (nbWorkers >= 1),
+ *              the following parameters can be updated _during_ compression (within same frame):
+ *              => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.
+ *              new parameters will be active for next job only (after a flush()).
+ * @return : an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value);
+
+/*! ZSTD_CCtx_setPledgedSrcSize() :
+ *  Total input data size to be compressed as a single frame.
+ *  Value will be written in frame header, unless explicitly forbidden using ZSTD_c_contentSizeFlag.
+ *  This value will also be checked at end of frame, and trigger an error if not respected.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ *  Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame.
+ *           In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN.
+ *           ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame.
+ *  Note 2 : pledgedSrcSize is only valid once, for the next frame.
+ *           It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN.
+ *  Note 3 : Whenever all input data is provided and consumed in a single round,
+ *           for example with ZSTD_compress2(),
+ *           or by immediately invoking ZSTD_compressStream2(,,,ZSTD_e_end),
+ *           this value is automatically overridden by srcSize instead.
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
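+
+/* Example (illustrative sketch) : announcing the total size before streaming.
+ * Assumes `cctx` is a valid ZSTD_CCtx* and `totalSize` holds the known input size.
+ *
+ *     ZSTD_CCtx_setPledgedSrcSize(cctx, (unsigned long long)totalSize);
+ *
+ *  Then stream exactly `totalSize` bytes through ZSTD_compressStream2() :
+ *  the size is written into the frame header, and a mismatch is reported
+ *  as an error when the frame is finished with ZSTD_e_end.
+ */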
+
+typedef enum {
+    ZSTD_reset_session_only = 1,
+    ZSTD_reset_parameters = 2,
+    ZSTD_reset_session_and_parameters = 3
+} ZSTD_ResetDirective;
+
+/*! ZSTD_CCtx_reset() :
+ *  There are 2 different things that can be reset, independently or jointly :
+ *  - The session : will stop compressing current frame, and make CCtx ready to start a new one.
+ *                  Useful after an error, or to interrupt any ongoing compression.
+ *                  Any internal data not yet flushed is cancelled.
+ *                  Compression parameters and dictionary remain unchanged.
+ *                  They will be used to compress next frame.
+ *                  Resetting session never fails.
+ *  - The parameters : changes all parameters back to "default".
+ *                  This removes any reference to any dictionary too.
+ *                  Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing) ;
+ *                  otherwise the reset fails, and the function returns an error value (which can be tested using ZSTD_isError())
+ *  - Both : similar to resetting the session, followed by resetting parameters.
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);
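+
+/* Example (illustrative sketch) : recovering from an error, keeping parameters.
+ * Assumes `cctx` is a valid ZSTD_CCtx*.
+ *
+ *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);   // abandon current frame
+ *     // parameters and dictionary are preserved for the next frame ;
+ *     // use ZSTD_reset_session_and_parameters to also return to defaults.
+ */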
+
+/*! ZSTD_compress2() :
+ *  Behaves the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
+ *  ZSTD_compress2() always starts a new frame.
+ *  Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
+ *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
+ *  - The function is always blocking, returns when compression is completed.
+ *  Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.
+ * @return : compressed size written into `dst` (<= `dstCapacity`),
+ *           or an error code if it fails (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx,
+                                   void* dst, size_t dstCapacity,
+                             const void* src, size_t srcSize);
+
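+/* Example (illustrative sketch) : one-shot compression with the advanced API.
+ * Assumes `src`/`srcSize` describe the input and `dst`/`dstCapacity` describe
+ * a buffer of at least ZSTD_compressBound(srcSize) bytes; error checks abbreviated.
+ *
+ *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
+ *     size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
+ *     if (ZSTD_isError(cSize)) { ... }   // handle error
+ *     ZSTD_freeCCtx(cctx);
+ */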
+
+/***************************************
+*  Advanced decompression API
+***************************************/
+
+/* The advanced API pushes parameters one by one into an existing DCtx context.
+ * Parameters are sticky, and remain valid for all following frames
+ * using the same DCtx context.
+ * It's possible to reset parameters to default values using ZSTD_DCtx_reset().
+ * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream().
+ *        Therefore, no new decompression function is necessary.
+ */
+
+typedef enum {
+
+    ZSTD_d_windowLogMax=100, /* Select a size limit (as a power of 2) beyond which
+                              * the streaming API will refuse to allocate a memory buffer,
+                              * in order to protect the host from unreasonable memory requirements.
+                              * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
+                              * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT).
+                              * Special: value 0 means "use default maximum windowLog". */
+
+    /* note : additional experimental parameters are also available
+     * within the experimental section of the API.
+     * At the time of this writing, they include :
+     * ZSTD_d_format
+     * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
+     * note : never ever use experimentalParam? names directly
+     */
+     ZSTD_d_experimentalParam1=1000
+
+} ZSTD_dParameter;
+
+/*! ZSTD_dParam_getBounds() :
+ *  All parameters must belong to an interval with lower and upper bounds,
+ *  otherwise they will either trigger an error or be automatically clamped.
+ * @return : a structure, ZSTD_bounds, which contains
+ *         - an error status field, which must be tested using ZSTD_isError()
+ *         - both lower and upper bounds, inclusive
+ */
+ZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam);
+
+/*! ZSTD_DCtx_setParameter() :
+ *  Set one decompression parameter, selected by enum ZSTD_dParameter.
+ *  All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds().
+ *  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
+ *  Setting a parameter is only possible during frame initialization (before starting decompression).
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int value);
+
+/*! ZSTD_DCtx_reset() :
+ *  Return a DCtx to clean state.
+ *  Session and parameters can be reset jointly or separately.
+ *  Parameters can only be reset when no active frame is being decompressed.
+ * @return : 0, or an error code, which can be tested with ZSTD_isError()
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset);
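+
+/* Example (illustrative sketch) : limiting decoder memory, then returning to defaults.
+ * Assumes `dctx` is a valid ZSTD_DCtx*.
+ *
+ *     // refuse any frame requiring a window larger than 1<<26 (64 MB) :
+ *     ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 26);
+ *     // ... decompress with ZSTD_decompressDCtx() or ZSTD_decompressStream() ...
+ *     ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters);
+ */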
 
 
 /****************************
@@ -268,57 +563,153 @@
 *  A ZSTD_CStream object is required to track streaming operation.
 *  Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
 *  ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
-*  It is recommended to re-use ZSTD_CStream in situations where many streaming operations will be achieved consecutively,
-*  since it will play nicer with system's memory, by re-using already allocated memory.
-*  Use one separate ZSTD_CStream per thread for parallel execution.
+*  It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory.
 *
-*  Start a new compression by initializing ZSTD_CStream.
-*  Use ZSTD_initCStream() to start a new compression operation.
-*  Use ZSTD_initCStream_usingDict() or ZSTD_initCStream_usingCDict() for a compression which requires a dictionary (experimental section)
+*  For parallel execution, use one separate ZSTD_CStream per thread.
 *
-*  Use ZSTD_compressStream() repetitively to consume input stream.
-*  The function will automatically update both `pos` fields.
-*  Note that it may not consume the entire input, in which case `pos < size`,
-*  and it's up to the caller to present again remaining data.
-*  @return : a size hint, preferred nb of bytes to use as input for next function call
-*            or an error code, which can be tested using ZSTD_isError().
-*            Note 1 : it's just a hint, to help latency a little, any other value will work fine.
-*            Note 2 : size hint is guaranteed to be <= ZSTD_CStreamInSize()
+*  note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
 *
-*  At any moment, it's possible to flush whatever data remains within internal buffer, using ZSTD_flushStream().
-*  `output->pos` will be updated.
-*  Note that some content might still be left within internal buffer if `output->size` is too small.
-*  @return : nb of bytes still present within internal buffer (0 if it's empty)
+*  Parameters are sticky : when starting a new compression on the same context,
+*  it will re-use the same sticky parameters as previous compression session.
+*  When in doubt, it's recommended to fully initialize the context before usage.
+*  Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),
+*  ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to
+*  set more specific parameters, the pledged source size, or load a dictionary.
+*
+*  Use ZSTD_compressStream2() with ZSTD_e_continue as many times as necessary to
+*  consume input stream. The function will automatically update both `pos`
+*  fields within `input` and `output`.
+*  Note that the function may not consume the entire input, for example, because
+*  the output buffer is already full, in which case `input.pos < input.size`.
+*  The caller must check if input has been entirely consumed.
+*  If not, the caller must make some room to receive more compressed data,
+*  and then present again remaining input data.
+*  note: ZSTD_e_continue is guaranteed to make some forward progress when called,
+*        but doesn't guarantee maximal forward progress. This is especially relevant
+*        when compressing with multiple threads. The call won't block if it can
+*        consume some input, but if it can't it will wait for some, but not all,
+*        output to be flushed.
+* @return : provides a minimum amount of data remaining to be flushed from internal buffers
+*           or an error code, which can be tested using ZSTD_isError().
+*
+*  At any moment, it's possible to flush whatever data might remain stuck within internal buffer,
+*  using ZSTD_compressStream2() with ZSTD_e_flush. `output->pos` will be updated.
+*  Note that, if `output->size` is too small, a single invocation with ZSTD_e_flush might not be enough (return code > 0).
+*  In which case, make some room to receive more compressed data, and call again ZSTD_compressStream2() with ZSTD_e_flush.
+*  You must continue calling ZSTD_compressStream2() with ZSTD_e_flush until it returns 0, at which point you can change the
+*  operation.
+*  note: ZSTD_e_flush will flush as much output as possible, meaning when compressing with multiple threads, it will
+*        block until the flush is complete or the output buffer is full.
+*  @return : 0 if internal buffers are entirely flushed,
+*            >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
 *            or an error code, which can be tested using ZSTD_isError().
 *
-*  ZSTD_endStream() instructs to finish a frame.
+*  Calling ZSTD_compressStream2() with ZSTD_e_end instructs to finish a frame.
 *  It will perform a flush and write frame epilogue.
 *  The epilogue is required for decoders to consider a frame completed.
-*  ZSTD_endStream() may not be able to flush full data if `output->size` is too small.
-*  In which case, call again ZSTD_endStream() to complete the flush.
+*  The flush operation is the same, and follows the same rules as calling ZSTD_compressStream2() with ZSTD_e_flush.
+*  You must continue calling ZSTD_compressStream2() with ZSTD_e_end until it returns 0, at which point you are free to
+*  start a new frame.
+*  note: ZSTD_e_end will flush as much output as possible, meaning when compressing with multiple threads, it will
+*        block until the flush is complete or the output buffer is full.
 *  @return : 0 if frame fully completed and fully flushed,
-             or >0 if some data is still present within internal buffer
-                  (value is minimum size estimation for remaining data to flush, but it could be more)
+*            >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
 *            or an error code, which can be tested using ZSTD_isError().
 *
 * *******************************************************************/
 
 typedef ZSTD_CCtx ZSTD_CStream;  /**< CCtx and CStream are now effectively same object (>= v1.3.0) */
-                                 /* Continue to distinguish them for compatibility with versions <= v1.2.0 */
+                                 /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
 /*===== ZSTD_CStream management functions =====*/
 ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
 ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
 
 /*===== Streaming compression functions =====*/
-ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
-ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
-ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
-ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+typedef enum {
+    ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */
+    ZSTD_e_flush=1,    /* flush any data provided so far,
+                        * it creates (at least) one new block, that can be decoded immediately on reception;
+                        * frame will continue: any future data can still reference previously compressed data, improving compression.
+                        * note : multithreaded compression will block to flush as much output as possible. */
+    ZSTD_e_end=2       /* flush any remaining data _and_ close current frame.
+                        * note that frame is only closed after compressed data is fully flushed (return value == 0).
+                        * After that point, any additional data starts a new frame.
+                        * note : each frame is independent (does not reference any content from previous frame).
+                        * note : multithreaded compression will block to flush as much output as possible. */
+} ZSTD_EndDirective;
 
+/*! ZSTD_compressStream2() :
+ *  Behaves about the same as ZSTD_compressStream, with additional control on end directive.
+ *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
+ *  - Compression parameters cannot be changed once compression is started (except for a list of exceptions in multi-threading mode)
+ *  - output->pos must be <= dstCapacity, input->pos must be <= srcSize
+ *  - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
+ *  - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
+ *  - When nbWorkers>=1, function is non-blocking : it just acquires a copy of input, distributes jobs to internal worker threads, flushes whatever output is available,
+ *                                                  and then immediately returns, just indicating that there is some data remaining to be flushed.
+ *                                                  The function nonetheless guarantees forward progress : it will return only after it has read or written at least one byte.
+ *  - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.
+ *  - @return provides a minimum amount of data remaining to be flushed from internal buffers
+ *            or an error code, which can be tested using ZSTD_isError().
+ *            if @return != 0, flush is not fully completed : there is still some data left within internal buffers.
+ *            This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
+ *            For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
+ *  - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
+ *            only ZSTD_e_end or ZSTD_e_flush operations are allowed.
+ *            Before starting a new compression job, or changing compression parameters,
+ *            it is required to fully flush internal buffers.
+ */
+ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
+                                         ZSTD_outBuffer* output,
+                                         ZSTD_inBuffer* input,
+                                         ZSTD_EndDirective endOp);
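+
+/* Example (illustrative sketch) : finishing a frame with ZSTD_e_end.
+ * Assumes `cctx` is configured, `src`/`srcSize` describe the input,
+ * and `outBuf`/`outCapacity` describe a reusable output buffer.
+ *
+ *     ZSTD_inBuffer input = { src, srcSize, 0 };
+ *     size_t remaining;
+ *     do {
+ *         ZSTD_outBuffer output = { outBuf, outCapacity, 0 };
+ *         remaining = ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end);
+ *         if (ZSTD_isError(remaining)) { ... }   // handle error and stop
+ *         // ... write output.pos bytes from outBuf ...
+ *     } while (remaining != 0);   // 0 means frame fully flushed
+ */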
+
+
+/* These buffer sizes are softly recommended.
+ * They are not required : ZSTD_compressStream*() happily accepts any buffer size, for both input and output.
+ * Respecting the recommended size just makes it a bit easier for ZSTD_compressStream*(),
+ * reducing the amount of memory shuffling and buffering, resulting in minor performance savings.
+ *
+ * However, note that these recommendations are from the perspective of a C caller program.
+ * If the streaming interface is invoked from some other language,
+ * especially managed ones such as Java or Go, through a foreign function interface such as jni or cgo,
+ * a major performance rule is to reduce crossings of such an interface to an absolute minimum.
+ * It's not rare for more time to end up being spent in the interface than in compression itself.
+ * In such cases, prefer using buffers as large as practical,
+ * for both input and output, to reduce the nb of roundtrips.
+ */
 ZSTDLIB_API size_t ZSTD_CStreamInSize(void);    /**< recommended size for input buffer */
-ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block in all circumstances. */
+ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guaranteed to successfully flush at least one complete compressed block. */
 
 
+/* *****************************************************************************
+ * The following is a legacy streaming API.
+ * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
+ * It is redundant, but remains fully supported.
+ * Advanced parameters and dictionary compression can only be used through the
+ * new API.
+ ******************************************************************************/
+
+/*!
+ * Equivalent to:
+ *
+ *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ *     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
+ *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ */
+ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
+/*!
+ * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).
+ * NOTE: The return value is different. ZSTD_compressStream() returns a hint for
+ * the next read size (if non-zero and not an error). ZSTD_compressStream2()
+ * returns the minimum nb of bytes left to flush (if non-zero and not an error).
+ */
+ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */
+ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */
+ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+
 
 /*-***************************************************************************
 *  Streaming decompression - HowTo
@@ -327,101 +718,374 @@
 *  Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
 *  ZSTD_DStream objects can be re-used multiple times.
 *
-*  Use ZSTD_initDStream() to start a new decompression operation,
-*   or ZSTD_initDStream_usingDict() if decompression requires a dictionary.
-*   @return : recommended first input size
+*  Use ZSTD_initDStream() to start a new decompression operation.
+* @return : recommended first input size
+*  Alternatively, use advanced API to set specific properties.
 *
 *  Use ZSTD_decompressStream() repetitively to consume your input.
 *  The function will update both `pos` fields.
 *  If `input.pos < input.size`, some input has not been consumed.
 *  It's up to the caller to present again remaining data.
+*  The function tries to flush all data decoded immediately, respecting output buffer size.
 *  If `output.pos < output.size`, decoder has flushed everything it could.
-*  @return : 0 when a frame is completely decoded and fully flushed,
-*            an error code, which can be tested using ZSTD_isError(),
-*            any other value > 0, which means there is still some decoding to do to complete current frame.
-*            The return value is a suggested next input size (a hint to improve latency) that will never load more than the current frame.
+*  But if `output.pos == output.size`, there might be some data left within internal buffers.
+*  In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
+*  Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
+* @return : 0 when a frame is completely decoded and fully flushed,
+*        or an error code, which can be tested using ZSTD_isError(),
+*        or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
+*                                the return value is a suggested next input size (just a hint for better latency)
+*                                that will never request more than the remaining frame size.
 * *******************************************************************************/
 
 typedef ZSTD_DCtx ZSTD_DStream;  /**< DCtx and DStream are now effectively same object (>= v1.3.0) */
-                                 /* For compatibility with versions <= v1.2.0, continue to consider them separated. */
+                                 /* For compatibility with versions <= v1.2.0, prefer differentiating them. */
 /*===== ZSTD_DStream management functions =====*/
 ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
 ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);
 
 /*===== Streaming decompression functions =====*/
+
+/* This function is redundant with the advanced API and equivalent to:
+ *
+ *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ *     ZSTD_DCtx_refDDict(zds, NULL);
+ */
 ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
+
 ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
 
 ZSTDLIB_API size_t ZSTD_DStreamInSize(void);    /*!< recommended size for input buffer */
 ZSTDLIB_API size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */
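+
+/* Example (illustrative sketch) : streaming decompression loop.
+ * Assumes `zds` is a valid ZSTD_DStream*, `cSrc`/`cSrcSize` hold one frame,
+ * and `outBuf`/`outCapacity` describe a buffer of at least ZSTD_DStreamOutSize() bytes.
+ *
+ *     ZSTD_inBuffer input = { cSrc, cSrcSize, 0 };
+ *     while (input.pos < input.size) {
+ *         ZSTD_outBuffer output = { outBuf, outCapacity, 0 };
+ *         size_t const ret = ZSTD_decompressStream(zds, &output, &input);
+ *         if (ZSTD_isError(ret)) { ... }   // handle error and stop
+ *         // ... consume output.pos bytes from outBuf ; ret == 0 => frame done ...
+ *     }
+ */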
 
+
+/**************************
+*  Simple dictionary API
+***************************/
+/*! ZSTD_compress_usingDict() :
+ *  Compression at an explicit compression level using a Dictionary.
+ *  A dictionary can be any arbitrary data segment (also called a prefix),
+ *  or a buffer with specified information (see dictBuilder/zdict.h).
+ *  Note : This function loads the dictionary, resulting in significant startup delay.
+ *         It's intended for a dictionary used only once.
+ *  Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
+                                           void* dst, size_t dstCapacity,
+                                     const void* src, size_t srcSize,
+                                     const void* dict,size_t dictSize,
+                                           int compressionLevel);
+
+/*! ZSTD_decompress_usingDict() :
+ *  Decompression using a known Dictionary.
+ *  Dictionary must be identical to the one used during compression.
+ *  Note : This function loads the dictionary, resulting in significant startup delay.
+ *         It's intended for a dictionary used only once.
+ *  Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
+                                             void* dst, size_t dstCapacity,
+                                       const void* src, size_t srcSize,
+                                       const void* dict,size_t dictSize);
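+
+/* Example (illustrative sketch) : one-shot round-trip with a raw dictionary.
+ * Assumes `dictBuf`/`dictSize` hold the dictionary content and all buffers
+ * are sized appropriately; error checks are elided.
+ *
+ *     size_t const cSize = ZSTD_compress_usingDict(cctx, dst, dstCapacity,
+ *                                                  src, srcSize,
+ *                                                  dictBuf, dictSize, 3);
+ *     size_t const rSize = ZSTD_decompress_usingDict(dctx, out, outCapacity,
+ *                                                    dst, cSize,
+ *                                                    dictBuf, dictSize);
+ */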
+
+
+/***********************************
+ *  Bulk processing dictionary API
+ **********************************/
+typedef struct ZSTD_CDict_s ZSTD_CDict;
+
+/*! ZSTD_createCDict() :
+ *  When compressing multiple messages / blocks using the same dictionary, it's recommended to load it only once.
+ *  ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup cost.
+ *  ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
+ * `dictBuffer` can be released after ZSTD_CDict creation, because its content is copied within CDict.
+ *  Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate `dictBuffer` content.
+ *  Note : A ZSTD_CDict can be created from an empty dictBuffer, but it is inefficient when used to compress small data. */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
+                                         int compressionLevel);
+
+/*! ZSTD_freeCDict() :
+ *  Function frees memory allocated by ZSTD_createCDict(). */
+ZSTDLIB_API size_t      ZSTD_freeCDict(ZSTD_CDict* CDict);
+
+/*! ZSTD_compress_usingCDict() :
+ *  Compression using a digested Dictionary.
+ *  Recommended when same dictionary is used multiple times.
+ *  Note : compression level is _decided at dictionary creation time_,
+ *     and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
+ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
+                                            void* dst, size_t dstCapacity,
+                                      const void* src, size_t srcSize,
+                                      const ZSTD_CDict* cdict);
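+
+/* Example (illustrative sketch) : amortizing dictionary loading across many frames.
+ * Assumes `dictBuf`/`dictSize` hold the dictionary content.
+ *
+ *     ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3);
+ *     // reuse cdict for many compressions, possibly from several threads :
+ *     size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
+ *                                                   src, srcSize, cdict);
+ *     ZSTD_freeCDict(cdict);
+ */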
+
+
+typedef struct ZSTD_DDict_s ZSTD_DDict;
+
+/*! ZSTD_createDDict() :
+ *  Create a digested dictionary, ready to start decompression operation without startup delay.
+ *  dictBuffer can be released after DDict creation, as its content is copied inside DDict. */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
+
+/*! ZSTD_freeDDict() :
+ *  Function frees memory allocated with ZSTD_createDDict() */
+ZSTDLIB_API size_t      ZSTD_freeDDict(ZSTD_DDict* ddict);
+
+/*! ZSTD_decompress_usingDDict() :
+ *  Decompression using a digested Dictionary.
+ *  Recommended when same dictionary is used multiple times. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
+                                              void* dst, size_t dstCapacity,
+                                        const void* src, size_t srcSize,
+                                        const ZSTD_DDict* ddict);
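+
+/* Example (illustrative sketch) : the decompression-side counterpart.
+ * Assumes the frame in `dst`/`cSize` was produced with the same dictionary.
+ *
+ *     ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);
+ *     size_t const rSize = ZSTD_decompress_usingDDict(dctx, out, outCapacity,
+ *                                                     dst, cSize, ddict);
+ *     ZSTD_freeDDict(ddict);
+ */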
+
+
+/********************************
+ *  Dictionary helper functions
+ *******************************/
+
+/*! ZSTD_getDictID_fromDict() :
+ *  Provides the dictID stored within dictionary.
+ *  if @return == 0, the dictionary is not conformant with Zstandard specification.
+ *  It can still be loaded, but as a content-only dictionary. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
+
+/*! ZSTD_getDictID_fromDDict() :
+ *  Provides the dictID of the dictionary loaded into `ddict`.
+ *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
+
+/*! ZSTD_getDictID_fromFrame() :
+ *  Provides the dictID required to decompress the frame stored within `src`.
+ *  If @return == 0, the dictID could not be decoded.
+ *  This could be for one of the following reasons :
+ *  - The frame does not require a dictionary to be decoded (most common case).
+ *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
+ *    Note : this use case also happens when using a non-conformant dictionary.
+ *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
+ *  - This is not a Zstandard frame.
+ *  To identify the exact failure cause, use ZSTD_getFrameHeader(), which will provide a more precise error code. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
+
+
+/*******************************************************************************
+ * Advanced dictionary and prefix API
+ *
+ * This API allows dictionaries to be used with ZSTD_compress2(),
+ * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
+ * only reset with the context is reset with ZSTD_reset_parameters or
+ * ZSTD_reset_session_and_parameters. Prefixes are single-use.
+ ******************************************************************************/
+
+
+/*! ZSTD_CCtx_loadDictionary() :
+ *  Create an internal CDict from `dict` buffer.
+ *  Decompression will have to use same dictionary.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ *  Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
+ *           meaning "return to no-dictionary mode".
+ *  Note 1 : Dictionary is sticky ; it will be used for all future compressed frames.
+ *           To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters).
+ *  Note 2 : Loading a dictionary involves building tables.
+ *           It's also a CPU consuming operation, with non-negligible impact on latency.
+ *           Tables are dependent on compression parameters, and for this reason,
+ *           compression parameters can no longer be changed after loading a dictionary.
+ *  Note 3 :`dict` content will be copied internally.
+ *           Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
+ *           In such a case, dictionary buffer must outlive its users.
+ *  Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
+ *           to precisely select how dictionary content must be interpreted. */
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_CCtx_refCDict() :
+ *  Reference a prepared dictionary, to be used for all next compressed frames.
+ *  Note that compression parameters are enforced from within CDict,
+ *  and supersede any compression parameter previously set within CCtx.
+ *  The parameters ignored are labeled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
+ *  The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.
+ *  The dictionary will remain valid for future compressed frames using same CCtx.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ *  Special : Referencing a NULL CDict means "return to no-dictionary mode".
+ *  Note 1 : Currently, only one dictionary can be managed.
+ *           Referencing a new dictionary effectively "discards" any previous one.
+ *  Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */
+ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
+
+/*! ZSTD_CCtx_refPrefix() :
+ *  Reference a prefix (single-usage dictionary) for next compressed frame.
+ *  A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).
+ *  Decompression will need same prefix to properly regenerate data.
+ *  Compressing with a prefix is similar in outcome to performing a diff and compressing it,
+ *  but performs much faster, especially during decompression (compression speed is tunable with compression level).
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ *  Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
+ *  Note 1 : Prefix buffer is referenced. It **must** outlive compression.
+ *           Its content must remain unmodified during compression.
+ *  Note 2 : If the intention is to diff some large src data blob with some prior version of itself,
+ *           ensure that the window size is large enough to contain the entire source.
+ *           See ZSTD_c_windowLog.
+ *  Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
+ *           It's a CPU consuming operation, with non-negligible impact on latency.
+ *           If there is a need to use the same prefix multiple times, consider loadDictionary instead.
+ *  Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).
+ *           Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */
+ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
+                                 const void* prefix, size_t prefixSize);
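+
+/* Example (illustrative sketch) : diff-style compression of a new version
+ * against an old one. Assumes windowLog is large enough to cover `oldSize`
+ * (see ZSTD_c_windowLog), and that decompression references the same prefix.
+ *
+ *     ZSTD_CCtx_refPrefix(cctx, oldVersion, oldSize);   // single-use reference
+ *     size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity,
+ *                                         newVersion, newSize);
+ *     // decoder side :
+ *     ZSTD_DCtx_refPrefix(dctx, oldVersion, oldSize);
+ */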
+
+/*! ZSTD_DCtx_loadDictionary() :
+ *  Create an internal DDict from dict buffer,
+ *  to be used to decompress next frames.
+ *  The dictionary remains valid for all future frames, until explicitly invalidated.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ *  Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
+ *            meaning "return to no-dictionary mode".
+ *  Note 1 : Loading a dictionary involves building tables,
+ *           which has a non-negligible impact on CPU usage and latency.
+ *           It's recommended to "load once, use many times", to amortize the cost
+ *  Note 2 :`dict` content will be copied internally, so `dict` can be released after loading.
+ *           Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead.
+ *  Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of
+ *           how dictionary content is loaded and interpreted.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_DCtx_refDDict() :
+ *  Reference a prepared dictionary, to be used to decompress next frames.
+ *  The dictionary remains active for decompression of future frames using same DCtx.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ *  Note 1 : Currently, only one dictionary can be managed.
+ *           Referencing a new dictionary effectively "discards" any previous one.
+ *  Special: referencing a NULL DDict means "return to no-dictionary mode".
+ *  Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+/*! ZSTD_DCtx_refPrefix() :
+ *  Reference a prefix (single-usage dictionary) to decompress next frame.
+ *  This is the reverse operation of ZSTD_CCtx_refPrefix(),
+ *  and must use the same prefix as the one used during compression.
+ *  Prefix is **only used once**. Reference is discarded at end of frame.
+ *  End of frame is reached when ZSTD_decompressStream() returns 0.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ *  Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary
+ *  Note 2 : Prefix buffer is referenced. It **must** outlive decompression.
+ *           Prefix buffer must remain unmodified up to the end of frame,
+ *           reached when ZSTD_decompressStream() returns 0.
+ *  Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
+ *           Use ZSTD_DCtx_refPrefix_advanced() to alter dictMode (Experimental section)
+ *  Note 4 : Referencing a raw content prefix has almost no CPU or memory cost.
+ *           A full dictionary is more costly, as it requires building tables.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx,
+                                 const void* prefix, size_t prefixSize);
+
+/* ===   Memory management   === */
+
+/*! ZSTD_sizeof_*() :
+ *  These functions give the _current_ memory usage of selected object.
+ *  Note that object memory usage can evolve (increase or decrease) over time. */
+ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
+ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
+ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
+ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
+ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
+
 #endif  /* ZSTD_H_235446 */
 
 
-
-/****************************************************************************************
- * START OF ADVANCED AND EXPERIMENTAL FUNCTIONS
- * The definitions in this section are considered experimental.
- * They should never be used with a dynamic library, as prototypes may change in the future.
+/* **************************************************************************************
+ *   ADVANCED AND EXPERIMENTAL FUNCTIONS
+ ****************************************************************************************
+ * The definitions in the following section are considered experimental.
  * They are provided for advanced scenarios.
+ * They should never be used with a dynamic library, as prototypes may change in the future.
  * Use them only in association with static linking.
  * ***************************************************************************************/
 
 #if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
 #define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
 
-/* --- Constants ---*/
-#define ZSTD_MAGICNUMBER            0xFD2FB528   /* >= v0.8.0 */
-#define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50U
-#define ZSTD_MAGIC_DICTIONARY       0xEC30A437   /* >= v0.7.0 */
+/****************************************************************************************
+ *   experimental API (static linking only)
+ ****************************************************************************************
+ * The following symbols and constants
+ * are not planned to join "stable API" status in the near future.
+ * They can still change in future versions.
+ * Some of them are planned to remain in the static_only section indefinitely.
+ * Some of them might be removed in the future (especially when redundant with existing stable functions)
+ * ***************************************************************************************/
 
-#define ZSTD_WINDOWLOG_MAX_32   30
-#define ZSTD_WINDOWLOG_MAX_64   31
-#define ZSTD_WINDOWLOG_MAX    ((unsigned)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
-#define ZSTD_WINDOWLOG_MIN      10
-#define ZSTD_HASHLOG_MAX      ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30)
-#define ZSTD_HASHLOG_MIN         6
-#define ZSTD_CHAINLOG_MAX_32    29
-#define ZSTD_CHAINLOG_MAX_64    30
-#define ZSTD_CHAINLOG_MAX     ((unsigned)(sizeof(size_t) == 4 ? ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64))
-#define ZSTD_CHAINLOG_MIN       ZSTD_HASHLOG_MIN
-#define ZSTD_HASHLOG3_MAX       17
-#define ZSTD_SEARCHLOG_MAX     (ZSTD_WINDOWLOG_MAX-1)
-#define ZSTD_SEARCHLOG_MIN       1
-#define ZSTD_SEARCHLENGTH_MAX    7   /* only for ZSTD_fast, other strategies are limited to 6 */
-#define ZSTD_SEARCHLENGTH_MIN    3   /* only for ZSTD_btopt, other strategies are limited to 4 */
-#define ZSTD_TARGETLENGTH_MIN    1   /* only used by btopt, btultra and btfast */
-#define ZSTD_LDM_MINMATCH_MIN    4
-#define ZSTD_LDM_MINMATCH_MAX 4096
-#define ZSTD_LDM_BUCKETSIZELOG_MAX 8
-
-#define ZSTD_FRAMEHEADERSIZE_PREFIX 5   /* minimum input size to know frame header size */
+#define ZSTD_FRAMEHEADERSIZE_PREFIX 5   /* minimum input size required to query frame header size */
 #define ZSTD_FRAMEHEADERSIZE_MIN    6
-#define ZSTD_FRAMEHEADERSIZE_MAX   18   /* for static allocation */
-static const size_t ZSTD_frameHeaderSize_prefix = ZSTD_FRAMEHEADERSIZE_PREFIX;
-static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN;
-static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX;
-static const size_t ZSTD_skippableHeaderSize = 8;  /* magic number + skippable frame length */
+#define ZSTD_FRAMEHEADERSIZE_MAX   18   /* can be useful for static allocation */
+#define ZSTD_SKIPPABLEHEADERSIZE    8
+
+/* compression parameter bounds */
+#define ZSTD_WINDOWLOG_MAX_32    30
+#define ZSTD_WINDOWLOG_MAX_64    31
+#define ZSTD_WINDOWLOG_MAX     ((int)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
+#define ZSTD_WINDOWLOG_MIN       10
+#define ZSTD_HASHLOG_MAX       ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30)
+#define ZSTD_HASHLOG_MIN          6
+#define ZSTD_CHAINLOG_MAX_32     29
+#define ZSTD_CHAINLOG_MAX_64     30
+#define ZSTD_CHAINLOG_MAX      ((int)(sizeof(size_t) == 4 ? ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64))
+#define ZSTD_CHAINLOG_MIN        ZSTD_HASHLOG_MIN
+#define ZSTD_SEARCHLOG_MAX      (ZSTD_WINDOWLOG_MAX-1)
+#define ZSTD_SEARCHLOG_MIN        1
+#define ZSTD_MINMATCH_MAX         7   /* only for ZSTD_fast, other strategies are limited to 6 */
+#define ZSTD_MINMATCH_MIN         3   /* only for ZSTD_btopt+, faster strategies are limited to 4 */
+#define ZSTD_TARGETLENGTH_MAX    ZSTD_BLOCKSIZE_MAX
+#define ZSTD_TARGETLENGTH_MIN     0   /* note : comparing this constant to an unsigned results in a tautological test */
+#define ZSTD_STRATEGY_MIN        ZSTD_fast
+#define ZSTD_STRATEGY_MAX        ZSTD_btultra2
 
 
-/*--- Advanced types ---*/
-typedef enum { ZSTD_fast=1, ZSTD_dfast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2,
-               ZSTD_btlazy2, ZSTD_btopt, ZSTD_btultra } ZSTD_strategy;   /* from faster to stronger */
+#define ZSTD_OVERLAPLOG_MIN       0
+#define ZSTD_OVERLAPLOG_MAX       9
+
+#define ZSTD_WINDOWLOG_LIMIT_DEFAULT 27   /* by default, the streaming decoder will refuse any frame
+                                           * requiring larger than (1<<ZSTD_WINDOWLOG_LIMIT_DEFAULT) window size,
+                                           * to preserve host's memory from unreasonable requirements.
+                                           * This limit can be overridden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,).
+                                           * The limit does not apply for one-pass decoders (such as ZSTD_decompress()), since no additional memory is allocated */
+
+
+/* LDM parameter bounds */
+#define ZSTD_LDM_HASHLOG_MIN      ZSTD_HASHLOG_MIN
+#define ZSTD_LDM_HASHLOG_MAX      ZSTD_HASHLOG_MAX
+#define ZSTD_LDM_MINMATCH_MIN        4
+#define ZSTD_LDM_MINMATCH_MAX     4096
+#define ZSTD_LDM_BUCKETSIZELOG_MIN   1
+#define ZSTD_LDM_BUCKETSIZELOG_MAX   8
+#define ZSTD_LDM_HASHRATELOG_MIN     0
+#define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
+
+/* Advanced parameter bounds */
+#define ZSTD_TARGETCBLOCKSIZE_MIN   64
+#define ZSTD_TARGETCBLOCKSIZE_MAX   ZSTD_BLOCKSIZE_MAX
+
+/* internal */
+#define ZSTD_HASHLOG3_MAX           17
+
+
+/* ---  Advanced types  --- */
+
+typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;
 
 typedef struct {
-    unsigned windowLog;      /**< largest match distance : larger == more compression, more memory needed during decompression */
-    unsigned chainLog;       /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
-    unsigned hashLog;        /**< dispatch table : larger == faster, more memory */
-    unsigned searchLog;      /**< nb of searches : larger == more compression, slower */
-    unsigned searchLength;   /**< match length searched : larger == faster decompression, sometimes less compression */
-    unsigned targetLength;   /**< acceptable match size for optimal parser (only) : larger == more compression, slower */
-    ZSTD_strategy strategy;
+    unsigned windowLog;       /**< largest match distance : larger == more compression, more memory needed during decompression */
+    unsigned chainLog;        /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
+    unsigned hashLog;         /**< dispatch table : larger == faster, more memory */
+    unsigned searchLog;       /**< nb of searches : larger == more compression, slower */
+    unsigned minMatch;        /**< match length searched : larger == faster decompression, sometimes less compression */
+    unsigned targetLength;    /**< acceptable match size for optimal parser (only) : larger == more compression, slower */
+    ZSTD_strategy strategy;   /**< see ZSTD_strategy definition above */
 } ZSTD_compressionParameters;
 
 typedef struct {
-    unsigned contentSizeFlag; /**< 1: content size will be in frame header (when known) */
-    unsigned checksumFlag;    /**< 1: generate a 32-bits checksum at end of frame, for error detection */
-    unsigned noDictIDFlag;    /**< 1: no dictID will be saved into frame header (if dictionary compression) */
+    int contentSizeFlag; /**< 1: content size will be in frame header (when known) */
+    int checksumFlag;    /**< 1: generate a 32-bit checksum using the XXH64 algorithm at end of frame, for error detection */
+    int noDictIDFlag;    /**< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression) */
 } ZSTD_frameParameters;
 
 typedef struct {
@@ -429,37 +1093,83 @@
     ZSTD_frameParameters fParams;
 } ZSTD_parameters;
 
-typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;
-
 typedef enum {
-    ZSTD_dct_auto=0,      /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
-    ZSTD_dct_rawContent,  /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
-    ZSTD_dct_fullDict     /* refuses to load a dictionary if it does not respect Zstandard's specification */
+    ZSTD_dct_auto = 0,       /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
+    ZSTD_dct_rawContent = 1, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
+    ZSTD_dct_fullDict = 2    /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */
 } ZSTD_dictContentType_e;
 
 typedef enum {
-    ZSTD_dlm_byCopy = 0, /**< Copy dictionary content internally */
-    ZSTD_dlm_byRef,      /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
+    ZSTD_dlm_byCopy = 0,  /**< Copy dictionary content internally */
+    ZSTD_dlm_byRef = 1,   /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
 } ZSTD_dictLoadMethod_e;
 
+typedef enum {
+    /* Open question : should we have a format ZSTD_f_auto ?
+     * Today, it would mean exactly the same as ZSTD_f_zstd1.
+     * But, in the future, should several formats become supported,
+     * on the compression side, it would mean "default format".
+     * On the decompression side, it would mean "automatic format detection",
+     * so that ZSTD_f_zstd1 would mean "accept *only* zstd frames".
+     * Since meaning is a little different, another option could be to define different enums for compression and decompression.
+     * This question could be kept for later, when there are actually multiple formats to support,
+     * but there is also the question of pinning enum values, and pinning value `0` is especially important */
+    ZSTD_f_zstd1 = 0,           /* zstd frame format, specified in zstd_compression_format.md (default) */
+    ZSTD_f_zstd1_magicless = 1, /* Variant of zstd frame format, without initial 4-bytes magic number.
+                                 * Useful to save 4 bytes per generated frame.
+                                 * The decoder cannot automatically recognise this format, requiring this instruction.
+} ZSTD_format_e;
+
+typedef enum {
+    /* Note: this enum and the behavior it controls are effectively internal
+     * implementation details of the compressor. They are expected to continue
+     * to evolve and should be considered only in the context of extremely
+     * advanced performance tuning.
+     *
+     * Zstd currently supports the use of a CDict in two ways:
+     *
+     * - The contents of the CDict can be copied into the working context. This
+     *   means that the compression can search both the dictionary and input
+     *   while operating on a single set of internal tables. This makes
+     *   the compression faster per-byte of input. However, the initial copy of
+     *   the CDict's tables incurs a fixed cost at the beginning of the
+     *   compression. For small compressions (< 8 KB), that copy can dominate
+     *   the cost of the compression.
+     *
+     * - The CDict's tables can be used in-place. In this model, compression is
+     *   slower per input byte, because the compressor has to search two sets of
+     *   tables. However, this model incurs no start-up cost (as long as the
+     *   working context's tables can be reused). For small inputs, this can be
+     *   faster than copying the CDict's tables.
+     *
+     * Zstd has a simple internal heuristic that selects which strategy to use
+     * at the beginning of a compression. However, if experimentation shows that
+     * Zstd is making poor choices, it is possible to override that choice with
+     * this enum.
+     */
+    ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */
+    ZSTD_dictForceAttach   = 1, /* Never copy the dictionary. */
+    ZSTD_dictForceCopy     = 2, /* Always copy the dictionary. */
+} ZSTD_dictAttachPref_e;
+
+typedef enum {
+  ZSTD_lcm_auto = 0,          /**< Automatically determine the compression mode based on the compression level.
+                               *   Negative compression levels will emit uncompressed literals, while positive compression
+                               *   levels will emit compressed literals. */
+  ZSTD_lcm_huffman = 1,       /**< Always attempt Huffman compression. Uncompressed literals will still be
+                               *   emitted if Huffman compression is not profitable. */
+  ZSTD_lcm_uncompressed = 2,  /**< Always emit uncompressed literals. */
+} ZSTD_literalCompressionMode_e;
 
 
 /***************************************
 *  Frame size functions
 ***************************************/
 
-/*! ZSTD_findFrameCompressedSize() :
- *  `src` should point to the start of a ZSTD encoded frame or skippable frame
- *  `srcSize` must be >= first frame size
- *  @return : the compressed size of the first frame starting at `src`,
- *            suitable to pass to `ZSTD_decompress` or similar,
- *            or an error code if input is invalid */
-ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
-
 /*! ZSTD_findDecompressedSize() :
- *  `src` should point the start of a series of ZSTD encoded and/or skippable frames
+ *  `src` should point to the start of a series of ZSTD encoded and/or skippable frames
  *  `srcSize` must be the _exact_ size of this series
- *       (i.e. there should be a frame boundary exactly at `srcSize` bytes after `src`)
+ *       (i.e. there should be a frame boundary at `src + srcSize`)
  *  @return : - decompressed size of all data in all successive frames
  *            - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
  *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR
@@ -479,10 +1189,25 @@
  *            however it does mean that all frame data must be present and valid. */
 ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
 
+/*! ZSTD_decompressBound() :
+ *  `src` should point to the start of a series of ZSTD encoded and/or skippable frames
+ *  `srcSize` must be the _exact_ size of this series
+ *       (i.e. there should be a frame boundary at `src + srcSize`)
+ *  @return : - upper-bound for the decompressed size of all data in all successive frames
+ *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR
+ *
+ *  note 1  : an error can occur if `src` contains an invalid or incorrectly formatted frame.
+ *  note 2  : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.
+ *            in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value.
+ *  note 3  : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:
+ *              upper-bound = # blocks * min(128 KB, Window_Size)
+ */
+ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
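
One plausible usage pattern, assuming `src`/`srcSize` hold a complete series of frames (hypothetical helper; malloc failure check omitted):

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_decompressBound is experimental */
    #include <zstd.h>
    #include <stdlib.h>

    /* Size the destination from the bound, then decompress in one pass. */
    static size_t boundedDecompress(const void* src, size_t srcSize, void** dstPtr)
    {
        unsigned long long const bound = ZSTD_decompressBound(src, srcSize);
        if (bound == ZSTD_CONTENTSIZE_ERROR) return 0;   /* invalid frame(s) */
        *dstPtr = malloc((size_t)bound);
        return ZSTD_decompress(*dstPtr, (size_t)bound, src, srcSize);
    }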
+
 /*! ZSTD_frameHeaderSize() :
-*   `src` should point to the start of a ZSTD frame
-*   `srcSize` must be >= ZSTD_frameHeaderSize_prefix.
-*   @return : size of the Frame Header */
+ *  srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.
+ * @return : size of the Frame Header,
+ *           or an error code (if srcSize is too small) */
 ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
 
 
@@ -490,16 +1215,6 @@
 *  Memory management
 ***************************************/
 
-/*! ZSTD_sizeof_*() :
- *  These functions give the current memory usage of selected object.
- *  Object memory usage can evolve when re-used. */
-ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
-ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
-ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
-ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
-ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
-ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
-
 /*! ZSTD_estimate*() :
  *  These functions make it possible to estimate memory usage
  *  of a future {D,C}Ctx, before its creation.
@@ -507,7 +1222,7 @@
  *  It will also consider src size to be arbitrarily "large", which is worst case.
  *  If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
  *  ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
- *  ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_p_nbWorkers is >= 1.
+ *  ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
  *  Note : CCtx size estimation is only correct for single-threaded compression. */
 ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
 ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
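
A sketch pairing the estimate with ZSTD_initStaticCCtx() (declared elsewhere in this header) for allocation-free compression; level 3 is an arbitrary choice:

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_initStaticCCtx is experimental */
    #include <zstd.h>
    #include <stdlib.h>

    /* Reserve a worst-case workspace for single-threaded level-3 compression,
     * then build the CCtx inside it: no further allocation will occur. */
    static ZSTD_CCtx* makeStaticCCtx(void)
    {
        size_t const ctxSize = ZSTD_estimateCCtxSize(3);
        void* const workspace = malloc(ctxSize);          /* NULL check omitted */
        return ZSTD_initStaticCCtx(workspace, ctxSize);
    }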
@@ -519,7 +1234,7 @@
  *  It will also consider src size to be arbitrarily "large", which is worst case.
  *  If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
  *  ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
- *  ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_p_nbWorkers is >= 1.
+ *  ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
  *  Note : CStream size estimation is only correct for single-threaded compression.
  *  ZSTD_DStream memory budget depends on window Size.
  *  This information can be passed manually, using ZSTD_estimateDStreamSize,
@@ -582,6 +1297,7 @@
                                         ZSTD_dictLoadMethod_e dictLoadMethod,
                                         ZSTD_dictContentType_e dictContentType);
 
+
 /*! Custom memory allocation :
  *  These prototypes make it possible to pass your own allocation/free functions.
  *  ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.
@@ -616,46 +1332,201 @@
 
 /*! ZSTD_createCDict_byReference() :
  *  Create a digested dictionary for compression
- *  Dictionary content is simply referenced, and therefore stays in dictBuffer.
- *  It is important that dictBuffer outlives CDict, it must remain read accessible throughout the lifetime of CDict */
+ *  Dictionary content is just referenced, not duplicated.
+ *  As a consequence, `dictBuffer` **must** outlive CDict,
+ *  and its content must remain unmodified throughout the lifetime of CDict. */
 ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
 
 /*! ZSTD_getCParams() :
-*   @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
-*   `estimatedSrcSize` value is optional, select 0 if not known */
+ * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
+ * `estimatedSrcSize` value is optional, select 0 if not known */
 ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
 
 /*! ZSTD_getParams() :
-*   same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
-*   All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
+ *  same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
+ *  All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
 ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
 
 /*! ZSTD_checkCParams() :
-*   Ensure param values remain within authorized range */
+ *  Ensure param values remain within authorized range.
+ * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */
 ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
 
 /*! ZSTD_adjustCParams() :
  *  optimize params for a given `srcSize` and `dictSize`.
- *  both values are optional, select `0` if unknown. */
+ * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.
+ * `dictSize` must be `0` when there is no dictionary.
+ *  cPar can be invalid : all parameters will be clamped within valid range in the @return struct.
+ *  This function never fails (wide contract) */
 ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
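
For example, a sketch deriving parameters for a known 4 KB input (level and size are arbitrary; passing the size to both calls is harmless):

    #define ZSTD_STATIC_LINKING_ONLY   /* cParams APIs are experimental */
    #include <zstd.h>

    /* Start from level-19 defaults, then clamp the tables for a 4 KB input. */
    static ZSTD_compressionParameters smallInputCParams(void)
    {
        ZSTD_compressionParameters const cParams = ZSTD_getCParams(19, 4096, 0);
        return ZSTD_adjustCParams(cParams, 4096, 0);   /* wide contract: never fails */
    }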
 
 /*! ZSTD_compress_advanced() :
-*   Same as ZSTD_compress_usingDict(), with fine-tune control over each compression parameter */
-ZSTDLIB_API size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
-                                  void* dst, size_t dstCapacity,
-                            const void* src, size_t srcSize,
-                            const void* dict,size_t dictSize,
-                                  ZSTD_parameters params);
+ *  Same as ZSTD_compress_usingDict(), with fine-tune control over compression parameters (by structure) */
+ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
+                                          void* dst, size_t dstCapacity,
+                                    const void* src, size_t srcSize,
+                                    const void* dict,size_t dictSize,
+                                          ZSTD_parameters params);
 
 /*! ZSTD_compress_usingCDict_advanced() :
-*   Same as ZSTD_compress_usingCDict(), with fine-tune control over frame parameters */
+ *  Same as ZSTD_compress_usingCDict(), with fine-tune control over frame parameters */
 ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
-                                  void* dst, size_t dstCapacity,
-                            const void* src, size_t srcSize,
-                            const ZSTD_CDict* cdict, ZSTD_frameParameters fParams);
+                                              void* dst, size_t dstCapacity,
+                                        const void* src, size_t srcSize,
+                                        const ZSTD_CDict* cdict,
+                                              ZSTD_frameParameters fParams);
 
 
-/*--- Advanced decompression functions ---*/
+/*! ZSTD_CCtx_loadDictionary_byReference() :
+ *  Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.
+ *  It saves some memory, but also requires that `dict` outlives its usage within `cctx` */
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_CCtx_loadDictionary_advanced() :
+ *  Same as ZSTD_CCtx_loadDictionary(), but gives finer control over
+ *  how to load the dictionary (by copy ? by reference ?)
+ *  and how to interpret it (automatic ? force raw mode ? full mode only ?) */
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
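
A minimal sketch (hypothetical helper), assuming `dictBuf` outlives every compression that uses it:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Load by reference (no copy) and require a fully-formed zstd dictionary,
     * rejecting raw content. dictBuf must stay valid and unmodified. */
    static size_t loadRefDict(ZSTD_CCtx* cctx, const void* dictBuf, size_t dictSize)
    {
        return ZSTD_CCtx_loadDictionary_advanced(cctx, dictBuf, dictSize,
                                                 ZSTD_dlm_byRef, ZSTD_dct_fullDict);
    }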
+
+/*! ZSTD_CCtx_refPrefix_advanced() :
+ *  Same as ZSTD_CCtx_refPrefix(), but gives finer control over
+ *  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
+ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
+
+/* ===   experimental parameters   === */
+/* these parameters can be used with ZSTD_setParameter()
+ * they are not guaranteed to remain supported in the future */
+
+ /* Enables rsyncable mode,
+  * which makes compressed files more rsync friendly
+  * by adding periodic synchronization points to the compressed data.
+  * The target average block size is ZSTD_c_jobSize / 2.
+  * It's possible to modify the job size to increase or decrease
+  * the granularity of the synchronization point.
+  * If the jobSize is smaller than the window size,
+  * compression ratio will degrade.
+  * NOTE 1: rsyncable mode only works when multithreading is enabled.
+  * NOTE 2: rsyncable performs poorly in combination with long range mode,
+  * since it will decrease the effectiveness of synchronization points,
+  * though mileage may vary.
+  * NOTE 3: Rsyncable mode limits maximum compression speed to ~400 MB/s.
+  * If the selected compression level is already running significantly slower,
+  * the overall speed won't be significantly impacted.
+  */
+ #define ZSTD_c_rsyncable ZSTD_c_experimentalParam1
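
A sketch of enabling it (the worker count is an arbitrary assumption; rsyncable is rejected when nbWorkers == 0):

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_c_rsyncable is experimental */
    #include <zstd.h>

    static void enableRsyncable(ZSTD_CCtx* cctx)
    {
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 2);   /* rsyncable needs MT */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_rsyncable, 1);
    }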
+
+/* Select a compression format.
+ * The value must be of type ZSTD_format_e.
+ * See ZSTD_format_e enum definition for details */
+#define ZSTD_c_format ZSTD_c_experimentalParam2
+
+/* Force back-reference distances to remain < windowSize,
+ * even when referencing into Dictionary content (default:0) */
+#define ZSTD_c_forceMaxWindow ZSTD_c_experimentalParam3
+
+/* Controls whether the contents of a CDict
+ * are used in place, or copied into the working context.
+ * Accepts values from the ZSTD_dictAttachPref_e enum.
+ * See the comments on that enum for an explanation of the feature. */
+#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4
+
+/* Controls how the literals are compressed (default is auto).
+ * The value must be of type ZSTD_literalCompressionMode_e.
+ * See ZSTD_literalCompressionMode_e enum definition for details.
+ */
+#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5
+
+/* Tries to fit compressed block size to be around targetCBlockSize.
+ * No target when targetCBlockSize == 0.
+ * There is no guarantee on compressed block size (default:0) */
+#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
+
+/*! ZSTD_CCtx_getParameter() :
+ *  Get the requested compression parameter value, selected by enum ZSTD_cParameter,
+ *  and store it into int* value.
+ * @return : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);
+
+
+/*! ZSTD_CCtx_params :
+ *  Quick howto :
+ *  - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
+ *  - ZSTD_CCtxParams_setParameter() : Push parameters one by one into
+ *                                     an existing ZSTD_CCtx_params structure.
+ *                                     This is similar to
+ *                                     ZSTD_CCtx_setParameter().
+ *  - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to
+ *                                    an existing CCtx.
+ *                                    These parameters will be applied to
+ *                                    all subsequent frames.
+ *  - ZSTD_compressStream2() : Do compression using the CCtx.
+ *  - ZSTD_freeCCtxParams() : Free the memory.
+ *
+ *  This can be used with ZSTD_estimateCCtxSize_usingCCtxParams()
+ *  for static allocation of CCtx for single-threaded compression.
+ */
+ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
+ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);
+
+/*! ZSTD_CCtxParams_reset() :
+ *  Reset params to default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
+
+/*! ZSTD_CCtxParams_init() :
+ *  Initializes the compression parameters of cctxParams according to
+ *  compression level. All other parameters are reset to their default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);
+
+/*! ZSTD_CCtxParams_init_advanced() :
+ *  Initializes the compression and frame parameters of cctxParams according to
+ *  params. All other parameters are reset to their default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
+
+/*! ZSTD_CCtxParams_setParameter() :
+ *  Similar to ZSTD_CCtx_setParameter.
+ *  Set one compression parameter, selected by enum ZSTD_cParameter.
+ *  Parameters must be applied to a ZSTD_CCtx using ZSTD_CCtx_setParametersUsingCCtxParams().
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
+
+/*! ZSTD_CCtxParams_getParameter() :
+ * Similar to ZSTD_CCtx_getParameter.
+ * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
+
+/*! ZSTD_CCtx_setParametersUsingCCtxParams() :
+ *  Apply a set of ZSTD_CCtx_params to the compression context.
+ *  This can be done even after compression is started,
+ *    if nbWorkers==0, this will have no impact until a new compression is started.
+ *    if nbWorkers>=1, new parameters will be picked up at next job,
+ *       with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
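
The quick howto above, as a compilable sketch (level and checksum values are arbitrary):

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_CCtx_params is experimental */
    #include <zstd.h>

    /* Build a parameter set once, apply it to a CCtx, then free it:
     * the CCtx keeps its own copy of the applied values. */
    static size_t applyParams(ZSTD_CCtx* cctx)
    {
        ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
        ZSTD_CCtxParams_setParameter(params, ZSTD_c_compressionLevel, 5);
        ZSTD_CCtxParams_setParameter(params, ZSTD_c_checksumFlag, 1);
        size_t const ret = ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);
        ZSTD_freeCCtxParams(params);
        return ret;
    }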
+
+/*! ZSTD_compressStream2_simpleArgs() :
+ *  Same as ZSTD_compressStream2(),
+ *  but using only integral types as arguments.
+ *  This variant might be helpful for binders from dynamic languages
+ * which have trouble handling structures containing memory pointers.
+ */
+ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs (
+                            ZSTD_CCtx* cctx,
+                            void* dst, size_t dstCapacity, size_t* dstPos,
+                      const void* src, size_t srcSize, size_t* srcPos,
+                            ZSTD_EndDirective endOp);
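
A one-shot sketch, assuming `dstCapacity >= ZSTD_compressBound(srcSize)` so the frame completes in a single call:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Compress src into dst in one call, tracking positions as plain size_t. */
    static size_t compressSimple(ZSTD_CCtx* cctx,
                                 void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize)
    {
        size_t dstPos = 0, srcPos = 0;
        size_t const remaining = ZSTD_compressStream2_simpleArgs(cctx,
                                        dst, dstCapacity, &dstPos,
                                        src, srcSize, &srcPos, ZSTD_e_end);
        if (ZSTD_isError(remaining)) return remaining;
        return dstPos;   /* remaining == 0 here, so dstPos bytes form a full frame */
    }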
+
+
+/***************************************
+*  Advanced decompression functions
+***************************************/
 
 /*! ZSTD_isFrame() :
  *  Tells if the content of `buffer` starts with a valid Frame Identifier.
@@ -671,78 +1542,201 @@
  *  it must remain read accessible throughout the lifetime of DDict */
 ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
 
+/*! ZSTD_DCtx_loadDictionary_byReference() :
+ *  Same as ZSTD_DCtx_loadDictionary(),
+ *  but references `dict` content instead of copying it into `dctx`.
+ *  This saves memory if `dict` remains around.
+ *  However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
 
-/*! ZSTD_getDictID_fromDict() :
- *  Provides the dictID stored within dictionary.
- *  if @return == 0, the dictionary is not conformant with Zstandard specification.
- *  It can still be loaded, but as a content-only dictionary. */
-ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
+/*! ZSTD_DCtx_loadDictionary_advanced() :
+ *  Same as ZSTD_DCtx_loadDictionary(),
+ *  but gives direct control over
+ *  how to load the dictionary (by copy ? by reference ?)
+ *  and how to interpret it (automatic ? force raw mode ? full mode only ?). */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
 
-/*! ZSTD_getDictID_fromDDict() :
- *  Provides the dictID of the dictionary loaded into `ddict`.
- *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
- *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
-ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
+/*! ZSTD_DCtx_refPrefix_advanced() :
+ *  Same as ZSTD_DCtx_refPrefix(), but gives finer control over
+ *  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
+ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
 
-/*! ZSTD_getDictID_fromFrame() :
- *  Provides the dictID required to decompressed the frame stored within `src`.
- *  If @return == 0, the dictID could not be decoded.
- *  This could for one of the following reasons :
- *  - The frame does not require a dictionary to be decoded (most common case).
- *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information.
- *    Note : this use case also happens when using a non-conformant dictionary.
- *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
- *  - This is not a Zstandard frame.
- *  When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. */
-ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
+/*! ZSTD_DCtx_setMaxWindowSize() :
+ *  Refuses allocating internal buffers for frames requiring a window size larger than provided limit.
+ *  This protects a decoder context from reserving too much memory for itself (potential attack scenario).
+ *  This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
+ *  By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT)
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
+
+/* ZSTD_d_format
+ * experimental parameter,
+ * allowing selection between ZSTD_format_e input compression formats
+ */
+#define ZSTD_d_format ZSTD_d_experimentalParam1
+
+/*! ZSTD_DCtx_setFormat() :
+ *  Instruct the decoder context about what kind of data to decode next.
+ *  This instruction is mandatory to decode data without a fully-formed header,
+ *  such as ZSTD_f_zstd1_magicless for example.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
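
A sketch combining the two calls above for magicless input (the 128 MB window cap is an arbitrary choice):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    static ZSTD_DCtx* makeMagiclessDCtx(void)
    {
        ZSTD_DCtx* const dctx = ZSTD_createDCtx();
        ZSTD_DCtx_setMaxWindowSize(dctx, (size_t)1 << 27);   /* refuse larger windows */
        ZSTD_DCtx_setFormat(dctx, ZSTD_f_zstd1_magicless);   /* mandatory for magicless data */
        return dctx;
    }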
+
+/*! ZSTD_decompressStream_simpleArgs() :
+ *  Same as ZSTD_decompressStream(),
+ *  but using only integral types as arguments.
+ *  This can be helpful for binders from dynamic languages
+ * which have trouble handling structures containing memory pointers.
+ */
+ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs (
+                            ZSTD_DCtx* dctx,
+                            void* dst, size_t dstCapacity, size_t* dstPos,
+                      const void* src, size_t srcSize, size_t* srcPos);
 
 
 /********************************************************************
 *  Advanced streaming functions
+*  Warning : most of these functions are now redundant with the Advanced API.
+*  Once Advanced API reaches "stable" status,
+*  redundant functions will be deprecated, and then at some point removed.
 ********************************************************************/
 
 /*=====   Advanced Streaming compression functions  =====*/
-ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);   /**< pledgedSrcSize must be correct. If it is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs, "0" also disables frame content size field. It may be enabled in the future. */
-ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< creates of an internal CDict (incompatible with static CCtx), except if dict == NULL or dictSize < 8, in which case no dict is used. Note: dict is loaded with ZSTD_dm_auto (treated as a full zstd dictionary if it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.*/
+/**! ZSTD_initCStream_srcSize() :
+ * This function is deprecated, and equivalent to:
+ *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ *     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
+ *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ *
+ * pledgedSrcSize must be correct. If it is not known at init time, use
+ * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
+ * "0" also disables frame content size field. It may be enabled in the future.
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);
+/**! ZSTD_initCStream_usingDict() :
+ * This function is deprecated, and is equivalent to:
+ *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ *     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
+ *
+ * Creates an internal CDict (incompatible with static CCtx), except if
+ * dict == NULL or dictSize < 8, in which case no dict is used.
+ * Note: dict is loaded with ZSTD_dm_auto (treated as a full zstd dictionary if
+ * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel);
+/**! ZSTD_initCStream_advanced() :
+ * This function is deprecated, and is approximately equivalent to:
+ *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ *     ZSTD_CCtx_setZstdParams(zcs, params); // Set the zstd params and leave the rest as-is
+ *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ *     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
+ *
+ * pledgedSrcSize must be correct. If srcSize is not known at init time, use
+ * value ZSTD_CONTENTSIZE_UNKNOWN. dict is loaded with ZSTD_dm_auto and ZSTD_dlm_byCopy.
+ */
 ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
-                                             ZSTD_parameters params, unsigned long long pledgedSrcSize);  /**< pledgedSrcSize must be correct. If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. dict is loaded with ZSTD_dm_auto and ZSTD_dlm_byCopy. */
-ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);  /**< note : cdict will just be referenced, and must outlive compression session */
-ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize);  /**< same as ZSTD_initCStream_usingCDict(), with control over frame parameters. pledgedSrcSize must be correct. If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. */
+                                             ZSTD_parameters params, unsigned long long pledgedSrcSize);
+/**! ZSTD_initCStream_usingCDict() :
+ * This function is deprecated, and equivalent to:
+ *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ *     ZSTD_CCtx_refCDict(zcs, cdict);
+ *
+ * note : cdict will just be referenced, and must outlive compression session
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
+/**! ZSTD_initCStream_usingCDict_advanced() :
+ * This function is deprecated, and is approximately equivalent to:
+ *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ *     ZSTD_CCtx_setZstdFrameParams(zcs, fParams); // Set the zstd frame params and leave the rest as-is
+ *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ *     ZSTD_CCtx_refCDict(zcs, cdict);
+ *
+ * same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
+ * pledgedSrcSize must be correct. If srcSize is not known at init time, use
+ * value ZSTD_CONTENTSIZE_UNKNOWN.
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize);
 
 /*! ZSTD_resetCStream() :
- *  start a new compression job, using same parameters from previous job.
- *  This is typically useful to skip dictionary loading stage, since it will re-use it in-place..
+ * This function is deprecated, and is equivalent to:
+ *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ *
+ *  start a new frame, using same parameters from previous frame.
+ *  This is typically useful to skip dictionary loading stage, since it will re-use it in-place.
  *  Note that zcs must be init at least once before using ZSTD_resetCStream().
  *  If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
  *  If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
  *  For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
  *  but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
- * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
+ * @return : 0, or an error code (which can be tested using ZSTD_isError())
+ */
 ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
 
 
 typedef struct {
-    unsigned long long ingested;
-    unsigned long long consumed;
-    unsigned long long produced;
+    unsigned long long ingested;   /* nb input bytes read and buffered */
+    unsigned long long consumed;   /* nb input bytes actually compressed */
+    unsigned long long produced;   /* nb of compressed bytes generated and buffered */
+    unsigned long long flushed;    /* nb of compressed bytes flushed : not provided; can be tracked from caller side */
+    unsigned currentJobID;         /* MT only : latest started job nb */
+    unsigned nbActiveWorkers;      /* MT only : nb of workers actively compressing at probe time */
 } ZSTD_frameProgression;
 
-/* ZSTD_getFrameProgression():
+/* ZSTD_getFrameProgression() :
  * tells how much data has been ingested (read from input)
  * consumed (input actually compressed) and produced (output) for current frame.
- * Therefore, (ingested - consumed) is amount of input data buffered internally, not yet compressed.
- * Can report progression inside worker threads (multi-threading and non-blocking mode).
+ * Note : (ingested - consumed) is amount of input data buffered internally, not yet compressed.
+ * Aggregates progression inside active worker threads.
  */
-ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
+ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
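
A sketch of probing progression from the caller's side (hypothetical helper; field semantics as documented in the struct above):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Amount of input read but not yet compressed; safe to call while
     * worker threads are running. */
    static unsigned long long bytesBuffered(const ZSTD_CCtx* cctx)
    {
        ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
        /* fp.currentJobID / fp.nbActiveWorkers only apply when nbWorkers >= 1 */
        return fp.ingested - fp.consumed;
    }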
 
+/*! ZSTD_toFlushNow() :
+ *  Tell how many bytes are ready to be flushed immediately.
+ *  Useful for multithreading scenarios (nbWorkers >= 1).
+ *  Probe the oldest active job, defined as oldest job not yet entirely flushed,
+ *  and check its output buffer.
+ * @return : amount of data stored in oldest job and ready to be flushed immediately.
+ *  if @return == 0, it means either :
+ *  + there is no active job (could be checked with ZSTD_frameProgression()), or
+ *  + oldest job is still actively compressing data,
+ *    but everything it has produced has also been flushed so far,
+ *    therefore flush speed is limited by production speed of oldest job
+ *    irrespective of the speed of concurrent (and newer) jobs.
+ */
+ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
 
 
 /*=====   Advanced Streaming decompression functions  =====*/
-typedef enum { DStream_p_maxWindowSize } ZSTD_DStreamParameter_e;
-ZSTDLIB_API size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue);   /* obsolete : this API will be removed in a future version */
-ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: no dictionary will be used if dict == NULL or dictSize < 8 */
-ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);  /**< note : ddict is referenced, it must outlive decompression session */
-ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);  /**< re-use decompression parameters from previous init; saves dictionary loading */
+/**
+ * This function is deprecated, and is equivalent to:
+ *
+ *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ *     ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
+ *
+ * note: no dictionary will be used if dict == NULL or dictSize < 8
+ */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
+/**
+ * This function is deprecated, and is equivalent to:
+ *
+ *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ *     ZSTD_DCtx_refDDict(zds, ddict);
+ *
+ * note : ddict is referenced, it must outlive decompression session
+ */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
+/**
+ * This function is deprecated, and is equivalent to:
+ *
+ *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ *
+ * re-use decompression parameters from previous init; saves dictionary loading
+ */
+ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
 
 
 /*********************************************************************
@@ -880,7 +1874,17 @@
     unsigned dictID;
     unsigned checksumFlag;
 } ZSTD_frameHeader;
+
+/*! ZSTD_getFrameHeader() :
+ *  decode Frame Header, or signal that a larger `srcSize` is required.
+ * @return : 0 if `zfhPtr` is correctly filled,
+ *          >0 if `srcSize` is too small, the value being the wanted `srcSize` amount,
+ *           or an error code, which can be tested using ZSTD_isError() */
 ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
+/*! ZSTD_getFrameHeader_advanced() :
+ *  same as ZSTD_getFrameHeader(),
+ *  with added capability to select a format (like ZSTD_f_zstd1_magicless) */
+ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
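
A sketch distinguishing the three possible outcomes (hypothetical helper; empty branches stand in for application logic):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    static void probeHeader(const void* src, size_t srcSize)
    {
        ZSTD_frameHeader zfh;
        size_t const hSize = ZSTD_getFrameHeader(&zfh, src, srcSize);
        if (hSize == 0)                { /* zfh filled: windowSize, frameContentSize, ... */ }
        else if (!ZSTD_isError(hSize)) { /* feed at least hSize input bytes and retry */ }
        else                           { /* not a valid zstd frame */ }
    }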
 ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
 
 ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
@@ -897,462 +1901,6 @@
 
 
 
-/* ============================================ */
-/**       New advanced API (experimental)       */
-/* ============================================ */
-
-/* notes on API design :
- *   In this proposal, parameters are pushed one by one into an existing context,
- *   and then applied on all subsequent compression jobs.
- *   When no parameter is ever provided, CCtx is created with compression level ZSTD_CLEVEL_DEFAULT.
- *
- *   This API is intended to replace all others advanced / experimental API entry points.
- *   But it stands a reasonable chance to become "stable", after a reasonable testing period.
- */
-
-/* note on naming convention :
- *   Initially, the API favored names like ZSTD_setCCtxParameter() .
- *   In this proposal, convention is changed towards ZSTD_CCtx_setParameter() .
- *   The main driver is that it identifies more clearly the target object type.
- *   It feels clearer when considering multiple targets :
- *   ZSTD_CDict_setParameter() (rather than ZSTD_setCDictParameter())
- *   ZSTD_CCtxParams_setParameter()  (rather than ZSTD_setCCtxParamsParameter() )
- *   etc...
- */
-
-/* note on enum design :
- * All enum will be pinned to explicit values before reaching "stable API" status */
-
-typedef enum {
-    /* Opened question : should we have a format ZSTD_f_auto ?
-     * Today, it would mean exactly the same as ZSTD_f_zstd1.
-     * But, in the future, should several formats become supported,
-     * on the compression side, it would mean "default format".
-     * On the decompression side, it would mean "automatic format detection",
-     * so that ZSTD_f_zstd1 would mean "accept *only* zstd frames".
-     * Since meaning is a little different, another option could be to define different enums for compression and decompression.
-     * This question could be kept for later, when there are actually multiple formats to support,
-     * but there is also the question of pinning enum values, and pinning value `0` is especially important */
-    ZSTD_f_zstd1 = 0,        /* zstd frame format, specified in zstd_compression_format.md (default) */
-    ZSTD_f_zstd1_magicless,  /* Variant of zstd frame format, without initial 4-bytes magic number.
-                              * Useful to save 4 bytes per generated frame.
-                              * Decoder cannot recognise automatically this format, requiring instructions. */
-} ZSTD_format_e;
-
-typedef enum {
-    /* compression format */
-    ZSTD_p_format = 10,      /* See ZSTD_format_e enum definition.
-                              * Cast selected format as unsigned for ZSTD_CCtx_setParameter() compatibility. */
-
-    /* compression parameters */
-    ZSTD_p_compressionLevel=100, /* Update all compression parameters according to pre-defined cLevel table
-                              * Default level is ZSTD_CLEVEL_DEFAULT==3.
-                              * Special: value 0 means "do not change cLevel".
-                              * Note 1 : it's possible to pass a negative compression level by casting it to unsigned type.
-                              * Note 2 : setting a level sets all default values of other compression parameters.
-                              * Note 3 : setting compressionLevel automatically updates ZSTD_p_compressLiterals. */
-    ZSTD_p_windowLog,        /* Maximum allowed back-reference distance, expressed as power of 2.
-                              * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
-                              * Special: value 0 means "use default windowLog".
-                              * Note: Using a window size greater than ZSTD_MAXWINDOWSIZE_DEFAULT (default: 2^27)
-                              *       requires explicitly allowing such window size during decompression stage. */
-    ZSTD_p_hashLog,          /* Size of the probe table, as a power of 2.
-                              * Resulting table size is (1 << (hashLog+2)).
-                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
-                              * Larger tables improve compression ratio of strategies <= dFast,
-                              * and improve speed of strategies > dFast.
-                              * Special: value 0 means "use default hashLog". */
-    ZSTD_p_chainLog,         /* Size of the full-search table, as a power of 2.
-                              * Resulting table size is (1 << (chainLog+2)).
-                              * Larger tables result in better and slower compression.
-                              * This parameter is useless when using "fast" strategy.
-                              * Special: value 0 means "use default chainLog". */
-    ZSTD_p_searchLog,        /* Number of search attempts, as a power of 2.
-                              * More attempts result in better and slower compression.
-                              * This parameter is useless when using "fast" and "dFast" strategies.
-                              * Special: value 0 means "use default searchLog". */
-    ZSTD_p_minMatch,         /* Minimum size of searched matches (note : repCode matches can be smaller).
-                              * Larger values make faster compression and decompression, but decrease ratio.
-                              * Must be clamped between ZSTD_SEARCHLENGTH_MIN and ZSTD_SEARCHLENGTH_MAX.
-                              * Note that currently, for all strategies < btopt, effective minimum is 4.
-                              *                    , for all strategies > fast, effective maximum is 6.
-                              * Special: value 0 means "use default minMatchLength". */
-    ZSTD_p_targetLength,     /* Impact of this field depends on strategy.
-                              * For strategies btopt & btultra:
-                              *     Length of Match considered "good enough" to stop search.
-                              *     Larger values make compression stronger, and slower.
-                              * For strategy fast:
-                              *     Distance between match sampling.
-                              *     Larger values make compression faster, and weaker.
-                              * Special: value 0 means "use default targetLength". */
-    ZSTD_p_compressionStrategy, /* See ZSTD_strategy enum definition.
-                              * Cast selected strategy as unsigned for ZSTD_CCtx_setParameter() compatibility.
-                              * The higher the value of selected strategy, the more complex it is,
-                              * resulting in stronger and slower compression.
-                              * Special: value 0 means "use default strategy". */
-
-    ZSTD_p_enableLongDistanceMatching=160, /* Enable long distance matching.
-                                         * This parameter is designed to improve compression ratio
-                                         * for large inputs, by finding large matches at long distance.
-                                         * It increases memory usage and window size.
-                                         * Note: enabling this parameter increases ZSTD_p_windowLog to 128 MB
-                                         * except when expressly set to a different value. */
-    ZSTD_p_ldmHashLog,       /* Size of the table for long distance matching, as a power of 2.
-                              * Larger values increase memory usage and compression ratio,
-                              * but decrease compression speed.
-                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
-                              * default: windowlog - 7.
-                              * Special: value 0 means "automatically determine hashlog". */
-    ZSTD_p_ldmMinMatch,      /* Minimum match size for long distance matcher.
-                              * Larger/too small values usually decrease compression ratio.
-                              * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX.
-                              * Special: value 0 means "use default value" (default: 64). */
-    ZSTD_p_ldmBucketSizeLog, /* Log size of each bucket in the LDM hash table for collision resolution.
-                              * Larger values improve collision resolution but decrease compression speed.
-                              * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX .
-                              * Special: value 0 means "use default value" (default: 3). */
-    ZSTD_p_ldmHashEveryLog,  /* Frequency of inserting/looking up entries in the LDM hash table.
-                              * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN).
-                              * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage.
-                              * Larger values improve compression speed.
-                              * Deviating far from default value will likely result in a compression ratio decrease.
-                              * Special: value 0 means "automatically determine hashEveryLog". */
-
-    /* frame parameters */
-    ZSTD_p_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
-                              * Content size must be known at the beginning of compression,
-                              * it is provided using ZSTD_CCtx_setPledgedSrcSize() */
-    ZSTD_p_checksumFlag,     /* A 32-bits checksum of content is written at end of frame (default:0) */
-    ZSTD_p_dictIDFlag,       /* When applicable, dictionary's ID is written into frame header (default:1) */
-
-    /* multi-threading parameters */
-    /* These parameters are only useful if multi-threading is enabled (ZSTD_MULTITHREAD).
-     * They return an error otherwise. */
-    ZSTD_p_nbWorkers=400,    /* Select how many threads will be spawned to compress in parallel.
-                              * When nbWorkers >= 1, triggers asynchronous mode :
-                              * ZSTD_compress_generic() consumes some input, flush some output if possible, and immediately gives back control to caller,
-                              * while compression work is performed in parallel, within worker threads.
-                              * (note : a strong exception to this rule is when first invocation sets ZSTD_e_end : it becomes a blocking call).
-                              * More workers improve speed, but also increase memory usage.
-                              * Default value is `0`, aka "single-threaded mode" : no worker is spawned, compression is performed inside Caller's thread, all invocations are blocking */
-    ZSTD_p_jobSize,          /* Size of a compression job. This value is enforced only in non-blocking mode.
-                              * Each compression job is completed in parallel, so this value indirectly controls the nb of active threads.
-                              * 0 means default, which is dynamically determined based on compression parameters.
-                              * Job size must be a minimum of overlapSize, or 1 MB, whichever is largest.
-                              * The minimum size is automatically and transparently enforced */
-    ZSTD_p_overlapSizeLog,   /* Size of previous input reloaded at the beginning of each job.
-                              * 0 => no overlap, 6(default) => use 1/8th of windowSize, >=9 => use full windowSize */
-
-    /* =================================================================== */
-    /* experimental parameters - no stability guaranteed                   */
-    /* =================================================================== */
-
-    ZSTD_p_compressLiterals=1000, /* control huffman compression of literals (enabled) by default.
-                              * disabling it improves speed and decreases compression ratio by a large amount.
-                              * note : this setting is automatically updated when changing compression level.
-                              *        positive compression levels set ZSTD_p_compressLiterals to 1.
-                              *        negative compression levels set ZSTD_p_compressLiterals to 0. */
-
-    ZSTD_p_forceMaxWindow=1100, /* Force back-reference distances to remain < windowSize,
-                              * even when referencing into Dictionary content (default:0) */
-
-} ZSTD_cParameter;
-
-
-/*! ZSTD_CCtx_setParameter() :
- *  Set one compression parameter, selected by enum ZSTD_cParameter.
- *  Setting a parameter is generally only possible during frame initialization (before starting compression),
- *  except for a few exceptions which can be updated during compression: compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.
- *  Note : when `value` is an enum, cast it to unsigned for proper type checking.
- *  @result : informational value (typically, value being set clamped correctly),
- *            or an error code (which can be tested with ZSTD_isError()). */
-ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value);
-
-/*! ZSTD_CCtx_setPledgedSrcSize() :
- *  Total input data size to be compressed as a single frame.
- *  This value will be controlled at the end, and result in error if not respected.
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- *  Note 1 : 0 means zero, empty.
- *           In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN.
- *           ZSTD_CONTENTSIZE_UNKNOWN is default value for any new compression job.
- *  Note 2 : If all data is provided and consumed in a single round,
- *           this value is overriden by srcSize instead. */
-ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
-
-/*! ZSTD_CCtx_loadDictionary() :
- *  Create an internal CDict from `dict` buffer.
- *  Decompression will have to use same dictionary.
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- *  Special: Adding a NULL (or 0-size) dictionary invalidates previous dictionary,
- *           meaning "return to no-dictionary mode".
- *  Note 1 : Dictionary will be used for all future compression jobs.
- *           To return to "no-dictionary" situation, load a NULL dictionary
- *  Note 2 : Loading a dictionary involves building tables, which are dependent on compression parameters.
- *           For this reason, compression parameters cannot be changed anymore after loading a dictionary.
- *           It's also a CPU consuming operation, with non-negligible impact on latency.
- *  Note 3 :`dict` content will be copied internally.
- *           Use ZSTD_CCtx_loadDictionary_byReference() to reference dictionary content instead.
- *           In such a case, dictionary buffer must outlive its users.
- *  Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
- *           to precisely select how dictionary content must be interpreted. */
-ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
-ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
-ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
-
-
-/*! ZSTD_CCtx_refCDict() :
- *  Reference a prepared dictionary, to be used for all next compression jobs.
- *  Note that compression parameters are enforced from within CDict,
- *  and supercede any compression parameter previously set within CCtx.
- *  The dictionary will remain valid for future compression jobs using same CCtx.
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- *  Special : adding a NULL CDict means "return to no-dictionary mode".
- *  Note 1 : Currently, only one dictionary can be managed.
- *           Adding a new dictionary effectively "discards" any previous one.
- *  Note 2 : CDict is just referenced, its lifetime must outlive CCtx. */
-ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
-
-/*! ZSTD_CCtx_refPrefix() :
- *  Reference a prefix (single-usage dictionary) for next compression job.
- *  Decompression need same prefix to properly regenerate data.
- *  Prefix is **only used once**. Tables are discarded at end of compression job.
- *  Subsequent compression jobs will be done without prefix (if none is explicitly referenced).
- *  If there is a need to use same prefix multiple times, consider embedding it into a ZSTD_CDict instead.
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- *  Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
- *  Note 1 : Prefix buffer is referenced. It must outlive compression job.
- *  Note 2 : Referencing a prefix involves building tables, which are dependent on compression parameters.
- *           It's a CPU consuming operation, with non-negligible impact on latency.
- *  Note 3 : By default, the prefix is treated as raw content (ZSTD_dm_rawContent).
- *           Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode. */
-ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize);
-ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
-
-/*! ZSTD_CCtx_reset() :
- *  Return a CCtx to clean state.
- *  Useful after an error, or to interrupt an ongoing compression job and start a new one.
- *  Any internal data not yet flushed is cancelled.
- *  Dictionary (if any) is dropped.
- *  All parameters are back to default values.
- *  It's possible to modify compression parameters after a reset.
- */
-ZSTDLIB_API void ZSTD_CCtx_reset(ZSTD_CCtx* cctx);
-
-
-
-typedef enum {
-    ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal conditions */
-    ZSTD_e_flush,      /* flush any data provided so far - frame will continue, future data can still reference previous data for better compression */
-    ZSTD_e_end         /* flush any remaining data and close current frame. Any additional data starts a new frame. */
-} ZSTD_EndDirective;
-
-/*! ZSTD_compress_generic() :
- *  Behave about the same as ZSTD_compressStream. To note :
- *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_setParameter()
- *  - Compression parameters cannot be changed once compression is started.
- *  - outpot->pos must be <= dstCapacity, input->pos must be <= srcSize
- *  - outpot->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
- *  - In single-thread mode (default), function is blocking : it completed its job before returning to caller.
- *  - In multi-thread mode, function is non-blocking : it just acquires a copy of input, and distribute job to internal worker threads,
- *                                                     and then immediately returns, just indicating that there is some data remaining to be flushed.
- *                                                     The function nonetheless guarantees forward progress : it will return only after it reads or write at least 1+ byte.
- *  - Exception : in multi-threading mode, if the first call requests a ZSTD_e_end directive, it is blocking : it will complete compression before giving back control to caller.
- *  - @return provides a minimum amount of data remaining to be flushed from internal buffers
- *            or an error code, which can be tested using ZSTD_isError().
- *            if @return != 0, flush is not fully completed, there is still some data left within internal buffers.
- *            This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
- *            For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
- *  - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
- *            only ZSTD_e_end or ZSTD_e_flush operations are allowed.
- *            Before starting a new compression job, or changing compression parameters,
- *            it is required to fully flush internal buffers.
- */
-ZSTDLIB_API size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
-                                          ZSTD_outBuffer* output,
-                                          ZSTD_inBuffer* input,
-                                          ZSTD_EndDirective endOp);
-
-
-/*! ZSTD_compress_generic_simpleArgs() :
- *  Same as ZSTD_compress_generic(),
- *  but using only integral types as arguments.
- *  Argument list is larger than ZSTD_{in,out}Buffer,
- *  but can be helpful for binders from dynamic languages
- *  which have troubles handling structures containing memory pointers.
- */
-ZSTDLIB_API size_t ZSTD_compress_generic_simpleArgs (
-                            ZSTD_CCtx* cctx,
-                            void* dst, size_t dstCapacity, size_t* dstPos,
-                      const void* src, size_t srcSize, size_t* srcPos,
-                            ZSTD_EndDirective endOp);
-
-
-/*! ZSTD_CCtx_params :
- *  Quick howto :
- *  - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
- *  - ZSTD_CCtxParam_setParameter() : Push parameters one by one into
- *                                    an existing ZSTD_CCtx_params structure.
- *                                    This is similar to
- *                                    ZSTD_CCtx_setParameter().
- *  - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to
- *                                    an existing CCtx.
- *                                    These parameters will be applied to
- *                                    all subsequent compression jobs.
- *  - ZSTD_compress_generic() : Do compression using the CCtx.
- *  - ZSTD_freeCCtxParams() : Free the memory.
- *
- *  This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams()
- *  for static allocation for single-threaded compression.
- */
-ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
-ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);
-
-
-/*! ZSTD_CCtxParams_reset() :
- *  Reset params to default values.
- */
-ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
-
-/*! ZSTD_CCtxParams_init() :
- *  Initializes the compression parameters of cctxParams according to
- *  compression level. All other parameters are reset to their default values.
- */
-ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);
-
-/*! ZSTD_CCtxParams_init_advanced() :
- *  Initializes the compression and frame parameters of cctxParams according to
- *  params. All other parameters are reset to their default values.
- */
-ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
-
-
-/*! ZSTD_CCtxParam_setParameter() :
- *  Similar to ZSTD_CCtx_setParameter.
- *  Set one compression parameter, selected by enum ZSTD_cParameter.
- *  Parameters must be applied to a ZSTD_CCtx using ZSTD_CCtx_setParametersUsingCCtxParams().
- *  Note : when `value` is an enum, cast it to unsigned for proper type checking.
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- */
-ZSTDLIB_API size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, unsigned value);
-
-/*! ZSTD_CCtx_setParametersUsingCCtxParams() :
- *  Apply a set of ZSTD_CCtx_params to the compression context.
- *  This can be done even after compression has started :
- *    if nbWorkers==0, this will have no impact until a new compression is started.
- *    if nbWorkers>=1, new parameters will be picked up at next job,
- *       with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).
- */
-ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
-        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
-
-
-/*===   Advanced parameters for decompression API  ===*/
-
-/* The following parameters must be set after creating a ZSTD_DCtx* (or ZSTD_DStream*) object,
- * but before starting decompression of a frame.
- */
-
-/*! ZSTD_DCtx_loadDictionary() :
- *  Create an internal DDict from dict buffer,
- *  to be used to decompress next frames.
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- *  Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
- *            meaning "return to no-dictionary mode".
- *  Note 1 : `dict` content will be copied internally.
- *            Use ZSTD_DCtx_loadDictionary_byReference()
- *            to reference dictionary content instead ;
- *            in that case, the dictionary buffer must outlive its users.
- *  Note 2 : Loading a dictionary involves building tables,
- *           which has a non-negligible impact on CPU usage and latency.
- *  Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to select
- *           how dictionary content will be interpreted and loaded.
- */
-ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
-ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
-ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
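
For example, a sketch of dictionary-based decompression, assuming `dictBuf`/`dictSize` hold a dictionary (hypothetical names; error checks elided):

    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    ZSTD_DCtx_loadDictionary(dctx, dictBuf, dictSize);   /* content is copied internally */
    /* ... decompress any number of frames made with that dictionary ... */
    ZSTD_freeDCtx(dctx);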
-
-
-/*! ZSTD_DCtx_refDDict() :
- *  Reference a prepared dictionary, to be used to decompress next frames.
- *  The dictionary remains active for decompression of future frames using same DCtx.
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- *  Note 1 : Currently, only one dictionary can be managed.
- *           Referencing a new dictionary effectively "discards" any previous one.
- *  Special : adding a NULL DDict means "return to no-dictionary mode".
- *  Note 2 : DDict is just referenced ; it must outlive its usage within the DCtx.
- */
-ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
-
-
-/*! ZSTD_DCtx_refPrefix() :
- *  Reference a prefix (single-usage dictionary) for decompression of the next frame.
- *  Prefix is **only used once**. It must be explicitly referenced before each frame.
- *  If there is a need to use same prefix multiple times, consider embedding it into a ZSTD_DDict instead.
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- *  Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary.
- *  Note 2 : Prefix buffer is referenced. It must outlive decompression of the frame.
- *  Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
- *           Use ZSTD_DCtx_refPrefix_advanced() to alter dictContentType.
- *  Note 4 : Referencing a raw content prefix has almost no CPU or memory cost.
- */
-ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize);
-ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
-
-
-/*! ZSTD_DCtx_setMaxWindowSize() :
- *  Refuses to allocate internal buffers for frames requiring a window size larger than the provided limit.
- *  This is useful to prevent a decoder context from reserving too much memory for itself (potential attack scenario).
- *  This parameter is only useful in streaming mode, since no internal buffer is allocated in direct mode.
- *  By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_MAX)
- * @return : 0, or an error code (which can be tested using ZSTD_isError()).
- */
-ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
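
For instance, a sketch capping the window at 8 MB for untrusted input (the limit value is illustrative):

    size_t const r = ZSTD_DCtx_setMaxWindowSize(dctx, (size_t)1 << 23);   /* 8 MB */
    if (ZSTD_isError(r)) { /* handle */ }
    /* frames declaring a larger window now fail early instead of triggering large allocations */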
-
-
-/*! ZSTD_DCtx_setFormat() :
- *  Instruct the decoder context about what kind of data to decode next.
- *  This instruction is mandatory to decode data without a fully-formed header,
- *  such as ZSTD_f_zstd1_magicless for example.
- * @return : 0, or an error code (which can be tested using ZSTD_isError()).
- */
-ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
-
-
-/*! ZSTD_decompress_generic() :
- *  Behaves the same as ZSTD_decompressStream().
- *  Decompression parameters cannot be changed once decompression is started.
- * @return : an error code, which can be tested using ZSTD_isError() ;
- *           if >0, a hint : the nb of expected input bytes for next invocation ;
- *           `0` means : a frame has just been fully decoded and flushed.
- */
-ZSTDLIB_API size_t ZSTD_decompress_generic(ZSTD_DCtx* dctx,
-                                           ZSTD_outBuffer* output,
-                                           ZSTD_inBuffer* input);
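
A sketch of the @return contract above, for one input chunk (buffers assumed to exist; a real loop would also drain `output` when it fills):

    ZSTD_inBuffer  in  = { chunk, chunkSize, 0 };
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    while (in.pos < in.size) {
        size_t const hint = ZSTD_decompress_generic(dctx, &out, &in);
        if (ZSTD_isError(hint)) break;   /* corrupted or unsupported frame */
        if (hint == 0) break;            /* frame fully decoded and flushed */
        /* hint > 0 : suggested nb of input bytes for next invocation */
    }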
-
-
-/*! ZSTD_decompress_generic_simpleArgs() :
- *  Same as ZSTD_decompress_generic(),
- *  but using only integral types as arguments.
- *  Argument list is larger than ZSTD_{in,out}Buffer,
- *  but can be helpful for binders from dynamic languages
- *  which have trouble handling structures containing memory pointers.
- */
-ZSTDLIB_API size_t ZSTD_decompress_generic_simpleArgs (
-                            ZSTD_DCtx* dctx,
-                            void* dst, size_t dstCapacity, size_t* dstPos,
-                      const void* src, size_t srcSize, size_t* srcPos);
-
-
-/*! ZSTD_DCtx_reset() :
- *  Return a DCtx to clean state.
- *  If a decompression was ongoing, any internal data not yet flushed is cancelled.
- *  All parameters are back to default values, including sticky ones.
- *  Dictionary (if any) is dropped.
- *  Parameters can be modified again after a reset.
- */
-ZSTDLIB_API void ZSTD_DCtx_reset(ZSTD_DCtx* dctx);
-
-
 
 /* ============================ */
 /**       Block level API       */
@@ -1372,10 +1920,10 @@
       + copyCCtx() and copyDCtx() can be used too
     - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
       + If input is larger than a block size, it's necessary to split input data into multiple blocks
-      + For inputs larger than a single block size, consider using the regular ZSTD_compress() instead.
+      + For inputs larger than a single block, strongly consider using regular ZSTD_compress() instead.
         Frame metadata is not that costly, and quickly becomes negligible as source size grows larger.
     - When a block is considered not compressible enough, ZSTD_compressBlock() result will be zero.
-      In which case, nothing is produced into `dst`.
+      In which case, nothing is produced into `dst` !
       + User must test for such outcome and deal directly with uncompressed data
       + ZSTD_decompressBlock() doesn't accept uncompressed data as input !!!
       + In case of multiple successive blocks, should some of them be uncompressed,
@@ -1383,8 +1931,6 @@
         Use ZSTD_insertBlock() for such a case.
 */
 
-#define ZSTD_BLOCKSIZELOG_MAX 17
-#define ZSTD_BLOCKSIZE_MAX   (1<<ZSTD_BLOCKSIZELOG_MAX)   /* define, for static allocation */
 /*=====   Raw zstd block functions  =====*/
 ZSTDLIB_API size_t ZSTD_getBlockSize   (const ZSTD_CCtx* cctx);
 ZSTDLIB_API size_t ZSTD_compressBlock  (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
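
A sketch of the fallback described above when a block is judged not compressible (requires <string.h> for memcpy; the surrounding framing protocol is up to the caller):

    size_t const cSize = ZSTD_compressBlock(cctx, dst, dstCapacity, src, srcSize);
    if (ZSTD_isError(cSize)) { /* handle */ }
    else if (cSize == 0) {                 /* block not compressible */
        memcpy(dst, src, srcSize);         /* transmit it uncompressed */
        /* receiver side : ZSTD_insertBlock(dctx, dst, srcSize) keeps decoder history consistent */
    }
    /* otherwise, receiver side : ZSTD_decompressBlock(dctx, out, outCapacity, dst, cSize) */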
diff --git a/vendor/github.com/DataDog/zstd/zstd_common.c b/vendor/github.com/DataDog/zstd/zstd_common.c
index bccc948..667f4a2 100644
--- a/vendor/github.com/DataDog/zstd/zstd_common.c
+++ b/vendor/github.com/DataDog/zstd/zstd_common.c
@@ -30,8 +30,10 @@
 /*-****************************************
 *  ZSTD Error Management
 ******************************************/
+#undef ZSTD_isError   /* defined within zstd_internal.h */
 /*! ZSTD_isError() :
- *  tells if a return value is an error code */
+ *  tells if a return value is an error code
+ *  symbol is required for external callers */
 unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
 
 /*! ZSTD_getErrorName() :
@@ -46,11 +48,6 @@
  *  provides error code string from enum */
 const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
 
-/*! g_debuglog_enable :
- *  turn on/off debug traces (global switch) */
-#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG >= 2)
-int g_debuglog_enable = 1;
-#endif
 
 
 /*=**************************************************************
diff --git a/vendor/github.com/DataDog/zstd/zstd_compress.c b/vendor/github.com/DataDog/zstd/zstd_compress.c
index 2aa26da..1476512 100644
--- a/vendor/github.com/DataDog/zstd/zstd_compress.c
+++ b/vendor/github.com/DataDog/zstd/zstd_compress.c
@@ -8,21 +8,14 @@
  * You may select, at your option, one of the above-listed licenses.
  */
 
-
-/*-*************************************
-*  Tuning parameters
-***************************************/
-#ifndef ZSTD_CLEVEL_DEFAULT
-#  define ZSTD_CLEVEL_DEFAULT 3
-#endif
-
-
 /*-*************************************
 *  Dependencies
 ***************************************/
+#include <limits.h>         /* INT_MAX */
 #include <string.h>         /* memset */
 #include "cpu.h"
 #include "mem.h"
+#include "hist.h"           /* HIST_countFast_wksp */
 #define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
 #include "fse.h"
 #define HUF_STATIC_LINKING_ONLY
@@ -54,7 +47,6 @@
     size_t workspaceSize;
     ZSTD_matchState_t matchState;
     ZSTD_compressedBlockState_t cBlockState;
-    ZSTD_compressionParameters cParams;
     ZSTD_customMem customMem;
     U32 dictID;
 };  /* typedef'd to ZSTD_CDict within "zstd.h" */
@@ -64,17 +56,26 @@
     return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
 }
 
+static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
+{
+    assert(cctx != NULL);
+    memset(cctx, 0, sizeof(*cctx));
+    cctx->customMem = memManager;
+    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+    {   size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
+        assert(!ZSTD_isError(err));
+        (void)err;
+    }
+}
+
 ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
 {
     ZSTD_STATIC_ASSERT(zcss_init==0);
     ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
     if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
-    {   ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_calloc(sizeof(ZSTD_CCtx), customMem);
+    {   ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
         if (!cctx) return NULL;
-        cctx->customMem = customMem;
-        cctx->requestedParams.compressionLevel = ZSTD_CLEVEL_DEFAULT;
-        cctx->requestedParams.fParams.contentSizeFlag = 1;
-        cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+        ZSTD_initCCtx(cctx, customMem);
         return cctx;
     }
 }
@@ -102,17 +103,44 @@
     return cctx;
 }
 
-size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
+/**
+ * Clears and frees all of the dictionaries in the CCtx.
+ */
+static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
 {
-    if (cctx==NULL) return 0;   /* support free on NULL */
-    if (cctx->staticSize) return ERROR(memory_allocation);   /* not compatible with static CCtx */
+    ZSTD_free(cctx->localDict.dictBuffer, cctx->customMem);
+    ZSTD_freeCDict(cctx->localDict.cdict);
+    memset(&cctx->localDict, 0, sizeof(cctx->localDict));
+    memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
+    cctx->cdict = NULL;
+}
+
+static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
+{
+    size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
+    size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
+    return bufferSize + cdictSize;
+}
+
+static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
+{
+    assert(cctx != NULL);
+    assert(cctx->staticSize == 0);
     ZSTD_free(cctx->workSpace, cctx->customMem); cctx->workSpace = NULL;
-    ZSTD_freeCDict(cctx->cdictLocal); cctx->cdictLocal = NULL;
+    ZSTD_clearAllDicts(cctx);
 #ifdef ZSTD_MULTITHREAD
     ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
 #endif
+}
+
+size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
+{
+    if (cctx==NULL) return 0;   /* support free on NULL */
+    RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
+                    "not compatible with static CCtx");
+    ZSTD_freeCCtxContent(cctx);
     ZSTD_free(cctx, cctx->customMem);
-    return 0;   /* reserved as a potential error code in the future */
+    return 0;
 }
 
 
@@ -121,7 +149,7 @@
 #ifdef ZSTD_MULTITHREAD
     return ZSTDMT_sizeof_CCtx(cctx->mtctx);
 #else
-    (void) cctx;
+    (void)cctx;
     return 0;
 #endif
 }
@@ -131,7 +159,7 @@
 {
     if (cctx==NULL) return 0;   /* support sizeof on NULL */
     return sizeof(*cctx) + cctx->workSpaceSize
-           + ZSTD_sizeof_CDict(cctx->cdictLocal)
+           + ZSTD_sizeof_localDict(cctx->localDict)
            + ZSTD_sizeof_mtctx(cctx);
 }
 
@@ -143,21 +171,6 @@
 /* private API call, for dictBuilder only */
 const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
 
-ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
-        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize)
-{
-    ZSTD_compressionParameters cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize);
-    if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
-    if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog;
-    if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog;
-    if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog;
-    if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog;
-    if (CCtxParams->cParams.searchLength) cParams.searchLength = CCtxParams->cParams.searchLength;
-    if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength;
-    if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy;
-    return cParams;
-}
-
 static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
         ZSTD_compressionParameters cParams)
 {
@@ -202,7 +215,7 @@
 }
 
 size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
-    if (!cctxParams) { return ERROR(GENERIC); }
+    RETURN_ERROR_IF(!cctxParams, GENERIC);
     memset(cctxParams, 0, sizeof(*cctxParams));
     cctxParams->compressionLevel = compressionLevel;
     cctxParams->fParams.contentSizeFlag = 1;
@@ -211,8 +224,8 @@
 
 size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
 {
-    if (!cctxParams) { return ERROR(GENERIC); }
-    CHECK_F( ZSTD_checkCParams(params.cParams) );
+    RETURN_ERROR_IF(!cctxParams, GENERIC);
+    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
     memset(cctxParams, 0, sizeof(*cctxParams));
     cctxParams->cParams = params.cParams;
     cctxParams->fParams = params.fParams;
@@ -234,249 +247,553 @@
     return ret;
 }
 
-#define CLAMPCHECK(val,min,max) {            \
-    if (((val)<(min)) | ((val)>(max))) {     \
-        return ERROR(parameter_outOfBound);  \
-}   }
+ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
+{
+    ZSTD_bounds bounds = { 0, 0, 0 };
+
+    switch(param)
+    {
+    case ZSTD_c_compressionLevel:
+        bounds.lowerBound = ZSTD_minCLevel();
+        bounds.upperBound = ZSTD_maxCLevel();
+        return bounds;
+
+    case ZSTD_c_windowLog:
+        bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
+        bounds.upperBound = ZSTD_WINDOWLOG_MAX;
+        return bounds;
+
+    case ZSTD_c_hashLog:
+        bounds.lowerBound = ZSTD_HASHLOG_MIN;
+        bounds.upperBound = ZSTD_HASHLOG_MAX;
+        return bounds;
+
+    case ZSTD_c_chainLog:
+        bounds.lowerBound = ZSTD_CHAINLOG_MIN;
+        bounds.upperBound = ZSTD_CHAINLOG_MAX;
+        return bounds;
+
+    case ZSTD_c_searchLog:
+        bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
+        bounds.upperBound = ZSTD_SEARCHLOG_MAX;
+        return bounds;
+
+    case ZSTD_c_minMatch:
+        bounds.lowerBound = ZSTD_MINMATCH_MIN;
+        bounds.upperBound = ZSTD_MINMATCH_MAX;
+        return bounds;
+
+    case ZSTD_c_targetLength:
+        bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
+        bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
+        return bounds;
+
+    case ZSTD_c_strategy:
+        bounds.lowerBound = ZSTD_STRATEGY_MIN;
+        bounds.upperBound = ZSTD_STRATEGY_MAX;
+        return bounds;
+
+    case ZSTD_c_contentSizeFlag:
+        bounds.lowerBound = 0;
+        bounds.upperBound = 1;
+        return bounds;
+
+    case ZSTD_c_checksumFlag:
+        bounds.lowerBound = 0;
+        bounds.upperBound = 1;
+        return bounds;
+
+    case ZSTD_c_dictIDFlag:
+        bounds.lowerBound = 0;
+        bounds.upperBound = 1;
+        return bounds;
+
+    case ZSTD_c_nbWorkers:
+        bounds.lowerBound = 0;
+#ifdef ZSTD_MULTITHREAD
+        bounds.upperBound = ZSTDMT_NBWORKERS_MAX;
+#else
+        bounds.upperBound = 0;
+#endif
+        return bounds;
+
+    case ZSTD_c_jobSize:
+        bounds.lowerBound = 0;
+#ifdef ZSTD_MULTITHREAD
+        bounds.upperBound = ZSTDMT_JOBSIZE_MAX;
+#else
+        bounds.upperBound = 0;
+#endif
+        return bounds;
+
+    case ZSTD_c_overlapLog:
+        bounds.lowerBound = ZSTD_OVERLAPLOG_MIN;
+        bounds.upperBound = ZSTD_OVERLAPLOG_MAX;
+        return bounds;
+
+    case ZSTD_c_enableLongDistanceMatching:
+        bounds.lowerBound = 0;
+        bounds.upperBound = 1;
+        return bounds;
+
+    case ZSTD_c_ldmHashLog:
+        bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
+        bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
+        return bounds;
+
+    case ZSTD_c_ldmMinMatch:
+        bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
+        bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
+        return bounds;
+
+    case ZSTD_c_ldmBucketSizeLog:
+        bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
+        bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
+        return bounds;
+
+    case ZSTD_c_ldmHashRateLog:
+        bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
+        bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
+        return bounds;
+
+    /* experimental parameters */
+    case ZSTD_c_rsyncable:
+        bounds.lowerBound = 0;
+        bounds.upperBound = 1;
+        return bounds;
+
+    case ZSTD_c_forceMaxWindow :
+        bounds.lowerBound = 0;
+        bounds.upperBound = 1;
+        return bounds;
+
+    case ZSTD_c_format:
+        ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
+        bounds.lowerBound = ZSTD_f_zstd1;
+        bounds.upperBound = ZSTD_f_zstd1_magicless;   /* note : how to ensure at compile time that this is the highest value enum ? */
+        return bounds;
+
+    case ZSTD_c_forceAttachDict:
+        ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceCopy);
+        bounds.lowerBound = ZSTD_dictDefaultAttach;
+        bounds.upperBound = ZSTD_dictForceCopy;       /* note : how to ensure at compile time that this is the highest value enum ? */
+        return bounds;
+
+    case ZSTD_c_literalCompressionMode:
+        ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed);
+        bounds.lowerBound = ZSTD_lcm_auto;
+        bounds.upperBound = ZSTD_lcm_uncompressed;
+        return bounds;
+
+    case ZSTD_c_targetCBlockSize:
+        bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;
+        bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
+        return bounds;
+
+    default:
+        {   ZSTD_bounds const boundError = { ERROR(parameter_unsupported), 0, 0 };
+            return boundError;
+        }
+    }
+}
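
For example, a caller can query the advertised range before setting a parameter (printf requires <stdio.h> and is for illustration only):

    ZSTD_bounds const b = ZSTD_cParam_getBounds(ZSTD_c_compressionLevel);
    if (!ZSTD_isError(b.error))
        printf("valid compression levels : %d .. %d\n", b.lowerBound, b.upperBound);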
+
+/* ZSTD_cParam_withinBounds:
+ * @return 1 if value is within cParam bounds,
+ * 0 otherwise */
+static int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
+{
+    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
+    if (ZSTD_isError(bounds.error)) return 0;
+    if (value < bounds.lowerBound) return 0;
+    if (value > bounds.upperBound) return 0;
+    return 1;
+}
+
+/* ZSTD_cParam_clampBounds:
+ * Clamps the value into the bounded range.
+ */
+static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
+{
+    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
+    if (ZSTD_isError(bounds.error)) return bounds.error;
+    if (*value < bounds.lowerBound) *value = bounds.lowerBound;
+    if (*value > bounds.upperBound) *value = bounds.upperBound;
+    return 0;
+}
+
+#define BOUNDCHECK(cParam, val) { \
+    RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
+                    parameter_outOfBound); \
+}
 
 
 static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
 {
     switch(param)
     {
-    case ZSTD_p_compressionLevel:
-    case ZSTD_p_hashLog:
-    case ZSTD_p_chainLog:
-    case ZSTD_p_searchLog:
-    case ZSTD_p_minMatch:
-    case ZSTD_p_targetLength:
-    case ZSTD_p_compressionStrategy:
-    case ZSTD_p_compressLiterals:
+    case ZSTD_c_compressionLevel:
+    case ZSTD_c_hashLog:
+    case ZSTD_c_chainLog:
+    case ZSTD_c_searchLog:
+    case ZSTD_c_minMatch:
+    case ZSTD_c_targetLength:
+    case ZSTD_c_strategy:
         return 1;
 
-    case ZSTD_p_format:
-    case ZSTD_p_windowLog:
-    case ZSTD_p_contentSizeFlag:
-    case ZSTD_p_checksumFlag:
-    case ZSTD_p_dictIDFlag:
-    case ZSTD_p_forceMaxWindow :
-    case ZSTD_p_nbWorkers:
-    case ZSTD_p_jobSize:
-    case ZSTD_p_overlapSizeLog:
-    case ZSTD_p_enableLongDistanceMatching:
-    case ZSTD_p_ldmHashLog:
-    case ZSTD_p_ldmMinMatch:
-    case ZSTD_p_ldmBucketSizeLog:
-    case ZSTD_p_ldmHashEveryLog:
+    case ZSTD_c_format:
+    case ZSTD_c_windowLog:
+    case ZSTD_c_contentSizeFlag:
+    case ZSTD_c_checksumFlag:
+    case ZSTD_c_dictIDFlag:
+    case ZSTD_c_forceMaxWindow :
+    case ZSTD_c_nbWorkers:
+    case ZSTD_c_jobSize:
+    case ZSTD_c_overlapLog:
+    case ZSTD_c_rsyncable:
+    case ZSTD_c_enableLongDistanceMatching:
+    case ZSTD_c_ldmHashLog:
+    case ZSTD_c_ldmMinMatch:
+    case ZSTD_c_ldmBucketSizeLog:
+    case ZSTD_c_ldmHashRateLog:
+    case ZSTD_c_forceAttachDict:
+    case ZSTD_c_literalCompressionMode:
+    case ZSTD_c_targetCBlockSize:
     default:
         return 0;
     }
 }
 
-size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value)
+size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
 {
-    DEBUGLOG(4, "ZSTD_CCtx_setParameter (%u, %u)", (U32)param, value);
+    DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
     if (cctx->streamStage != zcss_init) {
         if (ZSTD_isUpdateAuthorized(param)) {
             cctx->cParamsChanged = 1;
         } else {
-            return ERROR(stage_wrong);
+            RETURN_ERROR(stage_wrong);
     }   }
 
     switch(param)
     {
-    case ZSTD_p_format :
-        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+    case ZSTD_c_nbWorkers:
+        RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
+                        "MT not compatible with static alloc");
+        break;
 
-    case ZSTD_p_compressionLevel:
-        if (cctx->cdict) return ERROR(stage_wrong);
-        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+    case ZSTD_c_compressionLevel:
+    case ZSTD_c_windowLog:
+    case ZSTD_c_hashLog:
+    case ZSTD_c_chainLog:
+    case ZSTD_c_searchLog:
+    case ZSTD_c_minMatch:
+    case ZSTD_c_targetLength:
+    case ZSTD_c_strategy:
+    case ZSTD_c_ldmHashRateLog:
+    case ZSTD_c_format:
+    case ZSTD_c_contentSizeFlag:
+    case ZSTD_c_checksumFlag:
+    case ZSTD_c_dictIDFlag:
+    case ZSTD_c_forceMaxWindow:
+    case ZSTD_c_forceAttachDict:
+    case ZSTD_c_literalCompressionMode:
+    case ZSTD_c_jobSize:
+    case ZSTD_c_overlapLog:
+    case ZSTD_c_rsyncable:
+    case ZSTD_c_enableLongDistanceMatching:
+    case ZSTD_c_ldmHashLog:
+    case ZSTD_c_ldmMinMatch:
+    case ZSTD_c_ldmBucketSizeLog:
+    case ZSTD_c_targetCBlockSize:
+        break;
 
-    case ZSTD_p_windowLog:
-    case ZSTD_p_hashLog:
-    case ZSTD_p_chainLog:
-    case ZSTD_p_searchLog:
-    case ZSTD_p_minMatch:
-    case ZSTD_p_targetLength:
-    case ZSTD_p_compressionStrategy:
-        if (cctx->cdict) return ERROR(stage_wrong);
-        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
-    case ZSTD_p_compressLiterals:
-    case ZSTD_p_contentSizeFlag:
-    case ZSTD_p_checksumFlag:
-    case ZSTD_p_dictIDFlag:
-        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
-    case ZSTD_p_forceMaxWindow :  /* Force back-references to remain < windowSize,
-                                   * even when referencing into Dictionary content.
-                                   * default : 0 when using a CDict, 1 when using a Prefix */
-        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
-    case ZSTD_p_nbWorkers:
-        if ((value>0) && cctx->staticSize) {
-            return ERROR(parameter_unsupported);  /* MT not compatible with static alloc */
-        }
-        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
-    case ZSTD_p_jobSize:
-    case ZSTD_p_overlapSizeLog:
-        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
-    case ZSTD_p_enableLongDistanceMatching:
-    case ZSTD_p_ldmHashLog:
-    case ZSTD_p_ldmMinMatch:
-    case ZSTD_p_ldmBucketSizeLog:
-    case ZSTD_p_ldmHashEveryLog:
-        if (cctx->cdict) return ERROR(stage_wrong);
-        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
-    default: return ERROR(parameter_unsupported);
+    default: RETURN_ERROR(parameter_unsupported);
     }
+    return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
 }
 
-size_t ZSTD_CCtxParam_setParameter(
-        ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, unsigned value)
+size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
+                                    ZSTD_cParameter param, int value)
 {
-    DEBUGLOG(4, "ZSTD_CCtxParam_setParameter (%u, %u)", (U32)param, value);
+    DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
     switch(param)
     {
-    case ZSTD_p_format :
-        if (value > (unsigned)ZSTD_f_zstd1_magicless)
-            return ERROR(parameter_unsupported);
+    case ZSTD_c_format :
+        BOUNDCHECK(ZSTD_c_format, value);
         CCtxParams->format = (ZSTD_format_e)value;
         return (size_t)CCtxParams->format;
 
-    case ZSTD_p_compressionLevel : {
-        int cLevel = (int)value;  /* cast expected to restore negative sign */
-        if (cLevel > ZSTD_maxCLevel()) cLevel = ZSTD_maxCLevel();
-        if (cLevel) {  /* 0 : does not change current level */
-            CCtxParams->disableLiteralCompression = (cLevel<0);  /* negative levels disable huffman */
-            CCtxParams->compressionLevel = cLevel;
+    case ZSTD_c_compressionLevel : {
+        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
+        if (value) {  /* 0 : does not change current level */
+            CCtxParams->compressionLevel = value;
         }
         if (CCtxParams->compressionLevel >= 0) return CCtxParams->compressionLevel;
         return 0;  /* return type (size_t) cannot represent negative values */
     }
 
-    case ZSTD_p_windowLog :
-        if (value>0)   /* 0 => use default */
-            CLAMPCHECK(value, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
+    case ZSTD_c_windowLog :
+        if (value!=0)   /* 0 => use default */
+            BOUNDCHECK(ZSTD_c_windowLog, value);
         CCtxParams->cParams.windowLog = value;
         return CCtxParams->cParams.windowLog;
 
-    case ZSTD_p_hashLog :
-        if (value>0)   /* 0 => use default */
-            CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
+    case ZSTD_c_hashLog :
+        if (value!=0)   /* 0 => use default */
+            BOUNDCHECK(ZSTD_c_hashLog, value);
         CCtxParams->cParams.hashLog = value;
         return CCtxParams->cParams.hashLog;
 
-    case ZSTD_p_chainLog :
-        if (value>0)   /* 0 => use default */
-            CLAMPCHECK(value, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
+    case ZSTD_c_chainLog :
+        if (value!=0)   /* 0 => use default */
+            BOUNDCHECK(ZSTD_c_chainLog, value);
         CCtxParams->cParams.chainLog = value;
         return CCtxParams->cParams.chainLog;
 
-    case ZSTD_p_searchLog :
-        if (value>0)   /* 0 => use default */
-            CLAMPCHECK(value, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
+    case ZSTD_c_searchLog :
+        if (value!=0)   /* 0 => use default */
+            BOUNDCHECK(ZSTD_c_searchLog, value);
         CCtxParams->cParams.searchLog = value;
         return value;
 
-    case ZSTD_p_minMatch :
-        if (value>0)   /* 0 => use default */
-            CLAMPCHECK(value, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
-        CCtxParams->cParams.searchLength = value;
-        return CCtxParams->cParams.searchLength;
+    case ZSTD_c_minMatch :
+        if (value!=0)   /* 0 => use default */
+            BOUNDCHECK(ZSTD_c_minMatch, value);
+        CCtxParams->cParams.minMatch = value;
+        return CCtxParams->cParams.minMatch;
 
-    case ZSTD_p_targetLength :
-        /* all values are valid. 0 => use default */
+    case ZSTD_c_targetLength :
+        BOUNDCHECK(ZSTD_c_targetLength, value);
         CCtxParams->cParams.targetLength = value;
         return CCtxParams->cParams.targetLength;
 
-    case ZSTD_p_compressionStrategy :
-        if (value>0)   /* 0 => use default */
-            CLAMPCHECK(value, (unsigned)ZSTD_fast, (unsigned)ZSTD_btultra);
+    case ZSTD_c_strategy :
+        if (value!=0)   /* 0 => use default */
+            BOUNDCHECK(ZSTD_c_strategy, value);
         CCtxParams->cParams.strategy = (ZSTD_strategy)value;
         return (size_t)CCtxParams->cParams.strategy;
 
-    case ZSTD_p_compressLiterals:
-        CCtxParams->disableLiteralCompression = !value;
-        return !CCtxParams->disableLiteralCompression;
-
-    case ZSTD_p_contentSizeFlag :
+    case ZSTD_c_contentSizeFlag :
         /* Content size written in frame header _when known_ (default:1) */
-        DEBUGLOG(4, "set content size flag = %u", (value>0));
-        CCtxParams->fParams.contentSizeFlag = value > 0;
+        DEBUGLOG(4, "set content size flag = %u", (value!=0));
+        CCtxParams->fParams.contentSizeFlag = value != 0;
         return CCtxParams->fParams.contentSizeFlag;
 
-    case ZSTD_p_checksumFlag :
+    case ZSTD_c_checksumFlag :
         /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
-        CCtxParams->fParams.checksumFlag = value > 0;
+        CCtxParams->fParams.checksumFlag = value != 0;
         return CCtxParams->fParams.checksumFlag;
 
-    case ZSTD_p_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
-        DEBUGLOG(4, "set dictIDFlag = %u", (value>0));
+    case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
+        DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
         CCtxParams->fParams.noDictIDFlag = !value;
         return !CCtxParams->fParams.noDictIDFlag;
 
-    case ZSTD_p_forceMaxWindow :
-        CCtxParams->forceWindow = (value > 0);
+    case ZSTD_c_forceMaxWindow :
+        CCtxParams->forceWindow = (value != 0);
         return CCtxParams->forceWindow;
 
-    case ZSTD_p_nbWorkers :
+    case ZSTD_c_forceAttachDict : {
+        const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
+        BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
+        CCtxParams->attachDictPref = pref;
+        return CCtxParams->attachDictPref;
+    }
+
+    case ZSTD_c_literalCompressionMode : {
+        const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value;
+        BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
+        CCtxParams->literalCompressionMode = lcm;
+        return CCtxParams->literalCompressionMode;
+    }
+
+    case ZSTD_c_nbWorkers :
 #ifndef ZSTD_MULTITHREAD
-        if (value>0) return ERROR(parameter_unsupported);
+        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
         return 0;
 #else
-        return ZSTDMT_CCtxParam_setNbWorkers(CCtxParams, value);
+        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
+        CCtxParams->nbWorkers = value;
+        return CCtxParams->nbWorkers;
 #endif
 
-    case ZSTD_p_jobSize :
+    case ZSTD_c_jobSize :
 #ifndef ZSTD_MULTITHREAD
-        return ERROR(parameter_unsupported);
+        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+        return 0;
 #else
-        return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_jobSize, value);
+        /* Adjust to the minimum non-default value. */
+        if (value != 0 && value < ZSTDMT_JOBSIZE_MIN)
+            value = ZSTDMT_JOBSIZE_MIN;
+        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
+        assert(value >= 0);
+        CCtxParams->jobSize = value;
+        return CCtxParams->jobSize;
 #endif
 
-    case ZSTD_p_overlapSizeLog :
+    case ZSTD_c_overlapLog :
 #ifndef ZSTD_MULTITHREAD
-        return ERROR(parameter_unsupported);
+        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+        return 0;
 #else
-        return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_overlapSectionLog, value);
+        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value));
+        CCtxParams->overlapLog = value;
+        return CCtxParams->overlapLog;
 #endif
 
-    case ZSTD_p_enableLongDistanceMatching :
-        CCtxParams->ldmParams.enableLdm = (value>0);
+    case ZSTD_c_rsyncable :
+#ifndef ZSTD_MULTITHREAD
+        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+        return 0;
+#else
+        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_rsyncable, &value));
+        CCtxParams->rsyncable = value;
+        return CCtxParams->rsyncable;
+#endif
+
+    case ZSTD_c_enableLongDistanceMatching :
+        CCtxParams->ldmParams.enableLdm = (value!=0);
         return CCtxParams->ldmParams.enableLdm;
 
-    case ZSTD_p_ldmHashLog :
-        if (value>0)   /* 0 ==> auto */
-            CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
+    case ZSTD_c_ldmHashLog :
+        if (value!=0)   /* 0 ==> auto */
+            BOUNDCHECK(ZSTD_c_ldmHashLog, value);
         CCtxParams->ldmParams.hashLog = value;
         return CCtxParams->ldmParams.hashLog;
 
-    case ZSTD_p_ldmMinMatch :
-        if (value>0)   /* 0 ==> default */
-            CLAMPCHECK(value, ZSTD_LDM_MINMATCH_MIN, ZSTD_LDM_MINMATCH_MAX);
+    case ZSTD_c_ldmMinMatch :
+        if (value!=0)   /* 0 ==> default */
+            BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
         CCtxParams->ldmParams.minMatchLength = value;
         return CCtxParams->ldmParams.minMatchLength;
 
-    case ZSTD_p_ldmBucketSizeLog :
-        if (value > ZSTD_LDM_BUCKETSIZELOG_MAX)
-            return ERROR(parameter_outOfBound);
+    case ZSTD_c_ldmBucketSizeLog :
+        if (value!=0)   /* 0 ==> default */
+            BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
         CCtxParams->ldmParams.bucketSizeLog = value;
         return CCtxParams->ldmParams.bucketSizeLog;
 
-    case ZSTD_p_ldmHashEveryLog :
-        if (value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
-            return ERROR(parameter_outOfBound);
-        CCtxParams->ldmParams.hashEveryLog = value;
-        return CCtxParams->ldmParams.hashEveryLog;
+    case ZSTD_c_ldmHashRateLog :
+        RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN,
+                        parameter_outOfBound);
+        CCtxParams->ldmParams.hashRateLog = value;
+        return CCtxParams->ldmParams.hashRateLog;
 
-    default: return ERROR(parameter_unsupported);
+    case ZSTD_c_targetCBlockSize :
+        if (value!=0)   /* 0 ==> default */
+            BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
+        CCtxParams->targetCBlockSize = value;
+        return CCtxParams->targetCBlockSize;
+
+    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
     }
 }
 
+size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value)
+{
+    return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
+}
+
+size_t ZSTD_CCtxParams_getParameter(
+        ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, int* value)
+{
+    switch(param)
+    {
+    case ZSTD_c_format :
+        *value = CCtxParams->format;
+        break;
+    case ZSTD_c_compressionLevel :
+        *value = CCtxParams->compressionLevel;
+        break;
+    case ZSTD_c_windowLog :
+        *value = (int)CCtxParams->cParams.windowLog;
+        break;
+    case ZSTD_c_hashLog :
+        *value = (int)CCtxParams->cParams.hashLog;
+        break;
+    case ZSTD_c_chainLog :
+        *value = (int)CCtxParams->cParams.chainLog;
+        break;
+    case ZSTD_c_searchLog :
+        *value = CCtxParams->cParams.searchLog;
+        break;
+    case ZSTD_c_minMatch :
+        *value = CCtxParams->cParams.minMatch;
+        break;
+    case ZSTD_c_targetLength :
+        *value = CCtxParams->cParams.targetLength;
+        break;
+    case ZSTD_c_strategy :
+        *value = (int)CCtxParams->cParams.strategy;
+        break;
+    case ZSTD_c_contentSizeFlag :
+        *value = CCtxParams->fParams.contentSizeFlag;
+        break;
+    case ZSTD_c_checksumFlag :
+        *value = CCtxParams->fParams.checksumFlag;
+        break;
+    case ZSTD_c_dictIDFlag :
+        *value = !CCtxParams->fParams.noDictIDFlag;
+        break;
+    case ZSTD_c_forceMaxWindow :
+        *value = CCtxParams->forceWindow;
+        break;
+    case ZSTD_c_forceAttachDict :
+        *value = CCtxParams->attachDictPref;
+        break;
+    case ZSTD_c_literalCompressionMode :
+        *value = CCtxParams->literalCompressionMode;
+        break;
+    case ZSTD_c_nbWorkers :
+#ifndef ZSTD_MULTITHREAD
+        assert(CCtxParams->nbWorkers == 0);
+#endif
+        *value = CCtxParams->nbWorkers;
+        break;
+    case ZSTD_c_jobSize :
+#ifndef ZSTD_MULTITHREAD
+        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
+#else
+        assert(CCtxParams->jobSize <= INT_MAX);
+        *value = (int)CCtxParams->jobSize;
+        break;
+#endif
+    case ZSTD_c_overlapLog :
+#ifndef ZSTD_MULTITHREAD
+        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
+#else
+        *value = CCtxParams->overlapLog;
+        break;
+#endif
+    case ZSTD_c_rsyncable :
+#ifndef ZSTD_MULTITHREAD
+        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
+#else
+        *value = CCtxParams->rsyncable;
+        break;
+#endif
+    case ZSTD_c_enableLongDistanceMatching :
+        *value = CCtxParams->ldmParams.enableLdm;
+        break;
+    case ZSTD_c_ldmHashLog :
+        *value = CCtxParams->ldmParams.hashLog;
+        break;
+    case ZSTD_c_ldmMinMatch :
+        *value = CCtxParams->ldmParams.minMatchLength;
+        break;
+    case ZSTD_c_ldmBucketSizeLog :
+        *value = CCtxParams->ldmParams.bucketSizeLog;
+        break;
+    case ZSTD_c_ldmHashRateLog :
+        *value = CCtxParams->ldmParams.hashRateLog;
+        break;
+    case ZSTD_c_targetCBlockSize :
+        *value = (int)CCtxParams->targetCBlockSize;
+        break;
+    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
+    }
+    return 0;
+}
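
The getter mirrors the setter, so a round trip through a ZSTD_CCtx_params reads back what was stored; a minimal sketch (error checks elided):

    ZSTD_CCtx_params params;
    int wlog = 0;
    ZSTD_CCtxParams_init(&params, 3 /* level */);
    ZSTD_CCtxParams_setParameter(&params, ZSTD_c_windowLog, 20);
    ZSTD_CCtxParams_getParameter(&params, ZSTD_c_windowLog, &wlog);
    assert(wlog == 20);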
+
 /** ZSTD_CCtx_setParametersUsingCCtxParams() :
 *  just copies `params` into `cctx` ;
 *  no action is performed : parameters are merely stored.
@@ -487,8 +804,9 @@
 size_t ZSTD_CCtx_setParametersUsingCCtxParams(
         ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
 {
-    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
-    if (cctx->cdict) return ERROR(stage_wrong);
+    DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
+    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+    RETURN_ERROR_IF(cctx->cdict, stage_wrong);
 
     cctx->requestedParams = *params;
     return 0;
@@ -497,33 +815,71 @@
 ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
 {
     DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
-    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
+    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
     cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
     return 0;
 }
 
+/**
+ * Initializes the local dict using the requested parameters.
+ * NOTE: This does not use the pledged src size, because it may be used for more
+ * than one compression.
+ */
+static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
+{
+    ZSTD_localDict* const dl = &cctx->localDict;
+    ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(
+            &cctx->requestedParams, 0, dl->dictSize);
+    if (dl->dict == NULL) {
+        /* No local dictionary. */
+        assert(dl->dictBuffer == NULL);
+        assert(dl->cdict == NULL);
+        assert(dl->dictSize == 0);
+        return 0;
+    }
+    if (dl->cdict != NULL) {
+        assert(cctx->cdict == dl->cdict);
+        /* Local dictionary already initialized. */
+        return 0;
+    }
+    assert(dl->dictSize > 0);
+    assert(cctx->cdict == NULL);
+    assert(cctx->prefixDict.dict == NULL);
+
+    dl->cdict = ZSTD_createCDict_advanced(
+            dl->dict,
+            dl->dictSize,
+            ZSTD_dlm_byRef,
+            dl->dictContentType,
+            cParams,
+            cctx->customMem);
+    RETURN_ERROR_IF(!dl->cdict, memory_allocation);
+    cctx->cdict = dl->cdict;
+    return 0;
+}
+
 size_t ZSTD_CCtx_loadDictionary_advanced(
         ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
         ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
 {
-    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
-    if (cctx->staticSize) return ERROR(memory_allocation);  /* no malloc for static CCtx */
+    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+    RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
+                    "no malloc for static CCtx");
     DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
-    ZSTD_freeCDict(cctx->cdictLocal);  /* in case one already exists */
-    if (dict==NULL || dictSize==0) {   /* no dictionary mode */
-        cctx->cdictLocal = NULL;
-        cctx->cdict = NULL;
+    ZSTD_clearAllDicts(cctx);  /* in case one already exists */
+    if (dict == NULL || dictSize == 0)  /* no dictionary mode */
+        return 0;
+    if (dictLoadMethod == ZSTD_dlm_byRef) {
+        cctx->localDict.dict = dict;
     } else {
-        ZSTD_compressionParameters const cParams =
-                ZSTD_getCParamsFromCCtxParams(&cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, dictSize);
-        cctx->cdictLocal = ZSTD_createCDict_advanced(
-                                dict, dictSize,
-                                dictLoadMethod, dictContentType,
-                                cParams, cctx->customMem);
-        cctx->cdict = cctx->cdictLocal;
-        if (cctx->cdictLocal == NULL)
-            return ERROR(memory_allocation);
+        void* dictBuffer = ZSTD_malloc(dictSize, cctx->customMem);
+        RETURN_ERROR_IF(!dictBuffer, memory_allocation);
+        memcpy(dictBuffer, dict, dictSize);
+        cctx->localDict.dictBuffer = dictBuffer;
+        cctx->localDict.dict = dictBuffer;
     }
+    cctx->localDict.dictSize = dictSize;
+    cctx->localDict.dictContentType = dictContentType;
     return 0;
 }
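
The two load modes handled above differ only in ownership; a sketch, assuming ZSTD_dct_auto to let the loader detect the dictionary type (pick one call):

    /* byRef : only the pointer is stored ; caller must keep `dict` alive */
    ZSTD_CCtx_loadDictionary_advanced(cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
    /* byCopy : content is duplicated into cctx->localDict.dictBuffer ; caller may free `dict` */
    ZSTD_CCtx_loadDictionary_advanced(cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);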
 
@@ -543,9 +899,10 @@
 
 size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
 {
-    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
+    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+    /* Free the existing local cdict (if any) to save memory. */
+    ZSTD_clearAllDicts(cctx);
     cctx->cdict = cdict;
-    memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));  /* exclusive */
     return 0;
 }
 
@@ -557,61 +914,67 @@
 size_t ZSTD_CCtx_refPrefix_advanced(
         ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
 {
-    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
-    cctx->cdict = NULL;   /* prefix discards any prior cdict */
+    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+    ZSTD_clearAllDicts(cctx);
     cctx->prefixDict.dict = prefix;
     cctx->prefixDict.dictSize = prefixSize;
     cctx->prefixDict.dictContentType = dictContentType;
     return 0;
 }
 
-static void ZSTD_startNewCompression(ZSTD_CCtx* cctx)
-{
-    cctx->streamStage = zcss_init;
-    cctx->pledgedSrcSizePlusOne = 0;
-}
-
 /*! ZSTD_CCtx_reset() :
 *  Also dumps dictionary (when parameters are reset) */
-void ZSTD_CCtx_reset(ZSTD_CCtx* cctx)
+size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
 {
-    ZSTD_startNewCompression(cctx);
-    cctx->cdict = NULL;
+    if ( (reset == ZSTD_reset_session_only)
+      || (reset == ZSTD_reset_session_and_parameters) ) {
+        cctx->streamStage = zcss_init;
+        cctx->pledgedSrcSizePlusOne = 0;
+    }
+    if ( (reset == ZSTD_reset_parameters)
+      || (reset == ZSTD_reset_session_and_parameters) ) {
+        RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+        ZSTD_clearAllDicts(cctx);
+        return ZSTD_CCtxParams_reset(&cctx->requestedParams);
+    }
+    return 0;
 }
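
A sketch of the three directives handled above:

    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);             /* abort current frame ; keep parameters and dictionary */
    ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);               /* drop parameters and dictionary ; only valid between frames */
    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);   /* both at once */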
 
+
 /** ZSTD_checkCParams() :
     control CParam values remain within authorized range.
     @return : 0, or an error code if one value is beyond authorized range */
 size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
 {
-    CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
-    CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
-    CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
-    CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
-    CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
-    if ((U32)(cParams.targetLength) < ZSTD_TARGETLENGTH_MIN)
-        return ERROR(parameter_unsupported);
-    if ((U32)(cParams.strategy) > (U32)ZSTD_btultra)
-        return ERROR(parameter_unsupported);
+    BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
+    BOUNDCHECK(ZSTD_c_chainLog,  (int)cParams.chainLog);
+    BOUNDCHECK(ZSTD_c_hashLog,   (int)cParams.hashLog);
+    BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
+    BOUNDCHECK(ZSTD_c_minMatch,  (int)cParams.minMatch);
+    BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
+    BOUNDCHECK(ZSTD_c_strategy,  cParams.strategy);
     return 0;
 }
 
 /** ZSTD_clampCParams() :
  *  make CParam values within valid range.
  *  @return : valid CParams */
-static ZSTD_compressionParameters ZSTD_clampCParams(ZSTD_compressionParameters cParams)
+static ZSTD_compressionParameters
+ZSTD_clampCParams(ZSTD_compressionParameters cParams)
 {
-#   define CLAMP(val,min,max) {      \
-        if (val<min) val=min;        \
-        else if (val>max) val=max;   \
+#   define CLAMP_TYPE(cParam, val, type) {                                \
+        ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);         \
+        if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound;      \
+        else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
     }
-    CLAMP(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
-    CLAMP(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
-    CLAMP(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
-    CLAMP(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
-    CLAMP(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
-    if ((U32)(cParams.targetLength) < ZSTD_TARGETLENGTH_MIN) cParams.targetLength = ZSTD_TARGETLENGTH_MIN;
-    if ((U32)(cParams.strategy) > (U32)ZSTD_btultra) cParams.strategy = ZSTD_btultra;
+#   define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
+    CLAMP(ZSTD_c_windowLog, cParams.windowLog);
+    CLAMP(ZSTD_c_chainLog,  cParams.chainLog);
+    CLAMP(ZSTD_c_hashLog,   cParams.hashLog);
+    CLAMP(ZSTD_c_searchLog, cParams.searchLog);
+    CLAMP(ZSTD_c_minMatch,  cParams.minMatch);
+    CLAMP(ZSTD_c_targetLength,cParams.targetLength);
+    CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
     return cParams;
 }
 
@@ -624,17 +987,21 @@
 }
 
 /** ZSTD_adjustCParams_internal() :
-    optimize `cPar` for a given input (`srcSize` and `dictSize`).
-    mostly downsizing to reduce memory consumption and initialization latency.
-    Both `srcSize` and `dictSize` are optional (use 0 if unknown).
-    Note : cPar is considered validated at this stage. Use ZSTD_checkCParams() to ensure that condition. */
-ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
+ *  optimize `cPar` for a specified input (`srcSize` and `dictSize`).
+ *  mostly downsize to reduce memory consumption and initialization latency.
+ * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
+ *  note : for the time being, `srcSize==0` means "unknown" too, for compatibility with older convention.
+ *  condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
+static ZSTD_compressionParameters
+ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
+                            unsigned long long srcSize,
+                            size_t dictSize)
 {
     static const U64 minSrcSize = 513; /* (1<<9) + 1 */
     static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
     assert(ZSTD_checkCParams(cPar)==0);
 
-    if (dictSize && (srcSize+1<2) /* srcSize unknown */ )
+    if (dictSize && (srcSize+1<2) /* ZSTD_CONTENTSIZE_UNKNOWN and 0 mean "unknown" */ )
         srcSize = minSrcSize;  /* presumed small when there is a dictionary */
     else if (srcSize == 0)
         srcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* 0 == unknown : presumed large */
@@ -648,35 +1015,55 @@
                             ZSTD_highbit32(tSize-1) + 1;
         if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
     }
-    if (cPar.hashLog > cPar.windowLog) cPar.hashLog = cPar.windowLog;
+    if (cPar.hashLog > cPar.windowLog+1) cPar.hashLog = cPar.windowLog+1;
     {   U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
         if (cycleLog > cPar.windowLog)
             cPar.chainLog -= (cycleLog - cPar.windowLog);
     }
 
     if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
-        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* required for frame header */
+        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* minimum wlog required for valid frame header */
 
     return cPar;
 }
 
-ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
+ZSTD_compressionParameters
+ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
+                   unsigned long long srcSize,
+                   size_t dictSize)
 {
-    cPar = ZSTD_clampCParams(cPar);
+    cPar = ZSTD_clampCParams(cPar);   /* resulting cPar is necessarily valid (all parameters within range) */
     return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize);
 }
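
For example, a small known input shrinks the working parameters (sizes illustrative):

    ZSTD_compressionParameters const base = ZSTD_getCParams(19, 0, 0);           /* level 19, size unknown */
    ZSTD_compressionParameters const fit  = ZSTD_adjustCParams(base, 4096, 0);   /* 4 KB input, no dictionary */
    assert(fit.windowLog <= base.windowLog);   /* adjustment only ever downsizes */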
 
-static size_t ZSTD_sizeof_matchState(ZSTD_compressionParameters const* cParams, const U32 forCCtx)
+ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
+        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize)
+{
+    ZSTD_compressionParameters cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize);
+    if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
+    if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog;
+    if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog;
+    if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog;
+    if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog;
+    if (CCtxParams->cParams.minMatch) cParams.minMatch = CCtxParams->cParams.minMatch;
+    if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength;
+    if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy;
+    assert(!ZSTD_checkCParams(cParams));
+    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize);
+}
+
+static size_t
+ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
+                       const U32 forCCtx)
 {
     size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
     size_t const hSize = ((size_t)1) << cParams->hashLog;
-    U32    const hashLog3 = (forCCtx && cParams->searchLength==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
+    U32    const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
     size_t const h3Size = ((size_t)1) << hashLog3;
     size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
     size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits)) * sizeof(U32)
                           + (ZSTD_OPT_NUM+1) * (sizeof(ZSTD_match_t)+sizeof(ZSTD_optimal_t));
-    size_t const optSpace = (forCCtx && ((cParams->strategy == ZSTD_btopt) ||
-                                         (cParams->strategy == ZSTD_btultra)))
+    size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
                                 ? optPotentialSpace
                                 : 0;
     DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
@@ -686,14 +1073,13 @@
 
 size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
 {
-    /* Estimate CCtx size is supported for single-threaded compression only. */
-    if (params->nbWorkers > 0) { return ERROR(GENERIC); }
+    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
     {   ZSTD_compressionParameters const cParams =
                 ZSTD_getCParamsFromCCtxParams(params, 0, 0);
         size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
-        U32    const divider = (cParams.searchLength==3) ? 3 : 4;
+        U32    const divider = (cParams.minMatch==3) ? 3 : 4;
         size_t const maxNbSeq = blockSize / divider;
-        size_t const tokenSpace = blockSize + 11*maxNbSeq;
+        size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
         size_t const entropySpace = HUF_WORKSPACE_SIZE;
         size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);
         size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 1);
@@ -726,7 +1112,7 @@
 {
     int level;
     size_t memBudget = 0;
-    for (level=1; level<=compressionLevel; level++) {
+    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
         size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
         if (newMB > memBudget) memBudget = newMB;
     }
@@ -735,10 +1121,12 @@
 
 size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
 {
-    if (params->nbWorkers > 0) { return ERROR(GENERIC); }
-    {   size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
-        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << params->cParams.windowLog);
-        size_t const inBuffSize = ((size_t)1 << params->cParams.windowLog) + blockSize;
+    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
+    {   ZSTD_compressionParameters const cParams =
+                ZSTD_getCParamsFromCCtxParams(params, 0, 0);
+        size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
+        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
+        size_t const inBuffSize = ((size_t)1 << cParams.windowLog) + blockSize;
         size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
         size_t const streamingSize = inBuffSize + outBuffSize;
 
@@ -752,15 +1140,17 @@
     return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
 }
 
-static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel) {
+static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
+{
     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
     return ZSTD_estimateCStreamSize_usingCParams(cParams);
 }
 
-size_t ZSTD_estimateCStreamSize(int compressionLevel) {
+size_t ZSTD_estimateCStreamSize(int compressionLevel)
+{
     int level;
     size_t memBudget = 0;
-    for (level=1; level<=compressionLevel; level++) {
+    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
         size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
         if (newMB > memBudget) memBudget = newMB;
     }
@@ -786,9 +1176,27 @@
         fp.ingested = cctx->consumedSrcSize + buffered;
         fp.consumed = cctx->consumedSrcSize;
         fp.produced = cctx->producedCSize;
+        fp.flushed  = cctx->producedCSize;   /* simplified; some data might still be left within streaming output buffer */
+        fp.currentJobID = 0;
+        fp.nbActiveWorkers = 0;
         return fp;
 }   }
 
+/*! ZSTD_toFlushNow()
+ *  Only useful for multithreading scenarios currently (nbWorkers >= 1).
+ */
+size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
+{
+#ifdef ZSTD_MULTITHREAD
+    if (cctx->appliedParams.nbWorkers > 0) {
+        return ZSTDMT_toFlushNow(cctx->mtctx);
+    }
+#endif
+    (void)cctx;
+    return 0;   /* over-simplification; could also check whether the context is currently running in streaming mode and, if so, report how many bytes are left to flush within the output buffer */
+}
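/* A minimal polling sketch, assuming a multithreaded streaming setup where
 * cctx, in, dst and dstCapacity are already defined: between flush rounds,
 * ZSTD_toFlushNow() reports how much compressed data the workers are still
 * holding, so the caller can avoid issuing pointless flush calls. */
ZSTD_outBuffer out = { dst, dstCapacity, 0 };
for (;;) {
    size_t const remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_flush);
    if (ZSTD_isError(remaining) || remaining == 0) break;   /* error, or fully flushed */
    if (ZSTD_toFlushNow(cctx) == 0) break;   /* nothing ready right now; come back later */
}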
+
+
 
 static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1,
                                   ZSTD_compressionParameters cParams2)
@@ -796,7 +1204,21 @@
     return (cParams1.hashLog  == cParams2.hashLog)
          & (cParams1.chainLog == cParams2.chainLog)
          & (cParams1.strategy == cParams2.strategy)   /* opt parser space */
-         & ((cParams1.searchLength==3) == (cParams2.searchLength==3));  /* hashlog3 space */
+         & ((cParams1.minMatch==3) == (cParams2.minMatch==3));  /* hashlog3 space */
+}
+
+static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
+                                    ZSTD_compressionParameters cParams2)
+{
+    (void)cParams1;
+    (void)cParams2;
+    assert(cParams1.windowLog    == cParams2.windowLog);
+    assert(cParams1.chainLog     == cParams2.chainLog);
+    assert(cParams1.hashLog      == cParams2.hashLog);
+    assert(cParams1.searchLog    == cParams2.searchLog);
+    assert(cParams1.minMatch     == cParams2.minMatch);
+    assert(cParams1.targetLength == cParams2.targetLength);
+    assert(cParams1.strategy     == cParams2.strategy);
 }
 
 /** The parameters are equivalent if ldm is not enabled in both sets or
@@ -809,7 +1231,7 @@
             ldmParams1.hashLog == ldmParams2.hashLog &&
             ldmParams1.bucketSizeLog == ldmParams2.bucketSizeLog &&
             ldmParams1.minMatchLength == ldmParams2.minMatchLength &&
-            ldmParams1.hashEveryLog == ldmParams2.hashEveryLog);
+            ldmParams1.hashRateLog == ldmParams2.hashRateLog);
 }
 
 typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e;
@@ -817,33 +1239,51 @@
 /* ZSTD_sufficientBuff() :
  * check internal buffers exist for streaming if buffPol == ZSTDb_buffered .
  * Note : they are assumed to be correctly sized if ZSTD_equivalentCParams()==1 */
-static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t blockSize1,
+static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t maxNbSeq1,
+                            size_t maxNbLit1,
                             ZSTD_buffered_policy_e buffPol2,
                             ZSTD_compressionParameters cParams2,
                             U64 pledgedSrcSize)
 {
     size_t const windowSize2 = MAX(1, (size_t)MIN(((U64)1 << cParams2.windowLog), pledgedSrcSize));
     size_t const blockSize2 = MIN(ZSTD_BLOCKSIZE_MAX, windowSize2);
+    size_t const maxNbSeq2 = blockSize2 / ((cParams2.minMatch == 3) ? 3 : 4);
+    size_t const maxNbLit2 = blockSize2;
     size_t const neededBufferSize2 = (buffPol2==ZSTDb_buffered) ? windowSize2 + blockSize2 : 0;
-    DEBUGLOG(4, "ZSTD_sufficientBuff: is windowSize2=%u <= wlog1=%u",
-                (U32)windowSize2, cParams2.windowLog);
-    DEBUGLOG(4, "ZSTD_sufficientBuff: is blockSize2=%u <= blockSize1=%u",
-                (U32)blockSize2, (U32)blockSize1);
-    return (blockSize2 <= blockSize1) /* seqStore space depends on blockSize */
+    DEBUGLOG(4, "ZSTD_sufficientBuff: is neededBufferSize2=%u <= bufferSize1=%u",
+                (U32)neededBufferSize2, (U32)bufferSize1);
+    DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbSeq2=%u <= maxNbSeq1=%u",
+                (U32)maxNbSeq2, (U32)maxNbSeq1);
+    DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbLit2=%u <= maxNbLit1=%u",
+                (U32)maxNbLit2, (U32)maxNbLit1);
+    return (maxNbLit2 <= maxNbLit1)
+         & (maxNbSeq2 <= maxNbSeq1)
          & (neededBufferSize2 <= bufferSize1);
 }
 
 /** Equivalence for resetCCtx purposes */
 static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1,
                                  ZSTD_CCtx_params params2,
-                                 size_t buffSize1, size_t blockSize1,
+                                 size_t buffSize1,
+                                 size_t maxNbSeq1, size_t maxNbLit1,
                                  ZSTD_buffered_policy_e buffPol2,
                                  U64 pledgedSrcSize)
 {
     DEBUGLOG(4, "ZSTD_equivalentParams: pledgedSrcSize=%u", (U32)pledgedSrcSize);
-    return ZSTD_equivalentCParams(params1.cParams, params2.cParams) &&
-           ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams) &&
-           ZSTD_sufficientBuff(buffSize1, blockSize1, buffPol2, params2.cParams, pledgedSrcSize);
+    if (!ZSTD_equivalentCParams(params1.cParams, params2.cParams)) {
+      DEBUGLOG(4, "ZSTD_equivalentCParams() == 0");
+      return 0;
+    }
+    if (!ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams)) {
+      DEBUGLOG(4, "ZSTD_equivalentLdmParams() == 0");
+      return 0;
+    }
+    if (!ZSTD_sufficientBuff(buffSize1, maxNbSeq1, maxNbLit1, buffPol2,
+                             params2.cParams, pledgedSrcSize)) {
+      DEBUGLOG(4, "ZSTD_sufficientBuff() == 0");
+      return 0;
+    }
+    return 1;
 }
 
 static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
@@ -851,23 +1291,24 @@
     int i;
     for (i = 0; i < ZSTD_REP_NUM; ++i)
         bs->rep[i] = repStartValue[i];
-    bs->entropy.hufCTable_repeatMode = HUF_repeat_none;
-    bs->entropy.offcode_repeatMode = FSE_repeat_none;
-    bs->entropy.matchlength_repeatMode = FSE_repeat_none;
-    bs->entropy.litlength_repeatMode = FSE_repeat_none;
+    bs->entropy.huf.repeatMode = HUF_repeat_none;
+    bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
+    bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
+    bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
 }
 
 /*! ZSTD_invalidateMatchState()
- * Invalidate all the matches in the match finder tables.
- * Requires nextSrc and base to be set (can be NULL).
+ *  Invalidate all the matches in the match finder tables.
+ *  Requires nextSrc and base to be set (can be NULL).
  */
 static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
 {
     ZSTD_window_clear(&ms->window);
 
-    ms->nextToUpdate = ms->window.dictLimit + 1;
+    ms->nextToUpdate = ms->window.dictLimit;
     ms->loadedDictEnd = 0;
     ms->opt.litLengthSum = 0;  /* force reset of btopt stats */
+    ms->dictMatchState = NULL;
 }
 
 /*! ZSTD_continueCCtx() :
@@ -880,6 +1321,7 @@
 
     cctx->blockSize = blockSize;   /* previous block size could be different even for same windowLog, due to pledgedSrcSize */
     cctx->appliedParams = params;
+    cctx->blockState.matchState.cParams = params.cParams;
     cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
     cctx->consumedSrcSize = 0;
     cctx->producedCSize = 0;
@@ -900,11 +1342,17 @@
 
 typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e;
 
-static void* ZSTD_reset_matchState(ZSTD_matchState_t* ms, void* ptr, ZSTD_compressionParameters const* cParams, ZSTD_compResetPolicy_e const crp, U32 const forCCtx)
+typedef enum { ZSTD_resetTarget_CDict, ZSTD_resetTarget_CCtx } ZSTD_resetTarget_e;
+
+static void*
+ZSTD_reset_matchState(ZSTD_matchState_t* ms,
+                      void* ptr,
+                const ZSTD_compressionParameters* cParams,
+                      ZSTD_compResetPolicy_e const crp, ZSTD_resetTarget_e const forWho)
 {
     size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
     size_t const hSize = ((size_t)1) << cParams->hashLog;
-    U32    const hashLog3 = (forCCtx && cParams->searchLength==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
+    U32    const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
     size_t const h3Size = ((size_t)1) << hashLog3;
     size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
 
@@ -912,12 +1360,15 @@
 
     ms->hashLog3 = hashLog3;
     memset(&ms->window, 0, sizeof(ms->window));
+    ms->window.dictLimit = 1;    /* start from 1, so that 1st position is valid */
+    ms->window.lowLimit = 1;     /* ensures first and later CCtx usages compress the same */
+    ms->window.nextSrc = ms->window.base + 1;   /* see issue #1241 */
     ZSTD_invalidateMatchState(ms);
 
     /* opt parser space */
-    if (forCCtx && ((cParams->strategy == ZSTD_btopt) | (cParams->strategy == ZSTD_btultra))) {
+    if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
         DEBUGLOG(4, "reserving optimal parser space");
-        ms->opt.litFreq = (U32*)ptr;
+        ms->opt.litFreq = (unsigned*)ptr;
         ms->opt.litLengthFreq = ms->opt.litFreq + (1<<Litbits);
         ms->opt.matchLengthFreq = ms->opt.litLengthFreq + (MaxLL+1);
         ms->opt.offCodeFreq = ms->opt.matchLengthFreq + (MaxML+1);
@@ -937,14 +1388,37 @@
     ms->hashTable3 = ms->chainTable + chainSize;
     ptr = ms->hashTable3 + h3Size;
 
+    ms->cParams = *cParams;
+
     assert(((size_t)ptr & 3) == 0);
     return ptr;
 }
 
+/* ZSTD_indexTooCloseToMax() :
+ * minor optimization : prefer memset() rather than reduceIndex()
+ * which is measurably slow in some circumstances (reported for Visual Studio).
+ * Works when re-using a context for a lot of smallish inputs :
+ * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,
+ * memset() will be triggered before reduceIndex().
+ */
+#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)
+static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
+{
+    return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
+}
+
+#define ZSTD_WORKSPACETOOLARGE_FACTOR 3 /* define "workspace is too large" as this number of times larger than needed */
+#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128  /* when the workspace stays oversized
+                                         * for at least this many consecutive resets,
+                                         * the context's memory usage is considered wasteful,
+                                         * because it's sized for a worst-case scenario which rarely happens;
+                                         * in that case, resize it down to free some memory */
+
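/* The resize policy these two constants drive, written out as a standalone
 * sketch (hypothetical helper; mirrors the bookkeeping performed inside
 * ZSTD_resetCCtx_internal() below): */
static int workspaceNeedsResize(size_t wsSize, size_t neededSpace, unsigned* oversizedDuration)
{
    int const tooSmall = wsSize < neededSpace;
    int const tooLarge = wsSize > ZSTD_WORKSPACETOOLARGE_FACTOR * neededSpace;
    int const wasteful = tooLarge && (*oversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION);
    *oversizedDuration = tooLarge ? *oversizedDuration + 1 : 0;
    /* resize when unusably small, or when wasteful for long enough */
    return tooSmall || wasteful;
}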
 /*! ZSTD_resetCCtx_internal() :
     note : `params` are assumed fully validated at this stage */
 static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
-                                      ZSTD_CCtx_params params, U64 pledgedSrcSize,
+                                      ZSTD_CCtx_params params,
+                                      U64 const pledgedSrcSize,
                                       ZSTD_compResetPolicy_e const crp,
                                       ZSTD_buffered_policy_e const zbuff)
 {
@@ -954,34 +1428,43 @@
 
     if (crp == ZSTDcrp_continue) {
         if (ZSTD_equivalentParams(zc->appliedParams, params,
-                                zc->inBuffSize, zc->blockSize,
-                                zbuff, pledgedSrcSize)) {
-            DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> continue mode (wLog1=%u, blockSize1=%u)",
-                        zc->appliedParams.cParams.windowLog, (U32)zc->blockSize);
-            return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
-    }   }
+                                  zc->inBuffSize,
+                                  zc->seqStore.maxNbSeq, zc->seqStore.maxNbLit,
+                                  zbuff, pledgedSrcSize) ) {
+            DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> consider continue mode");
+            zc->workSpaceOversizedDuration += (zc->workSpaceOversizedDuration > 0);   /* if it was too large, it still is */
+            if (zc->workSpaceOversizedDuration <= ZSTD_WORKSPACETOOLARGE_MAXDURATION) {
+                DEBUGLOG(4, "continue mode confirmed (wLog1=%u, blockSize1=%zu)",
+                            zc->appliedParams.cParams.windowLog, zc->blockSize);
+                if (ZSTD_indexTooCloseToMax(zc->blockState.matchState.window)) {
+                    /* prefer a reset, faster than a rescale */
+                    ZSTD_reset_matchState(&zc->blockState.matchState,
+                                           zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32,
+                                          &params.cParams,
+                                           crp, ZSTD_resetTarget_CCtx);
+                }
+                return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
+    }   }   }
     DEBUGLOG(4, "ZSTD_equivalentParams()==0 -> reset CCtx");
 
     if (params.ldmParams.enableLdm) {
         /* Adjust long distance matching parameters */
-        params.ldmParams.windowLog = params.cParams.windowLog;
         ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
         assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
-        assert(params.ldmParams.hashEveryLog < 32);
-        zc->ldmState.hashPower =
-                ZSTD_ldm_getHashPower(params.ldmParams.minMatchLength);
+        assert(params.ldmParams.hashRateLog < 32);
+        zc->ldmState.hashPower = ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
     }
 
     {   size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
         size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
-        U32    const divider = (params.cParams.searchLength==3) ? 3 : 4;
+        U32    const divider = (params.cParams.minMatch==3) ? 3 : 4;
         size_t const maxNbSeq = blockSize / divider;
-        size_t const tokenSpace = blockSize + 11*maxNbSeq;
+        size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
         size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0;
         size_t const buffInSize = (zbuff==ZSTDb_buffered) ? windowSize + blockSize : 0;
         size_t const matchStateSize = ZSTD_sizeof_matchState(&params.cParams, /* forCCtx */ 1);
         size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
-        void* ptr;
+        void* ptr;   /* used to partition workSpace */
 
         /* Check if workSpace is large enough, alloc a new one if needed */
         {   size_t const entropySpace = HUF_WORKSPACE_SIZE;
@@ -993,25 +1476,33 @@
             size_t const neededSpace = entropySpace + blockStateSpace + ldmSpace +
                                        ldmSeqSpace + matchStateSize + tokenSpace +
                                        bufferSpace;
-            DEBUGLOG(4, "Need %uKB workspace, including %uKB for match state, and %uKB for buffers",
-                        (U32)(neededSpace>>10), (U32)(matchStateSize>>10), (U32)(bufferSpace>>10));
-            DEBUGLOG(4, "windowSize: %u - blockSize: %u", (U32)windowSize, (U32)blockSize);
 
-            if (zc->workSpaceSize < neededSpace) {  /* too small : resize */
-                DEBUGLOG(4, "Need to update workSpaceSize from %uK to %uK",
-                            (unsigned)(zc->workSpaceSize>>10),
-                            (unsigned)(neededSpace>>10));
-                /* static cctx : no resize, error out */
-                if (zc->staticSize) return ERROR(memory_allocation);
+            int const workSpaceTooSmall = zc->workSpaceSize < neededSpace;
+            int const workSpaceTooLarge = zc->workSpaceSize > ZSTD_WORKSPACETOOLARGE_FACTOR * neededSpace;
+            int const workSpaceWasteful = workSpaceTooLarge && (zc->workSpaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION);
+            zc->workSpaceOversizedDuration = workSpaceTooLarge ? zc->workSpaceOversizedDuration+1 : 0;
+
+            DEBUGLOG(4, "Need %zuKB workspace, including %zuKB for match state, and %zuKB for buffers",
+                        neededSpace>>10, matchStateSize>>10, bufferSpace>>10);
+            DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
+
+            if (workSpaceTooSmall || workSpaceWasteful) {
+                DEBUGLOG(4, "Resize workSpaceSize from %zuKB to %zuKB",
+                            zc->workSpaceSize >> 10,
+                            neededSpace >> 10);
+
+                RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
 
                 zc->workSpaceSize = 0;
                 ZSTD_free(zc->workSpace, zc->customMem);
                 zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
-                if (zc->workSpace == NULL) return ERROR(memory_allocation);
+                RETURN_ERROR_IF(zc->workSpace == NULL, memory_allocation);
                 zc->workSpaceSize = neededSpace;
-                ptr = zc->workSpace;
+                zc->workSpaceOversizedDuration = 0;
 
-                /* Statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
+                /* Statically sized space.
+                 * entropyWorkspace never moves,
+                 * though prev/next block swap places */
                 assert(((size_t)zc->workSpace & 3) == 0);   /* ensure correct alignment */
                 assert(zc->workSpaceSize >= 2 * sizeof(ZSTD_compressedBlockState_t));
                 zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)zc->workSpace;
@@ -1022,13 +1513,14 @@
 
         /* init params */
         zc->appliedParams = params;
+        zc->blockState.matchState.cParams = params.cParams;
         zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
         zc->consumedSrcSize = 0;
         zc->producedCSize = 0;
         if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
             zc->appliedParams.fParams.contentSizeFlag = 0;
         DEBUGLOG(4, "pledged content size : %u ; flag : %u",
-            (U32)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
+            (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
         zc->blockSize = blockSize;
 
         XXH64_reset(&zc->xxhState, 0);
@@ -1037,7 +1529,10 @@
 
         ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
 
-        ptr = zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32;
+        ptr = ZSTD_reset_matchState(&zc->blockState.matchState,
+                                     zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32,
+                                    &params.cParams,
+                                     crp, ZSTD_resetTarget_CCtx);
 
         /* ldm hash table */
         /* initialize bucketOffsets table later for pointer alignment */
@@ -1055,16 +1550,19 @@
         }
         assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
 
-        ptr = ZSTD_reset_matchState(&zc->blockState.matchState, ptr, &params.cParams, crp, /* forCCtx */ 1);
-
         /* sequences storage */
+        zc->seqStore.maxNbSeq = maxNbSeq;
         zc->seqStore.sequencesStart = (seqDef*)ptr;
         ptr = zc->seqStore.sequencesStart + maxNbSeq;
         zc->seqStore.llCode = (BYTE*) ptr;
         zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
         zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
         zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
-        ptr = zc->seqStore.litStart + blockSize;
+        /* ZSTD_wildcopy() is used to copy into the literals buffer,
+         * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
+         */
+        zc->seqStore.maxNbLit = blockSize;
+        ptr = zc->seqStore.litStart + blockSize + WILDCOPY_OVERLENGTH;
 
         /* ldm bucketOffsets table */
         if (params.ldmParams.enableLdm) {
@@ -1098,28 +1596,109 @@
     assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
 }
 
-static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
+/* These are the approximate sizes for each strategy past which copying the
+ * dictionary tables into the working context is faster than using them
+ * in-place.
+ */
+static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
+    8 KB,  /* unused */
+    8 KB,  /* ZSTD_fast */
+    16 KB, /* ZSTD_dfast */
+    32 KB, /* ZSTD_greedy */
+    32 KB, /* ZSTD_lazy */
+    32 KB, /* ZSTD_lazy2 */
+    32 KB, /* ZSTD_btlazy2 */
+    32 KB, /* ZSTD_btopt */
+    8 KB,  /* ZSTD_btultra */
+    8 KB   /* ZSTD_btultra2 */
+};
+
+static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
+                                 ZSTD_CCtx_params params,
+                                 U64 pledgedSrcSize)
+{
+    size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
+    return ( pledgedSrcSize <= cutoff
+          || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
+          || params.attachDictPref == ZSTD_dictForceAttach )
+        && params.attachDictPref != ZSTD_dictForceCopy
+        && !params.forceWindow; /* dictMatchState isn't correctly
+                                 * handled in _enforceMaxDist */
+}
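/* A minimal sketch of overriding this heuristic from the public API
 * (ZSTD_c_forceAttachDict is experimental, so this assumes
 * ZSTD_STATIC_LINKING_ONLY; cctx and cdict assumed already created): */
ZSTD_CCtx_refCDict(cctx, cdict);
ZSTD_CCtx_setParameter(cctx, ZSTD_c_forceAttachDict, ZSTD_dictForceAttach);  /* always reference in place */
/* ... or ZSTD_dictForceCopy to always copy the tables, regardless of srcSize */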
+
+static size_t
+ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
+                        const ZSTD_CDict* cdict,
+                        ZSTD_CCtx_params params,
+                        U64 pledgedSrcSize,
+                        ZSTD_buffered_policy_e zbuff)
+{
+    {   const ZSTD_compressionParameters* const cdict_cParams = &cdict->matchState.cParams;
+        unsigned const windowLog = params.cParams.windowLog;
+        assert(windowLog != 0);
+        /* Resize working context table params for input only, since the dict
+         * has its own tables. */
+        params.cParams = ZSTD_adjustCParams_internal(*cdict_cParams, pledgedSrcSize, 0);
+        params.cParams.windowLog = windowLog;
+        ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+                                ZSTDcrp_continue, zbuff);
+        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
+    }
+
+    {   const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
+                                  - cdict->matchState.window.base);
+        const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
+        if (cdictLen == 0) {
+            /* don't even attach dictionaries with no contents */
+            DEBUGLOG(4, "skipping attaching empty dictionary");
+        } else {
+            DEBUGLOG(4, "attaching dictionary into context");
+            cctx->blockState.matchState.dictMatchState = &cdict->matchState;
+
+            /* prep working match state so dict matches never have negative indices
+             * when they are translated to the working context's index space. */
+            if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
+                cctx->blockState.matchState.window.nextSrc =
+                    cctx->blockState.matchState.window.base + cdictEnd;
+                ZSTD_window_clear(&cctx->blockState.matchState.window);
+            }
+            /* loadedDictEnd is expressed within the referential of the active context */
+            cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
+    }   }
+
+    cctx->dictID = cdict->dictID;
+
+    /* copy block state */
+    memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
+
+    return 0;
+}
+
+static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
                             const ZSTD_CDict* cdict,
-                            unsigned windowLog,
-                            ZSTD_frameParameters fParams,
+                            ZSTD_CCtx_params params,
                             U64 pledgedSrcSize,
                             ZSTD_buffered_policy_e zbuff)
 {
-    {   ZSTD_CCtx_params params = cctx->requestedParams;
+    const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
+
+    DEBUGLOG(4, "copying dictionary into context");
+
+    {   unsigned const windowLog = params.cParams.windowLog;
+        assert(windowLog != 0);
         /* Copy only compression parameters related to tables. */
-        params.cParams = cdict->cParams;
-        if (windowLog) params.cParams.windowLog = windowLog;
-        params.fParams = fParams;
+        params.cParams = *cdict_cParams;
+        params.cParams.windowLog = windowLog;
         ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                 ZSTDcrp_noMemset, zbuff);
-        assert(cctx->appliedParams.cParams.strategy == cdict->cParams.strategy);
-        assert(cctx->appliedParams.cParams.hashLog == cdict->cParams.hashLog);
-        assert(cctx->appliedParams.cParams.chainLog == cdict->cParams.chainLog);
+        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
+        assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
+        assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
     }
 
     /* copy tables */
-    {   size_t const chainSize = (cdict->cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict->cParams.chainLog);
-        size_t const hSize =  (size_t)1 << cdict->cParams.hashLog;
+    {   size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
+        size_t const hSize =  (size_t)1 << cdict_cParams->hashLog;
         size_t const tableSpace = (chainSize + hSize) * sizeof(U32);
         assert((U32*)cctx->blockState.matchState.chainTable == (U32*)cctx->blockState.matchState.hashTable + hSize);  /* chainTable must follow hashTable */
         assert((U32*)cctx->blockState.matchState.hashTable3 == (U32*)cctx->blockState.matchState.chainTable + chainSize);
@@ -1127,6 +1706,7 @@
         assert((U32*)cdict->matchState.hashTable3 == (U32*)cdict->matchState.chainTable + chainSize);
         memcpy(cctx->blockState.matchState.hashTable, cdict->matchState.hashTable, tableSpace);   /* presumes all tables follow each other */
     }
+
     /* Zero the hashTable3, since the cdict never fills it */
     {   size_t const h3Size = (size_t)1 << cctx->blockState.matchState.hashLog3;
         assert(cdict->matchState.hashLog3 == 0);
@@ -1134,14 +1714,13 @@
     }
 
     /* copy dictionary offsets */
-    {
-        ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
+    {   ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
         ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
         dstMatchState->window       = srcMatchState->window;
         dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
-        dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3;
         dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
     }
+
     cctx->dictID = cdict->dictID;
 
     /* copy block state */
@@ -1150,6 +1729,28 @@
     return 0;
 }
 
+/* We have a choice between copying the dictionary context into the working
+ * context, or referencing the dictionary context from the working context
+ * in-place. We decide here which strategy to use. */
+static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
+                            const ZSTD_CDict* cdict,
+                            ZSTD_CCtx_params params,
+                            U64 pledgedSrcSize,
+                            ZSTD_buffered_policy_e zbuff)
+{
+
+    DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
+                (unsigned)pledgedSrcSize);
+
+    if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
+        return ZSTD_resetCCtx_byAttachingCDict(
+            cctx, cdict, params, pledgedSrcSize, zbuff);
+    } else {
+        return ZSTD_resetCCtx_byCopyingCDict(
+            cctx, cdict, params, pledgedSrcSize, zbuff);
+    }
+}
+
 /*! ZSTD_copyCCtx_internal() :
  *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
  *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
@@ -1164,7 +1765,7 @@
                             ZSTD_buffered_policy_e zbuff)
 {
     DEBUGLOG(5, "ZSTD_copyCCtx_internal");
-    if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong);
+    RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong);
 
     memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
     {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
@@ -1192,11 +1793,10 @@
 
     /* copy dictionary offsets */
     {
-        ZSTD_matchState_t const* srcMatchState = &srcCCtx->blockState.matchState;
+        const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
         ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
         dstMatchState->window       = srcMatchState->window;
         dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
-        dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3;
         dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
     }
     dstCCtx->dictID = srcCCtx->dictID;
@@ -1266,16 +1866,15 @@
 
 /*! ZSTD_reduceIndex() :
 *   rescale all indexes to avoid future overflow (indexes are U32) */
-static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)
+static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
 {
-    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
-    {   U32 const hSize = (U32)1 << zc->appliedParams.cParams.hashLog;
+    {   U32 const hSize = (U32)1 << params->cParams.hashLog;
         ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
     }
 
-    if (zc->appliedParams.cParams.strategy != ZSTD_fast) {
-        U32 const chainSize = (U32)1 << zc->appliedParams.cParams.chainLog;
-        if (zc->appliedParams.cParams.strategy == ZSTD_btlazy2)
+    if (params->cParams.strategy != ZSTD_fast) {
+        U32 const chainSize = (U32)1 << params->cParams.chainLog;
+        if (params->cParams.strategy == ZSTD_btlazy2)
             ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
         else
             ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
@@ -1294,21 +1893,22 @@
 
 /* See doc/zstd_compression_format.md for detailed format description */
 
-size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+static size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
 {
-    if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
+    U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
+    RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
+                    dstSize_tooSmall);
+    MEM_writeLE24(dst, cBlockHeader24);
     memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
-    MEM_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
-    return ZSTD_blockHeaderSize+srcSize;
+    return ZSTD_blockHeaderSize + srcSize;
 }
 
-
 static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
 {
     BYTE* const ostart = (BYTE* const)dst;
     U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
 
-    if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall);
+    RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall);
 
     switch(flSize)
     {
@@ -1356,16 +1956,27 @@
 }
 
 
-static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
+/* ZSTD_minGain() :
+ * minimum compression required
+ * to generate a compress block or a compressed literals section.
+ * note : use same formula for both situations */
+static size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
+{
+    U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
+    ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
+    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
+    return (srcSize >> minlog) + 2;
+}
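/* Worked example of the gain threshold: for a 4 KB input,
 *   strategies below ZSTD_btultra : minGain = (4096 >> 6) + 2 = 66 bytes,
 *   ZSTD_btultra  (strat 8)       : minGain = (4096 >> 7) + 2 = 34 bytes,
 *   ZSTD_btultra2 (strat 9)       : minGain = (4096 >> 8) + 2 = 18 bytes,
 * i.e. the strongest strategies accept smaller savings before falling back
 * to an uncompressed block or literals section. */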
 
-static size_t ZSTD_compressLiterals (ZSTD_entropyCTables_t const* prevEntropy,
-                                     ZSTD_entropyCTables_t* nextEntropy,
+static size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
+                                     ZSTD_hufCTables_t* nextHuf,
                                      ZSTD_strategy strategy, int disableLiteralCompression,
                                      void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize,
-                                     U32* workspace, const int bmi2)
+                                     void* workspace, size_t wkspSize,
+                               const int bmi2)
 {
-    size_t const minGain = ZSTD_minGain(srcSize);
+    size_t const minGain = ZSTD_minGain(srcSize, strategy);
     size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
     BYTE*  const ostart = (BYTE*)dst;
     U32 singleStream = srcSize < 256;
@@ -1376,27 +1987,25 @@
                 disableLiteralCompression);
 
     /* Prepare nextEntropy assuming reusing the existing table */
-    nextEntropy->hufCTable_repeatMode = prevEntropy->hufCTable_repeatMode;
-    memcpy(nextEntropy->hufCTable, prevEntropy->hufCTable,
-           sizeof(prevEntropy->hufCTable));
+    memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
 
     if (disableLiteralCompression)
         return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
 
     /* small ? don't even attempt compression (speed opt) */
 #   define COMPRESS_LITERALS_SIZE_MIN 63
-    {   size_t const minLitSize = (prevEntropy->hufCTable_repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
+    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
         if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
     }
 
-    if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall);   /* not enough space for compression */
-    {   HUF_repeat repeat = prevEntropy->hufCTable_repeatMode;
+    RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
+    {   HUF_repeat repeat = prevHuf->repeatMode;
         int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
         if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
         cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
-                                      workspace, HUF_WORKSPACE_SIZE, (HUF_CElt*)nextEntropy->hufCTable, &repeat, preferRepeat, bmi2)
+                                      workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2)
                                 : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
-                                      workspace, HUF_WORKSPACE_SIZE, (HUF_CElt*)nextEntropy->hufCTable, &repeat, preferRepeat, bmi2);
+                                      workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
         if (repeat != HUF_repeat_none) {
             /* reused the existing table */
             hType = set_repeat;
@@ -1404,17 +2013,17 @@
     }
 
     if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
-        memcpy(nextEntropy->hufCTable, prevEntropy->hufCTable, sizeof(prevEntropy->hufCTable));
+        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
         return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
     }
     if (cLitSize==1) {
-        memcpy(nextEntropy->hufCTable, prevEntropy->hufCTable, sizeof(prevEntropy->hufCTable));
+        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
         return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
     }
 
     if (hType == set_compressed) {
         /* using a newly constructed table */
-        nextEntropy->hufCTable_repeatMode = HUF_repeat_check;
+        nextHuf->repeatMode = HUF_repeat_check;
     }
 
     /* Build header */
@@ -1451,6 +2060,7 @@
     BYTE* const mlCodeTable = seqStorePtr->mlCode;
     U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
     U32 u;
+    assert(nbSeq <= seqStorePtr->maxNbSeq);
     for (u=0; u<nbSeq; u++) {
         U32 const llv = sequences[u].litLength;
         U32 const mlv = sequences[u].matchLength;
@@ -1464,72 +2074,243 @@
         mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
 }
 
+
+/**
+ * -log2(x / 256) lookup table for x in [0, 256).
+ * If x == 0: Return 0
+ * Else: Return floor(-log2(x / 256) * 256)
+ */
+static unsigned const kInverseProbabilityLog256[256] = {
+    0,    2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
+    1130, 1100, 1073, 1047, 1024, 1001, 980,  960,  941,  923,  906,  889,
+    874,  859,  844,  830,  817,  804,  791,  779,  768,  756,  745,  734,
+    724,  714,  704,  694,  685,  676,  667,  658,  650,  642,  633,  626,
+    618,  610,  603,  595,  588,  581,  574,  567,  561,  554,  548,  542,
+    535,  529,  523,  517,  512,  506,  500,  495,  489,  484,  478,  473,
+    468,  463,  458,  453,  448,  443,  438,  434,  429,  424,  420,  415,
+    411,  407,  402,  398,  394,  390,  386,  382,  377,  373,  370,  366,
+    362,  358,  354,  350,  347,  343,  339,  336,  332,  329,  325,  322,
+    318,  315,  311,  308,  305,  302,  298,  295,  292,  289,  286,  282,
+    279,  276,  273,  270,  267,  264,  261,  258,  256,  253,  250,  247,
+    244,  241,  239,  236,  233,  230,  228,  225,  222,  220,  217,  215,
+    212,  209,  207,  204,  202,  199,  197,  194,  192,  190,  187,  185,
+    182,  180,  178,  175,  173,  171,  168,  166,  164,  162,  159,  157,
+    155,  153,  151,  149,  146,  144,  142,  140,  138,  136,  134,  132,
+    130,  128,  126,  123,  121,  119,  117,  115,  114,  112,  110,  108,
+    106,  104,  102,  100,  98,   96,   94,   93,   91,   89,   87,   85,
+    83,   82,   80,   78,   76,   74,   73,   71,   69,   67,   66,   64,
+    62,   61,   59,   57,   55,   54,   52,   50,   49,   47,   46,   44,
+    42,   41,   39,   37,   36,   34,   33,   31,   30,   28,   26,   25,
+    23,   22,   20,   19,   17,   16,   14,   13,   11,   10,   8,    7,
+    5,    4,    2,    1,
+};
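/* A minimal sketch of how such a table can be regenerated and sanity-checked,
 * assuming <math.h> (hypothetical helper; the library ships the precomputed values): */
#include <math.h>
static unsigned invProbLog256(unsigned x)   /* x in [1,255] */
{
    return (unsigned)floor(-log2((double)x / 256.0) * 256.0);
}
/* e.g. invProbLog256(1) == 2048 and invProbLog256(128) == 256, matching the table. */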
+
+
+/**
+ * Returns the cost in bits of encoding the distribution described by count
+ * using the entropy bound.
+ */
+static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
+{
+    unsigned cost = 0;
+    unsigned s;
+    for (s = 0; s <= max; ++s) {
+        unsigned norm = (unsigned)((256 * count[s]) / total);
+        if (count[s] != 0 && norm == 0)
+            norm = 1;
+        assert(count[s] < total);
+        cost += count[s] * kInverseProbabilityLog256[norm];
+    }
+    return cost >> 8;
+}
+
+
+/**
+ * Returns the cost in bits of encoding the distribution in count using the
+ * table described by norm. The max symbol supported by norm is assumed >= max.
+ * norm must be valid for every symbol with non-zero probability in count.
+ */
+static size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
+                                    unsigned const* count, unsigned const max)
+{
+    unsigned const shift = 8 - accuracyLog;
+    size_t cost = 0;
+    unsigned s;
+    assert(accuracyLog <= 8);
+    for (s = 0; s <= max; ++s) {
+        unsigned const normAcc = norm[s] != -1 ? norm[s] : 1;
+        unsigned const norm256 = normAcc << shift;
+        assert(norm256 > 0);
+        assert(norm256 < 256);
+        cost += count[s] * kInverseProbabilityLog256[norm256];
+    }
+    return cost >> 8;
+}
+
+
+static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
+  void const* ptr = ctable;
+  U16 const* u16ptr = (U16 const*)ptr;
+  U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
+  return maxSymbolValue;
+}
+
+
+/**
+ * Returns the cost in bits of encoding the distribution in count using ctable.
+ * Returns an error if ctable cannot represent all the symbols in count.
+ */
+static size_t ZSTD_fseBitCost(
+    FSE_CTable const* ctable,
+    unsigned const* count,
+    unsigned const max)
+{
+    unsigned const kAccuracyLog = 8;
+    size_t cost = 0;
+    unsigned s;
+    FSE_CState_t cstate;
+    FSE_initCState(&cstate, ctable);
+    RETURN_ERROR_IF(ZSTD_getFSEMaxSymbolValue(ctable) < max, GENERIC,
+                    "Repeat FSE_CTable has maxSymbolValue %u < %u",
+                    ZSTD_getFSEMaxSymbolValue(ctable), max);
+    for (s = 0; s <= max; ++s) {
+        unsigned const tableLog = cstate.stateLog;
+        unsigned const badCost = (tableLog + 1) << kAccuracyLog;
+        unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
+        if (count[s] == 0)
+            continue;
+        RETURN_ERROR_IF(bitCost >= badCost, GENERIC,
+                        "Repeat FSE_CTable has Prob[%u] == 0", s);
+        cost += count[s] * bitCost;
+    }
+    return cost >> kAccuracyLog;
+}
+
+/**
+ * Returns the cost in bytes of encoding the normalized count header.
+ * Returns an error if any of the helper functions return an error.
+ */
+static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
+                              size_t const nbSeq, unsigned const FSELog)
+{
+    BYTE wksp[FSE_NCOUNTBOUND];
+    S16 norm[MaxSeq + 1];
+    const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
+    FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
+    return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
+}
+
+
 typedef enum {
     ZSTD_defaultDisallowed = 0,
     ZSTD_defaultAllowed = 1
 } ZSTD_defaultPolicy_e;
 
-MEM_STATIC
-symbolEncodingType_e ZSTD_selectEncodingType(
-        FSE_repeat* repeatMode, size_t const mostFrequent, size_t nbSeq,
-        U32 defaultNormLog, ZSTD_defaultPolicy_e const isDefaultAllowed)
+MEM_STATIC symbolEncodingType_e
+ZSTD_selectEncodingType(
+        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
+        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
+        FSE_CTable const* prevCTable,
+        short const* defaultNorm, U32 defaultNormLog,
+        ZSTD_defaultPolicy_e const isDefaultAllowed,
+        ZSTD_strategy const strategy)
 {
-#define MIN_SEQ_FOR_DYNAMIC_FSE   64
-#define MAX_SEQ_FOR_STATIC_FSE  1000
     ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
-    if ((mostFrequent == nbSeq) && (!isDefaultAllowed || nbSeq > 2)) {
+    if (mostFrequent == nbSeq) {
+        *repeatMode = FSE_repeat_none;
+        if (isDefaultAllowed && nbSeq <= 2) {
+            /* Prefer set_basic over set_rle when there are 2 or less symbols,
+             * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
+             * If basic encoding isn't possible, always choose RLE.
+             */
+            DEBUGLOG(5, "Selected set_basic");
+            return set_basic;
+        }
         DEBUGLOG(5, "Selected set_rle");
-        /* Prefer set_basic over set_rle when there are 2 or less symbols,
-         * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
-         * If basic encoding isn't possible, always choose RLE.
-         */
-        *repeatMode = FSE_repeat_check;
         return set_rle;
     }
-    if ( isDefaultAllowed
-      && (*repeatMode == FSE_repeat_valid) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
-        DEBUGLOG(5, "Selected set_repeat");
-        return set_repeat;
-    }
-    if ( isDefaultAllowed
-      && ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (defaultNormLog-1)))) ) {
-        DEBUGLOG(5, "Selected set_basic");
-        /* The format allows default tables to be repeated, but it isn't useful.
-         * When using simple heuristics to select encoding type, we don't want
-         * to confuse these tables with dictionaries. When running more careful
-         * analysis, we don't need to waste time checking both repeating tables
-         * and default tables.
-         */
-        *repeatMode = FSE_repeat_none;
-        return set_basic;
+    if (strategy < ZSTD_lazy) {
+        if (isDefaultAllowed) {
+            size_t const staticFse_nbSeq_max = 1000;
+            size_t const mult = 10 - strategy;
+            size_t const baseLog = 3;
+            size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog;  /* 28-36 for offset, 56-72 for lengths */
+            assert(defaultNormLog >= 5 && defaultNormLog <= 6);  /* xx_DEFAULTNORMLOG */
+            assert(mult <= 9 && mult >= 7);
+            if ( (*repeatMode == FSE_repeat_valid)
+              && (nbSeq < staticFse_nbSeq_max) ) {
+                DEBUGLOG(5, "Selected set_repeat");
+                return set_repeat;
+            }
+            if ( (nbSeq < dynamicFse_nbSeq_min)
+              || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
+                DEBUGLOG(5, "Selected set_basic");
+                /* The format allows default tables to be repeated, but it isn't useful.
+                 * When using simple heuristics to select encoding type, we don't want
+                 * to confuse these tables with dictionaries. When running more careful
+                 * analysis, we don't need to waste time checking both repeating tables
+                 * and default tables.
+                 */
+                *repeatMode = FSE_repeat_none;
+                return set_basic;
+            }
+        }
+    } else {
+        size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
+        size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
+        size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
+        size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);
+
+        if (isDefaultAllowed) {
+            assert(!ZSTD_isError(basicCost));
+            assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
+        }
+        assert(!ZSTD_isError(NCountCost));
+        assert(compressedCost < ERROR(maxCode));
+        DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
+                    (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
+        if (basicCost <= repeatCost && basicCost <= compressedCost) {
+            DEBUGLOG(5, "Selected set_basic");
+            assert(isDefaultAllowed);
+            *repeatMode = FSE_repeat_none;
+            return set_basic;
+        }
+        if (repeatCost <= compressedCost) {
+            DEBUGLOG(5, "Selected set_repeat");
+            assert(!ZSTD_isError(repeatCost));
+            return set_repeat;
+        }
+        assert(compressedCost < basicCost && compressedCost < repeatCost);
     }
     DEBUGLOG(5, "Selected set_compressed");
     *repeatMode = FSE_repeat_check;
     return set_compressed;
 }
 
-MEM_STATIC
-size_t ZSTD_buildCTable(void* dst, size_t dstCapacity,
-        FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
-        U32* count, U32 max,
-        BYTE const* codeTable, size_t nbSeq,
-        S16 const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
-        FSE_CTable const* prevCTable, size_t prevCTableSize,
-        void* workspace, size_t workspaceSize)
+MEM_STATIC size_t
+ZSTD_buildCTable(void* dst, size_t dstCapacity,
+                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+                unsigned* count, U32 max,
+                const BYTE* codeTable, size_t nbSeq,
+                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+                const FSE_CTable* prevCTable, size_t prevCTableSize,
+                void* workspace, size_t workspaceSize)
 {
     BYTE* op = (BYTE*)dst;
-    BYTE const* const oend = op + dstCapacity;
+    const BYTE* const oend = op + dstCapacity;
+    DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity);
 
     switch (type) {
     case set_rle:
+        FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max));
+        RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall);
         *op = codeTable[0];
-        CHECK_F(FSE_buildCTable_rle(nextCTable, (BYTE)max));
         return 1;
     case set_repeat:
         memcpy(nextCTable, prevCTable, prevCTableSize);
         return 0;
     case set_basic:
-        CHECK_F(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize));  /* note : could be pre-calculated */
+        FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize));  /* note : could be pre-calculated */
         return 0;
     case set_compressed: {
         S16 norm[MaxSeq + 1];
@@ -1540,14 +2321,14 @@
             nbSeq_1--;
         }
         assert(nbSeq_1 > 1);
-        CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
+        FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
         {   size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog);   /* overflow protected */
-            if (FSE_isError(NCountSize)) return NCountSize;
-            CHECK_F(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
+            FORWARD_IF_ERROR(NCountSize);
+            FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
             return NCountSize;
         }
     }
-    default: return assert(0), ERROR(GENERIC);
+    default: assert(0); RETURN_ERROR(GENERIC);
     }
 }
 
@@ -1564,7 +2345,12 @@
     FSE_CState_t  stateOffsetBits;
     FSE_CState_t  stateLitLength;
 
-    CHECK_E(BIT_initCStream(&blockStream, dst, dstCapacity), dstSize_tooSmall); /* not enough space remaining */
+    RETURN_ERROR_IF(
+        ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
+        dstSize_tooSmall, "not enough space remaining");
+    DEBUGLOG(6, "available space for bitstream : %i  (dstCapacity=%u)",
+                (int)(blockStream.endPtr - blockStream.startPtr),
+                (unsigned)dstCapacity);
 
     /* first symbols */
     FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
@@ -1597,9 +2383,9 @@
             U32  const ofBits = ofCode;
             U32  const mlBits = ML_bits[mlCode];
             DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
-                        sequences[n].litLength,
-                        sequences[n].matchLength + MINMATCH,
-                        sequences[n].offset);
+                        (unsigned)sequences[n].litLength,
+                        (unsigned)sequences[n].matchLength + MINMATCH,
+                        (unsigned)sequences[n].offset);
                                                                             /* 32b*/  /* 64b*/
                                                                             /* (7)*/  /* (7)*/
             FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
@@ -1624,6 +2410,7 @@
                 BIT_addBits(&blockStream, sequences[n].offset, ofBits);     /* 31 */
             }
             BIT_flushBits(&blockStream);                                    /* (7)*/
+            DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
     }   }
 
     DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
@@ -1634,7 +2421,7 @@
     FSE_flushCState(&blockStream, &stateLitLength);
 
     {   size_t const streamSize = BIT_closeCStream(&blockStream);
-        if (streamSize==0) return ERROR(dstSize_tooSmall);   /* not enough space */
+        RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space");
         return streamSize;
     }
 }
@@ -1674,13 +2461,14 @@
 
 #endif
 
-size_t ZSTD_encodeSequences(
+static size_t ZSTD_encodeSequences(
             void* dst, size_t dstCapacity,
             FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
             FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
             FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
             seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
 {
+    DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
 #if DYNAMIC_BMI2
     if (bmi2) {
         return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
@@ -1698,18 +2486,38 @@
                                         sequences, nbSeq, longOffsets);
 }
 
-MEM_STATIC size_t ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
-                              ZSTD_entropyCTables_t const* prevEntropy,
-                              ZSTD_entropyCTables_t* nextEntropy,
-                              ZSTD_CCtx_params const* cctxParams,
-                              void* dst, size_t dstCapacity, U32* workspace,
-                              const int bmi2)
+static int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)
+{
+    switch (cctxParams->literalCompressionMode) {
+    case ZSTD_lcm_huffman:
+        return 0;
+    case ZSTD_lcm_uncompressed:
+        return 1;
+    default:
+        assert(0 /* impossible: pre-validated */);
+        /* fall-through */
+    case ZSTD_lcm_auto:
+        return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
+    }
+}
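/* A minimal sketch of driving this switch from the public API
 * (ZSTD_c_literalCompressionMode is experimental, so this assumes
 * ZSTD_STATIC_LINKING_ONLY; cctx assumed already created): */
ZSTD_CCtx_setParameter(cctx, ZSTD_c_literalCompressionMode, ZSTD_lcm_uncompressed);  /* never compress literals */
ZSTD_CCtx_setParameter(cctx, ZSTD_c_literalCompressionMode, ZSTD_lcm_auto);          /* back to the heuristic above */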
+
+/* ZSTD_compressSequences_internal():
+ * actually compresses both literals and sequences */
+MEM_STATIC size_t
+ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
+                          const ZSTD_entropyCTables_t* prevEntropy,
+                                ZSTD_entropyCTables_t* nextEntropy,
+                          const ZSTD_CCtx_params* cctxParams,
+                                void* dst, size_t dstCapacity,
+                                void* workspace, size_t wkspSize,
+                          const int bmi2)
 {
     const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
-    U32 count[MaxSeq+1];
-    FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
-    FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
-    FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
+    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
+    unsigned count[MaxSeq+1];
+    FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
+    FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
+    FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
     U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
     const seqDef* const sequences = seqStorePtr->sequencesStart;
     const BYTE* const ofCodeTable = seqStorePtr->ofCode;
@@ -1720,87 +2528,114 @@
     BYTE* op = ostart;
     size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
     BYTE* seqHead;
+    BYTE* lastNCount = NULL;
 
     ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+    DEBUGLOG(5, "ZSTD_compressSequences_internal");
 
     /* Compress literals */
     {   const BYTE* const literals = seqStorePtr->litStart;
         size_t const litSize = seqStorePtr->lit - literals;
         size_t const cSize = ZSTD_compressLiterals(
-                                    prevEntropy, nextEntropy,
-                                    cctxParams->cParams.strategy, cctxParams->disableLiteralCompression,
+                                    &prevEntropy->huf, &nextEntropy->huf,
+                                    cctxParams->cParams.strategy,
+                                    ZSTD_disableLiteralsCompression(cctxParams),
                                     op, dstCapacity,
                                     literals, litSize,
-                                    workspace, bmi2);
-        if (ZSTD_isError(cSize))
-          return cSize;
+                                    workspace, wkspSize,
+                                    bmi2);
+        FORWARD_IF_ERROR(cSize);
         assert(cSize <= dstCapacity);
         op += cSize;
     }
 
     /* Sequences Header */
-    if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/) return ERROR(dstSize_tooSmall);
+    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
+                    dstSize_tooSmall);
     if (nbSeq < 0x7F)
         *op++ = (BYTE)nbSeq;
     else if (nbSeq < LONGNBSEQ)
         op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
     else
         op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
+    assert(op <= oend);
     if (nbSeq==0) {
-      memcpy(nextEntropy->litlengthCTable, prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable));
-      nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
-      memcpy(nextEntropy->offcodeCTable, prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable));
-      nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
-      memcpy(nextEntropy->matchlengthCTable, prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable));
-      nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
-      return op - ostart;
+        /* Copy the old tables over as if we repeated them */
+        memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
+        return op - ostart;
     }
 
     /* seqHead : flags for FSE encoding type */
     seqHead = op++;
+    assert(op <= oend);
 
     /* convert length/distances into codes */
     ZSTD_seqToCodes(seqStorePtr);
     /* build CTable for Literal Lengths */
-    {   U32 max = MaxLL;
-        size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace);
+    {   unsigned max = MaxLL;
+        size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace, wkspSize);   /* can't fail */
         DEBUGLOG(5, "Building LL table");
-        nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
-        LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode, mostFrequent, nbSeq, LL_defaultNormLog, ZSTD_defaultAllowed);
+        nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
+        LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode,
+                                        count, max, mostFrequent, nbSeq,
+                                        LLFSELog, prevEntropy->fse.litlengthCTable,
+                                        LL_defaultNorm, LL_defaultNormLog,
+                                        ZSTD_defaultAllowed, strategy);
+        assert(set_basic < set_compressed && set_rle < set_compressed);
+        assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
         {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
-                    count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
-                    prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable),
-                    workspace, HUF_WORKSPACE_SIZE);
-            if (ZSTD_isError(countSize)) return countSize;
+                                                    count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
+                                                    prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable),
+                                                    workspace, wkspSize);
+            FORWARD_IF_ERROR(countSize);
+            if (LLtype == set_compressed)
+                lastNCount = op;
             op += countSize;
+            assert(op <= oend);
     }   }
     /* build CTable for Offsets */
-    {   U32 max = MaxOff;
-        size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace);
+    {   unsigned max = MaxOff;
+        size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace, wkspSize);  /* can't fail */
         /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
         ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
         DEBUGLOG(5, "Building OF table");
-        nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
-        Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode, mostFrequent, nbSeq, OF_defaultNormLog, defaultPolicy);
+        nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
+        Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode,
+                                        count, max, mostFrequent, nbSeq,
+                                        OffFSELog, prevEntropy->fse.offcodeCTable,
+                                        OF_defaultNorm, OF_defaultNormLog,
+                                        defaultPolicy, strategy);
+        assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
         {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
-                    count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
-                    prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable),
-                    workspace, HUF_WORKSPACE_SIZE);
-            if (ZSTD_isError(countSize)) return countSize;
+                                                    count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+                                                    prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable),
+                                                    workspace, wkspSize);
+            FORWARD_IF_ERROR(countSize);
+            if (Offtype == set_compressed)
+                lastNCount = op;
             op += countSize;
+            assert(op <= oend);
     }   }
     /* build CTable for MatchLengths */
-    {   U32 max = MaxML;
-        size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace);
-        DEBUGLOG(5, "Building ML table");
-        nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
-        MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode, mostFrequent, nbSeq, ML_defaultNormLog, ZSTD_defaultAllowed);
+    {   unsigned max = MaxML;
+        size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace, wkspSize);   /* can't fail */
+        DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
+        nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
+        MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode,
+                                        count, max, mostFrequent, nbSeq,
+                                        MLFSELog, prevEntropy->fse.matchlengthCTable,
+                                        ML_defaultNorm, ML_defaultNormLog,
+                                        ZSTD_defaultAllowed, strategy);
+        assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
         {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
-                    count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
-                    prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable),
-                    workspace, HUF_WORKSPACE_SIZE);
-            if (ZSTD_isError(countSize)) return countSize;
+                                                    count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
+                                                    prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable),
+                                                    workspace, wkspSize);
+            FORWARD_IF_ERROR(countSize);
+            if (MLtype == set_compressed)
+                lastNCount = op;
             op += countSize;
+            assert(op <= oend);
     }   }
 
     *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
@@ -1812,65 +2647,104 @@
                                         CTable_LitLength, llCodeTable,
                                         sequences, nbSeq,
                                         longOffsets, bmi2);
-        if (ZSTD_isError(bitstreamSize)) return bitstreamSize;
+        FORWARD_IF_ERROR(bitstreamSize);
         op += bitstreamSize;
+        assert(op <= oend);
+        /* zstd versions <= 1.3.4 mistakenly report corruption when
+         * FSE_readNCount() receives a buffer < 4 bytes.
+         * Fixed by https://github.com/facebook/zstd/pull/1146.
+         * This can happen when the last set_compressed table present is 2
+         * bytes and the bitstream is only one byte.
+         * In this exceedingly rare case, we will simply emit an uncompressed
+         * block, since it isn't worth optimizing.
+         */
+        if (lastNCount && (op - lastNCount) < 4) {
+            /* NCountSize >= 2 && bitstreamSize > 0 ==> op - lastNCount == 3 */
+            assert(op - lastNCount == 3);
+            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
+                        "emitting an uncompressed block.");
+            return 0;
+        }
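+        /* Worked example (editorial): a final set_compressed NCount that
+         * serializes to 2 bytes, followed by a 1-byte FSE bitstream, leaves
+         * op - lastNCount == 3 < 4, which decoders <= v1.3.4 reject;
+         * returning 0 here makes the caller emit a raw block instead. */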
     }
 
+    DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
     return op - ostart;
 }
 
-MEM_STATIC size_t ZSTD_compressSequences(seqStore_t* seqStorePtr,
-                              ZSTD_entropyCTables_t const* prevEntropy,
-                              ZSTD_entropyCTables_t* nextEntropy,
-                              ZSTD_CCtx_params const* cctxParams,
-                              void* dst, size_t dstCapacity,
-                              size_t srcSize, U32* workspace, int bmi2)
+MEM_STATIC size_t
+ZSTD_compressSequences(seqStore_t* seqStorePtr,
+                       const ZSTD_entropyCTables_t* prevEntropy,
+                             ZSTD_entropyCTables_t* nextEntropy,
+                       const ZSTD_CCtx_params* cctxParams,
+                             void* dst, size_t dstCapacity,
+                             size_t srcSize,
+                             void* workspace, size_t wkspSize,
+                             int bmi2)
 {
     size_t const cSize = ZSTD_compressSequences_internal(
-            seqStorePtr, prevEntropy, nextEntropy, cctxParams, dst, dstCapacity,
-            workspace, bmi2);
+                            seqStorePtr, prevEntropy, nextEntropy, cctxParams,
+                            dst, dstCapacity,
+                            workspace, wkspSize, bmi2);
+    if (cSize == 0) return 0;
     /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
      * Since we ran out of space, the block must not be compressible, so fall back to a raw uncompressed block.
      */
     if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
         return 0;  /* block not compressed */
-    if (ZSTD_isError(cSize)) return cSize;
+    FORWARD_IF_ERROR(cSize);
 
     /* Check compressibility */
-    {   size_t const maxCSize = srcSize - ZSTD_minGain(srcSize);  /* note : fixed formula, maybe should depend on compression level, or strategy */
+    {   size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
         if (cSize >= maxCSize) return 0;  /* block not compressed */
     }
 
-    /* We check that dictionaries have offset codes available for the first
-     * block. After the first block, the offcode table might not have large
-     * enough codes to represent the offsets in the data.
-     */
-    if (nextEntropy->offcode_repeatMode == FSE_repeat_valid)
-        nextEntropy->offcode_repeatMode = FSE_repeat_check;
-
     return cSize;
 }
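+/* Illustrative arithmetic for the compressibility check above (editorial,
+ * values hypothetical): for a 1000-byte block, if ZSTD_minGain() returned 17
+ * then maxCSize == 983, and any cSize >= 983 makes the caller emit a raw
+ * uncompressed block instead. */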
 
 /* ZSTD_selectBlockCompressor() :
  * Not static, but internal use only (used by long distance matcher)
  * assumption : strat is a valid strategy */
-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
 {
-    static const ZSTD_blockCompressor blockCompressor[2][(unsigned)ZSTD_btultra+1] = {
+    static const ZSTD_blockCompressor blockCompressor[3][ZSTD_STRATEGY_MAX+1] = {
         { ZSTD_compressBlock_fast  /* default for 0 */,
-          ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy,
-          ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2,
-          ZSTD_compressBlock_btopt, ZSTD_compressBlock_btultra },
+          ZSTD_compressBlock_fast,
+          ZSTD_compressBlock_doubleFast,
+          ZSTD_compressBlock_greedy,
+          ZSTD_compressBlock_lazy,
+          ZSTD_compressBlock_lazy2,
+          ZSTD_compressBlock_btlazy2,
+          ZSTD_compressBlock_btopt,
+          ZSTD_compressBlock_btultra,
+          ZSTD_compressBlock_btultra2 },
         { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
-          ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict,
-          ZSTD_compressBlock_lazy_extDict,ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict,
-          ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btultra_extDict }
+          ZSTD_compressBlock_fast_extDict,
+          ZSTD_compressBlock_doubleFast_extDict,
+          ZSTD_compressBlock_greedy_extDict,
+          ZSTD_compressBlock_lazy_extDict,
+          ZSTD_compressBlock_lazy2_extDict,
+          ZSTD_compressBlock_btlazy2_extDict,
+          ZSTD_compressBlock_btopt_extDict,
+          ZSTD_compressBlock_btultra_extDict,
+          ZSTD_compressBlock_btultra_extDict },
+        { ZSTD_compressBlock_fast_dictMatchState  /* default for 0 */,
+          ZSTD_compressBlock_fast_dictMatchState,
+          ZSTD_compressBlock_doubleFast_dictMatchState,
+          ZSTD_compressBlock_greedy_dictMatchState,
+          ZSTD_compressBlock_lazy_dictMatchState,
+          ZSTD_compressBlock_lazy2_dictMatchState,
+          ZSTD_compressBlock_btlazy2_dictMatchState,
+          ZSTD_compressBlock_btopt_dictMatchState,
+          ZSTD_compressBlock_btultra_dictMatchState,
+          ZSTD_compressBlock_btultra_dictMatchState }
     };
+    ZSTD_blockCompressor selectedCompressor;
     ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
 
-    assert((U32)strat >= (U32)ZSTD_fast);
-    assert((U32)strat <= (U32)ZSTD_btultra);
-    return blockCompressor[extDict!=0][(U32)strat];
+    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
+    selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
+    assert(selectedCompressor != NULL);
+    return selectedCompressor;
 }
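+/* Usage sketch for ZSTD_selectBlockCompressor() above (editorial,
+ * hypothetical call):
+ *     ZSTD_blockCompressor const bc =
+ *         ZSTD_selectBlockCompressor(ZSTD_greedy, ZSTD_noDict);
+ * selects ZSTD_compressBlock_greedy from the first row of the table. */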
 
 static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
@@ -1880,36 +2754,47 @@
     seqStorePtr->lit += lastLLSize;
 }
 
-static void ZSTD_resetSeqStore(seqStore_t* ssPtr)
+void ZSTD_resetSeqStore(seqStore_t* ssPtr)
 {
     ssPtr->lit = ssPtr->litStart;
     ssPtr->sequences = ssPtr->sequencesStart;
     ssPtr->longLengthID = 0;
 }
 
-static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
-                                        void* dst, size_t dstCapacity,
-                                        const void* src, size_t srcSize)
+typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
+
+static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
 {
     ZSTD_matchState_t* const ms = &zc->blockState.matchState;
-    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
-                (U32)dstCapacity, ms->window.dictLimit, ms->nextToUpdate);
+    DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
+    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
+    /* Assert that we have correctly flushed the ctx params into the ms's copy */
+    ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
     if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
-        ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.searchLength);
-        return 0;   /* don't even attempt compression below a certain srcSize */
+        ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
+        return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
     }
     ZSTD_resetSeqStore(&(zc->seqStore));
+    /* required for the optimal parser to read stats from the dictionary */
+    ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
+    /* tell the optimal parser how we expect to compress literals */
+    ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
+    /* a gap between an attached dict and the current window is not safe,
+     * they must remain adjacent,
+     * and when that stops being the case, the dict must be unset */
+    assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);
 
     /* limited update after a very long match */
     {   const BYTE* const base = ms->window.base;
         const BYTE* const istart = (const BYTE*)src;
         const U32 current = (U32)(istart-base);
+        if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1));   /* ensure no overflow */
         if (current > ms->nextToUpdate + 384)
             ms->nextToUpdate = current - MIN(192, (U32)(current - ms->nextToUpdate - 384));
     }
 
     /* select and store sequences */
-    {   U32 const extDict = ZSTD_window_hasExtDict(ms->window);
+    {   ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
         size_t lastLLSize;
         {   int i;
             for (i = 0; i < ZSTD_REP_NUM; ++i)
@@ -1922,8 +2807,7 @@
                 ZSTD_ldm_blockCompress(&zc->externSeqStore,
                                        ms, &zc->seqStore,
                                        zc->blockState.nextCBlock->rep,
-                                       &zc->appliedParams.cParams,
-                                       src, srcSize, extDict);
+                                       src, srcSize);
             assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
         } else if (zc->appliedParams.ldmParams.enableLdm) {
             rawSeqStore_t ldmSeqStore = {NULL, 0, 0, 0};
@@ -1931,7 +2815,7 @@
             ldmSeqStore.seq = zc->ldmSequences;
             ldmSeqStore.capacity = zc->maxNbLdmSequences;
             /* Updates ldmSeqStore.size */
-            CHECK_F(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
+            FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
                                                &zc->appliedParams.ldmParams,
                                                src, srcSize));
             /* Updates ldmSeqStore.pos */
@@ -1939,30 +2823,73 @@
                 ZSTD_ldm_blockCompress(&ldmSeqStore,
                                        ms, &zc->seqStore,
                                        zc->blockState.nextCBlock->rep,
-                                       &zc->appliedParams.cParams,
-                                       src, srcSize, extDict);
+                                       src, srcSize);
             assert(ldmSeqStore.pos == ldmSeqStore.size);
         } else {   /* not long range mode */
-            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, extDict);
-            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, &zc->appliedParams.cParams, src, srcSize);
+            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
+            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
         }
         {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
             ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
     }   }
+    return ZSTDbss_compress;
+}
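+/* Flow note (editorial): ZSTD_buildSeqStore() only fills zc->seqStore;
+ * entropy coding happens later in ZSTD_compressSequences(), so a
+ * ZSTDbss_noCompress result simply short-circuits to an uncompressed block. */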
+
+static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
+                                        void* dst, size_t dstCapacity,
+                                        const void* src, size_t srcSize)
+{
+    size_t cSize;
+    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
+                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate);
+
+    {   const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
+        FORWARD_IF_ERROR(bss);
+        if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
+    }
 
     /* encode sequences and literals */
-    {   size_t const cSize = ZSTD_compressSequences(&zc->seqStore,
-                                &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
-                                &zc->appliedParams,
-                                dst, dstCapacity,
-                                srcSize, zc->entropyWorkspace, zc->bmi2);
-        if (ZSTD_isError(cSize) || cSize == 0) return cSize;
-        /* confirm repcodes and entropy tables */
-        {   ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
-            zc->blockState.prevCBlock = zc->blockState.nextCBlock;
-            zc->blockState.nextCBlock = tmp;
-        }
-        return cSize;
+    cSize = ZSTD_compressSequences(&zc->seqStore,
+            &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
+            &zc->appliedParams,
+            dst, dstCapacity,
+            srcSize,
+            zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+            zc->bmi2);
+
+out:
+    if (!ZSTD_isError(cSize) && cSize != 0) {
+        /* confirm repcodes and entropy tables when emitting a compressed block */
+        ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
+        zc->blockState.prevCBlock = zc->blockState.nextCBlock;
+        zc->blockState.nextCBlock = tmp;
+    }
+    /* We check that dictionaries have offset codes available for the first
+     * block. After the first block, the offcode table might not have large
+     * enough codes to represent the offsets in the data.
+     */
+    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+    return cSize;
+}
+
+
+static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, void const* ip, void const* iend)
+{
+    if (ZSTD_window_needOverflowCorrection(ms->window, iend)) {
+        U32 const maxDist = (U32)1 << params->cParams.windowLog;
+        U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
+        U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
+        ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
+        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
+        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
+        ZSTD_reduceIndex(ms, params, correction);
+        if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
+        else ms->nextToUpdate -= correction;
+        /* invalidate dictionaries on overflow correction */
+        ms->loadedDictEnd = 0;
+        ms->dictMatchState = NULL;
     }
 }
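+/* Illustrative consequence of the clamp above (editorial, values
+ * hypothetical): if correction were 1<<30 and ms->nextToUpdate were 100,
+ * nextToUpdate is reset to 0 rather than being allowed to underflow. */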
 
@@ -1985,9 +2912,9 @@
     BYTE* const ostart = (BYTE*)dst;
     BYTE* op = ostart;
     U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
-    assert(cctx->appliedParams.cParams.windowLog <= 31);
+    assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
 
-    DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (U32)blockSize);
+    DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
     if (cctx->appliedParams.fParams.checksumFlag && srcSize)
         XXH64_update(&cctx->xxhState, src, srcSize);
 
@@ -1995,36 +2922,25 @@
         ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
         U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
 
-        if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
-            return ERROR(dstSize_tooSmall);   /* not enough space to store compressed block */
+        RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
+                        dstSize_tooSmall,
+                        "not enough space to store compressed block");
         if (remaining < blockSize) blockSize = remaining;
 
-        if (ZSTD_window_needOverflowCorrection(ms->window, ip + blockSize)) {
-            U32 const cycleLog = ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy);
-            U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
-            ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
-            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
-            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
+        ZSTD_overflowCorrectIfNeeded(ms, &cctx->appliedParams, ip, ip + blockSize);
+        ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
 
-            ZSTD_reduceIndex(cctx, correction);
-            if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
-            else ms->nextToUpdate -= correction;
-            ms->loadedDictEnd = 0;
-        }
-        ZSTD_window_enforceMaxDist(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd);
+        /* Ensure hash/chain table insertion resumes no sooner than lowLimit */
         if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
 
         {   size_t cSize = ZSTD_compressBlock_internal(cctx,
                                 op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
                                 ip, blockSize);
-            if (ZSTD_isError(cSize)) return cSize;
+            FORWARD_IF_ERROR(cSize);
 
             if (cSize == 0) {  /* block is not compressible */
-                U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(blockSize << 3);
-                if (blockSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
-                MEM_writeLE32(op, cBlockHeader24);   /* 4th byte will be overwritten */
-                memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
-                cSize = ZSTD_blockHeaderSize + blockSize;
+                cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
+                FORWARD_IF_ERROR(cSize);
             } else {
                 U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
                 MEM_writeLE24(op, cBlockHeader24);
@@ -2038,11 +2954,11 @@
             assert(dstCapacity >= cSize);
             dstCapacity -= cSize;
             DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
-                        (U32)cSize);
+                        (unsigned)cSize);
     }   }
 
     if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
-    return op-ostart;
+    return (size_t)(op-ostart);
 }
 
 
@@ -2057,18 +2973,19 @@
     BYTE  const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
     U32   const fcsCode = params.fParams.contentSizeFlag ?
                      (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
-    BYTE  const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
+    BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
     size_t pos=0;
 
-    if (dstCapacity < ZSTD_frameHeaderSize_max) return ERROR(dstSize_tooSmall);
+    assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
+    RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall);
     DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
-                !params.fParams.noDictIDFlag, dictID,  dictIDSizeCode);
+                !params.fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
 
     if (params.format == ZSTD_f_zstd1) {
         MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
         pos = 4;
     }
-    op[pos++] = frameHeaderDecriptionByte;
+    op[pos++] = frameHeaderDescriptionByte;
     if (!singleSegment) op[pos++] = windowLogByte;
     switch(dictIDSizeCode)
     {
@@ -2092,11 +3009,11 @@
 /* ZSTD_writeLastEmptyBlock() :
  * output an empty Block with end-of-frame mark to complete a frame
  * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
- *           or an error code if `dstCapcity` is too small (<ZSTD_blockHeaderSize)
+ *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
  */
 size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
 {
-    if (dstCapacity < ZSTD_blockHeaderSize) return ERROR(dstSize_tooSmall);
+    RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall);
     {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */
         MEM_writeLE24(dst, cBlockHeader24);
         return ZSTD_blockHeaderSize;
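+        /* Header layout sketch (editorial): cBlockHeader24 packs
+         * lastBlock (1 bit) | blockType (2 bits) | blockSize (21 bits),
+         * so the last empty raw block above is the 3-byte little-endian
+         * value 1. */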
@@ -2105,10 +3022,9 @@
 
 size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
 {
-    if (cctx->stage != ZSTDcs_init)
-        return ERROR(stage_wrong);
-    if (cctx->appliedParams.ldmParams.enableLdm)
-        return ERROR(parameter_unsupported);
+    RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong);
+    RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,
+                    parameter_unsupported);
     cctx->externSeqStore.seq = seq;
     cctx->externSeqStore.size = nbSeq;
     cctx->externSeqStore.capacity = nbSeq;
@@ -2122,17 +3038,19 @@
                         const void* src, size_t srcSize,
                                U32 frame, U32 lastFrameChunk)
 {
-    ZSTD_matchState_t* ms = &cctx->blockState.matchState;
+    ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
     size_t fhSize = 0;
 
     DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
-                cctx->stage, (U32)srcSize);
-    if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong);   /* missing init (ZSTD_compressBegin) */
+                cctx->stage, (unsigned)srcSize);
+    RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
+                    "missing init (ZSTD_compressBegin)");
 
     if (frame && (cctx->stage==ZSTDcs_init)) {
         fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams,
                                        cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
-        if (ZSTD_isError(fhSize)) return fhSize;
+        FORWARD_IF_ERROR(fhSize);
+        assert(fhSize <= dstCapacity);
         dstCapacity -= fhSize;
         dst = (char*)dst + fhSize;
         cctx->stage = ZSTDcs_ongoing;
@@ -2143,22 +3061,31 @@
     if (!ZSTD_window_update(&ms->window, src, srcSize)) {
         ms->nextToUpdate = ms->window.dictLimit;
     }
-    if (cctx->appliedParams.ldmParams.enableLdm)
+    if (cctx->appliedParams.ldmParams.enableLdm) {
         ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
+    }
 
-    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (U32)cctx->blockSize);
+    if (!frame) {
+        /* overflow check and correction for block mode */
+        ZSTD_overflowCorrectIfNeeded(ms, &cctx->appliedParams, src, (BYTE const*)src + srcSize);
+    }
+
+    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
     {   size_t const cSize = frame ?
                              ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
                              ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
-        if (ZSTD_isError(cSize)) return cSize;
+        FORWARD_IF_ERROR(cSize);
         cctx->consumedSrcSize += srcSize;
         cctx->producedCSize += (cSize + fhSize);
-        if (cctx->appliedParams.fParams.contentSizeFlag) {  /* control src size */
-            if (cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne) {
-                DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize >= %u",
-                    (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize);
-                return ERROR(srcSize_wrong);
-            }
+        assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
+        if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
+            ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
+            RETURN_ERROR_IF(
+                cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
+                srcSize_wrong,
+                "error : pledgedSrcSize = %u, while realSrcSize >= %u",
+                (unsigned)cctx->pledgedSrcSizePlusOne-1,
+                (unsigned)cctx->consumedSrcSize);
         }
         return cSize + fhSize;
     }
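+    /* Convention note (editorial): pledgedSrcSizePlusOne stores the promised
+     * source size plus one, so 0 encodes "unknown" and 1 encodes "promised
+     * empty"; hence the consumedSrcSize+1 comparison above. */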
@@ -2168,7 +3095,7 @@
                               void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize)
 {
-    DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (U32)srcSize);
+    DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
     return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
 }
 
@@ -2183,49 +3110,66 @@
 size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
 {
     size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
-    if (srcSize > blockSizeMax) return ERROR(srcSize_wrong);
+    RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong);
+
     return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
 }
 
 /*! ZSTD_loadDictionaryContent() :
  *  @return : 0, or an error code
  */
-static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const void* src, size_t srcSize)
+static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
+                                         ZSTD_CCtx_params const* params,
+                                         const void* src, size_t srcSize,
+                                         ZSTD_dictTableLoadMethod_e dtlm)
 {
-    const BYTE* const ip = (const BYTE*) src;
+    const BYTE* ip = (const BYTE*) src;
     const BYTE* const iend = ip + srcSize;
-    ZSTD_compressionParameters const* cParams = &params->cParams;
 
     ZSTD_window_update(&ms->window, src, srcSize);
     ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
 
+    /* Assert that the ms params match the params we're being given */
+    ZSTD_assertEqualCParams(params->cParams, ms->cParams);
+
     if (srcSize <= HASH_READ_SIZE) return 0;
 
-    switch(params->cParams.strategy)
-    {
-    case ZSTD_fast:
-        ZSTD_fillHashTable(ms, cParams, iend);
-        break;
-    case ZSTD_dfast:
-        ZSTD_fillDoubleHashTable(ms, cParams, iend);
-        break;
+    while (iend - ip > HASH_READ_SIZE) {
+        size_t const remaining = iend - ip;
+        size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX);
+        const BYTE* const ichunk = ip + chunk;
 
-    case ZSTD_greedy:
-    case ZSTD_lazy:
-    case ZSTD_lazy2:
-        if (srcSize >= HASH_READ_SIZE)
-            ZSTD_insertAndFindFirstIndex(ms, cParams, iend-HASH_READ_SIZE);
-        break;
+        ZSTD_overflowCorrectIfNeeded(ms, params, ip, ichunk);
 
-    case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
-    case ZSTD_btopt:
-    case ZSTD_btultra:
-        if (srcSize >= HASH_READ_SIZE)
-            ZSTD_updateTree(ms, cParams, iend-HASH_READ_SIZE, iend);
-        break;
+        switch(params->cParams.strategy)
+        {
+        case ZSTD_fast:
+            ZSTD_fillHashTable(ms, ichunk, dtlm);
+            break;
+        case ZSTD_dfast:
+            ZSTD_fillDoubleHashTable(ms, ichunk, dtlm);
+            break;
 
-    default:
-        assert(0);  /* not possible : not a valid strategy id */
+        case ZSTD_greedy:
+        case ZSTD_lazy:
+        case ZSTD_lazy2:
+            if (chunk >= HASH_READ_SIZE)
+                ZSTD_insertAndFindFirstIndex(ms, ichunk-HASH_READ_SIZE);
+            break;
+
+        case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
+        case ZSTD_btopt:
+        case ZSTD_btultra:
+        case ZSTD_btultra2:
+            if (chunk >= HASH_READ_SIZE)
+                ZSTD_updateTree(ms, ichunk-HASH_READ_SIZE, ichunk);
+            break;
+
+        default:
+            assert(0);  /* not possible : not a valid strategy id */
+        }
+
+        ip = ichunk;
     }
 
     ms->nextToUpdate = (U32)(iend - ms->window.base);
@@ -2239,9 +3183,9 @@
    NOTE: This behavior is not standard and could be improved in the future. */
 static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {
     U32 s;
-    if (dictMaxSymbolValue < maxSymbolValue) return ERROR(dictionary_corrupted);
+    RETURN_ERROR_IF(dictMaxSymbolValue < maxSymbolValue, dictionary_corrupted);
     for (s = 0; s <= maxSymbolValue; ++s) {
-        if (normalizedCounter[s] == 0) return ERROR(dictionary_corrupted);
+        RETURN_ERROR_IF(normalizedCounter[s] == 0, dictionary_corrupted);
     }
     return 0;
 }
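+/* Rationale note for ZSTD_checkDictNCount() above (editorial): a zero
+ * normalizedCounter entry gives that symbol no FSE state, so a later block
+ * containing it could not be encoded with the dictionary's table; rejecting
+ * such dictionaries up front avoids that failure mode. */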
@@ -2256,7 +3200,12 @@
  *  assumptions : magic number supposed already checked
  *                dictSize supposed > 8
  */
-static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const void* dict, size_t dictSize, void* workspace)
+static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
+                                      ZSTD_matchState_t* ms,
+                                      ZSTD_CCtx_params const* params,
+                                      const void* dict, size_t dictSize,
+                                      ZSTD_dictTableLoadMethod_e dtlm,
+                                      void* workspace)
 {
     const BYTE* dictPtr = (const BYTE*)dict;
     const BYTE* const dictEnd = dictPtr + dictSize;
@@ -2265,53 +3214,65 @@
     size_t dictID;
 
     ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+    assert(dictSize > 8);
+    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
 
     dictPtr += 4;   /* skip magic number */
     dictID = params->fParams.noDictIDFlag ? 0 :  MEM_readLE32(dictPtr);
     dictPtr += 4;
 
     {   unsigned maxSymbolValue = 255;
-        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.hufCTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
-        if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
-        if (maxSymbolValue < 255) return ERROR(dictionary_corrupted);
+        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
+        RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted);
+        RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted);
         dictPtr += hufHeaderSize;
     }
 
     {   unsigned offcodeLog;
         size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
-        if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
-        if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
+        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted);
+        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted);
         /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
-        CHECK_E( FSE_buildCTable_wksp(bs->entropy.offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, workspace, HUF_WORKSPACE_SIZE),
-                 dictionary_corrupted);
+        /* fill all offset symbols to avoid garbage at end of table */
+        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+                bs->entropy.fse.offcodeCTable,
+                offcodeNCount, MaxOff, offcodeLog,
+                workspace, HUF_WORKSPACE_SIZE)),
+            dictionary_corrupted);
         dictPtr += offcodeHeaderSize;
     }
 
     {   short matchlengthNCount[MaxML+1];
         unsigned matchlengthMaxValue = MaxML, matchlengthLog;
         size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
-        if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
-        if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
+        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted);
+        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted);
         /* Every match length code must have non-zero probability */
-        CHECK_F( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
-        CHECK_E( FSE_buildCTable_wksp(bs->entropy.matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, workspace, HUF_WORKSPACE_SIZE),
-                 dictionary_corrupted);
+        FORWARD_IF_ERROR( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
+        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+                bs->entropy.fse.matchlengthCTable,
+                matchlengthNCount, matchlengthMaxValue, matchlengthLog,
+                workspace, HUF_WORKSPACE_SIZE)),
+            dictionary_corrupted);
         dictPtr += matchlengthHeaderSize;
     }
 
     {   short litlengthNCount[MaxLL+1];
         unsigned litlengthMaxValue = MaxLL, litlengthLog;
         size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
-        if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
-        if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
+        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted);
+        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted);
         /* Every literal length code must have non-zero probability */
-        CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
-        CHECK_E( FSE_buildCTable_wksp(bs->entropy.litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, workspace, HUF_WORKSPACE_SIZE),
-                 dictionary_corrupted);
+        FORWARD_IF_ERROR( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
+        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+                bs->entropy.fse.litlengthCTable,
+                litlengthNCount, litlengthMaxValue, litlengthLog,
+                workspace, HUF_WORKSPACE_SIZE)),
+            dictionary_corrupted);
         dictPtr += litlengthHeaderSize;
     }
 
-    if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
+    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted);
     bs->rep[0] = MEM_readLE32(dictPtr+0);
     bs->rep[1] = MEM_readLE32(dictPtr+4);
     bs->rep[2] = MEM_readLE32(dictPtr+8);
@@ -2324,30 +3285,33 @@
             offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
         }
         /* All offset values <= dictContentSize + 128 KB must be representable */
-        CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
+        FORWARD_IF_ERROR(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
         /* All repCodes must be <= dictContentSize and != 0 */
         {   U32 u;
             for (u=0; u<3; u++) {
-                if (bs->rep[u] == 0) return ERROR(dictionary_corrupted);
-                if (bs->rep[u] > dictContentSize) return ERROR(dictionary_corrupted);
+                RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted);
+                RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted);
         }   }
 
-        bs->entropy.hufCTable_repeatMode = HUF_repeat_valid;
-        bs->entropy.offcode_repeatMode = FSE_repeat_valid;
-        bs->entropy.matchlength_repeatMode = FSE_repeat_valid;
-        bs->entropy.litlength_repeatMode = FSE_repeat_valid;
-        CHECK_F(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize));
+        bs->entropy.huf.repeatMode = HUF_repeat_valid;
+        bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid;
+        bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid;
+        bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid;
+        FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm));
         return dictID;
     }
 }
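+/* Dictionary layout sketch (editorial), as parsed by
+ * ZSTD_loadZstdDictionary() above:
+ *     [magic:4][dictID:4][HUF CTable][OF NCount][ML NCount][LL NCount]
+ *     [rep codes: 3*4 bytes][raw content ...]
+ * the three rep codes are validated against dictContentSize just above. */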
 
 /** ZSTD_compress_insertDictionary() :
 *   @return : dictID, or an error code */
-static size_t ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_matchState_t* ms,
-                                             ZSTD_CCtx_params const* params,
-                                       const void* dict, size_t dictSize,
-                                             ZSTD_dictContentType_e dictContentType,
-                                             void* workspace)
+static size_t
+ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
+                               ZSTD_matchState_t* ms,
+                         const ZSTD_CCtx_params* params,
+                         const void* dict, size_t dictSize,
+                               ZSTD_dictContentType_e dictContentType,
+                               ZSTD_dictTableLoadMethod_e dtlm,
+                               void* workspace)
 {
     DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
     if ((dict==NULL) || (dictSize<=8)) return 0;
@@ -2356,30 +3320,30 @@
 
     /* dict restricted modes */
     if (dictContentType == ZSTD_dct_rawContent)
-        return ZSTD_loadDictionaryContent(ms, params, dict, dictSize);
+        return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
 
     if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
         if (dictContentType == ZSTD_dct_auto) {
             DEBUGLOG(4, "raw content dictionary detected");
-            return ZSTD_loadDictionaryContent(ms, params, dict, dictSize);
+            return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
         }
-        if (dictContentType == ZSTD_dct_fullDict)
-            return ERROR(dictionary_wrong);
+        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong);
         assert(0);   /* impossible */
     }
 
     /* dict as full zstd dictionary */
-    return ZSTD_loadZstdDictionary(bs, ms, params, dict, dictSize, workspace);
+    return ZSTD_loadZstdDictionary(bs, ms, params, dict, dictSize, dtlm, workspace);
 }
 
 /*! ZSTD_compressBegin_internal() :
  * @return : 0, or an error code */
-size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
-                             const void* dict, size_t dictSize,
-                             ZSTD_dictContentType_e dictContentType,
-                             const ZSTD_CDict* cdict,
-                             ZSTD_CCtx_params params, U64 pledgedSrcSize,
-                             ZSTD_buffered_policy_e zbuff)
+static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
+                                    const void* dict, size_t dictSize,
+                                    ZSTD_dictContentType_e dictContentType,
+                                    ZSTD_dictTableLoadMethod_e dtlm,
+                                    const ZSTD_CDict* cdict,
+                                    ZSTD_CCtx_params params, U64 pledgedSrcSize,
+                                    ZSTD_buffered_policy_e zbuff)
 {
     DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params.cParams.windowLog);
     /* params are supposed to be fully validated at this point */
@@ -2387,19 +3351,16 @@
     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
 
     if (cdict && cdict->dictContentSize>0) {
-        cctx->requestedParams = params;
-        return ZSTD_resetCCtx_usingCDict(cctx, cdict, params.cParams.windowLog,
-                                         params.fParams, pledgedSrcSize, zbuff);
+        return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
     }
 
-    CHECK_F( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+    FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                      ZSTDcrp_continue, zbuff) );
-    {
-        size_t const dictID = ZSTD_compress_insertDictionary(
+    {   size_t const dictID = ZSTD_compress_insertDictionary(
                 cctx->blockState.prevCBlock, &cctx->blockState.matchState,
-                &params, dict, dictSize, dictContentType, cctx->entropyWorkspace);
-        if (ZSTD_isError(dictID)) return dictID;
-        assert(dictID <= (size_t)(U32)-1);
+                &params, dict, dictSize, dictContentType, dtlm, cctx->entropyWorkspace);
+        FORWARD_IF_ERROR(dictID);
+        assert(dictID <= UINT_MAX);
         cctx->dictID = (U32)dictID;
     }
     return 0;
@@ -2408,15 +3369,16 @@
 size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
                                     const void* dict, size_t dictSize,
                                     ZSTD_dictContentType_e dictContentType,
+                                    ZSTD_dictTableLoadMethod_e dtlm,
                                     const ZSTD_CDict* cdict,
                                     ZSTD_CCtx_params params,
                                     unsigned long long pledgedSrcSize)
 {
     DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params.cParams.windowLog);
     /* compression parameters verification and optimization */
-    CHECK_F( ZSTD_checkCParams(params.cParams) );
+    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
     return ZSTD_compressBegin_internal(cctx,
-                                       dict, dictSize, dictContentType,
+                                       dict, dictSize, dictContentType, dtlm,
                                        cdict,
                                        params, pledgedSrcSize,
                                        ZSTDb_not_buffered);
@@ -2431,7 +3393,7 @@
     ZSTD_CCtx_params const cctxParams =
             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
     return ZSTD_compressBegin_advanced_internal(cctx,
-                                            dict, dictSize, ZSTD_dct_auto,
+                                            dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                             NULL /*cdict*/,
                                             cctxParams, pledgedSrcSize);
 }
@@ -2441,8 +3403,8 @@
     ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
     ZSTD_CCtx_params const cctxParams =
             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
-    DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (U32)dictSize);
-    return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, NULL,
+    DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
+    return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
                                        cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
 }
 
@@ -2462,12 +3424,12 @@
     size_t fhSize = 0;
 
     DEBUGLOG(4, "ZSTD_writeEpilogue");
-    if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong);  /* init missing */
+    RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
 
     /* special case : empty frame */
     if (cctx->stage == ZSTDcs_init) {
         fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0);
-        if (ZSTD_isError(fhSize)) return fhSize;
+        FORWARD_IF_ERROR(fhSize);
         dstCapacity -= fhSize;
         op += fhSize;
         cctx->stage = ZSTDcs_ongoing;
@@ -2476,7 +3438,7 @@
     if (cctx->stage != ZSTDcs_ending) {
         /* write one last empty block, make it the "last" block */
         U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
-        if (dstCapacity<4) return ERROR(dstSize_tooSmall);
+        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall);
         MEM_writeLE32(op, cBlockHeader24);
         op += ZSTD_blockHeaderSize;
         dstCapacity -= ZSTD_blockHeaderSize;
@@ -2484,8 +3446,8 @@
 
     if (cctx->appliedParams.fParams.checksumFlag) {
         U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
-        if (dstCapacity<4) return ERROR(dstSize_tooSmall);
-        DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", checksum);
+        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall);
+        DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
         MEM_writeLE32(op, checksum);
         op += 4;
     }
@@ -2502,45 +3464,53 @@
     size_t const cSize = ZSTD_compressContinue_internal(cctx,
                                 dst, dstCapacity, src, srcSize,
                                 1 /* frame mode */, 1 /* last chunk */);
-    if (ZSTD_isError(cSize)) return cSize;
+    FORWARD_IF_ERROR(cSize);
     endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
-    if (ZSTD_isError(endResult)) return endResult;
-    if (cctx->appliedParams.fParams.contentSizeFlag) {  /* control src size */
+    FORWARD_IF_ERROR(endResult);
+    assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
+    if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
+        ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
         DEBUGLOG(4, "end of frame : controlling src size");
-        if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1) {
-            DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize = %u",
-                (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize);
-            return ERROR(srcSize_wrong);
-    }   }
+        RETURN_ERROR_IF(
+            cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
+            srcSize_wrong,
+             "error : pledgedSrcSize = %u, while realSrcSize = %u",
+            (unsigned)cctx->pledgedSrcSizePlusOne-1,
+            (unsigned)cctx->consumedSrcSize);
+    }
     return cSize + endResult;
 }
 
 
 static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
-                               void* dst, size_t dstCapacity,
-                         const void* src, size_t srcSize,
-                         const void* dict,size_t dictSize,
-                               ZSTD_parameters params)
+                                      void* dst, size_t dstCapacity,
+                                const void* src, size_t srcSize,
+                                const void* dict,size_t dictSize,
+                                      ZSTD_parameters params)
 {
     ZSTD_CCtx_params const cctxParams =
             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
     DEBUGLOG(4, "ZSTD_compress_internal");
     return ZSTD_compress_advanced_internal(cctx,
-                                          dst, dstCapacity,
-                                          src, srcSize,
-                                          dict, dictSize,
-                                          cctxParams);
+                                           dst, dstCapacity,
+                                           src, srcSize,
+                                           dict, dictSize,
+                                           cctxParams);
 }
 
-size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
+size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
                                void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize,
                          const void* dict,size_t dictSize,
                                ZSTD_parameters params)
 {
     DEBUGLOG(4, "ZSTD_compress_advanced");
-    CHECK_F(ZSTD_checkCParams(params.cParams));
-    return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
+    FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams));
+    return ZSTD_compress_internal(cctx,
+                                  dst, dstCapacity,
+                                  src, srcSize,
+                                  dict, dictSize,
+                                  params);
 }
 
 /* Internal */
@@ -2551,37 +3521,44 @@
         const void* dict,size_t dictSize,
         ZSTD_CCtx_params params)
 {
-    DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)",
-                (U32)srcSize);
-    CHECK_F( ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, NULL,
-                                         params, srcSize, ZSTDb_not_buffered) );
+    DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
+    FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
+                         dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
+                         params, srcSize, ZSTDb_not_buffered) );
     return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
 }
 
-size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize,
-                               const void* dict, size_t dictSize, int compressionLevel)
+size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
+                               void* dst, size_t dstCapacity,
+                         const void* src, size_t srcSize,
+                         const void* dict, size_t dictSize,
+                               int compressionLevel)
 {
-    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, srcSize ? srcSize : 1, dict ? dictSize : 0);
+    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, srcSize + (!srcSize), dict ? dictSize : 0);
     ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
     assert(params.fParams.contentSizeFlag == 1);
-    ZSTD_CCtxParam_setParameter(&cctxParams, ZSTD_p_compressLiterals, compressionLevel>=0);
     return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, cctxParams);
 }
 
-size_t ZSTD_compressCCtx (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
+size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
+                         void* dst, size_t dstCapacity,
+                   const void* src, size_t srcSize,
+                         int compressionLevel)
 {
-    DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (U32)srcSize);
+    DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
+    assert(cctx != NULL);
     return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
 }
 
-size_t ZSTD_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
+size_t ZSTD_compress(void* dst, size_t dstCapacity,
+               const void* src, size_t srcSize,
+                     int compressionLevel)
 {
     size_t result;
     ZSTD_CCtx ctxBody;
-    memset(&ctxBody, 0, sizeof(ctxBody));
-    ctxBody.customMem = ZSTD_defaultCMem;
+    ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem);
     result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
-    ZSTD_free(ctxBody.workSpace, ZSTD_defaultCMem);  /* can't free ctxBody itself, as it's on stack; free only heap content */
+    ZSTD_freeCCtxContent(&ctxBody);   /* can't free ctxBody itself, as it's on stack; free only heap content */
     return result;
 }
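
With the context now built on the stack and only its heap content released,
ZSTD_compress() stays a convenient one-shot entry point. A caller-side
sketch, sizing the destination with ZSTD_compressBound() so that
dstSize_tooSmall cannot occur:

    #include <stdlib.h>
    #include <zstd.h>

    static size_t one_shot(const void* src, size_t srcSize, int level,
                           void** dstOut)
    {
        size_t const bound = ZSTD_compressBound(srcSize);  /* worst case */
        void* const dst = malloc(bound);
        size_t const cSize = dst
            ? ZSTD_compress(dst, bound, src, srcSize, level)
            : 0;   /* 0 signals allocation failure in this sketch */
        *dstOut = dst;   /* caller frees */
        return cSize;    /* check with ZSTD_isError() */
    }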
 
@@ -2594,7 +3571,7 @@
         size_t dictSize, ZSTD_compressionParameters cParams,
         ZSTD_dictLoadMethod_e dictLoadMethod)
 {
-    DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (U32)sizeof(ZSTD_CDict));
+    DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
     return sizeof(ZSTD_CDict) + HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
            + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
 }
@@ -2608,7 +3585,7 @@
 size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
 {
     if (cdict==NULL) return 0;   /* support sizeof on NULL */
-    DEBUGLOG(5, "sizeof(*cdict) : %u", (U32)sizeof(*cdict));
+    DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
     return cdict->workspaceSize + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
 }
 
@@ -2619,9 +3596,9 @@
                     ZSTD_dictContentType_e dictContentType,
                     ZSTD_compressionParameters cParams)
 {
-    DEBUGLOG(3, "ZSTD_initCDict_internal, dictContentType %u", (U32)dictContentType);
+    DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
     assert(!ZSTD_checkCParams(cParams));
-    cdict->cParams = cParams;
+    cdict->matchState.cParams = cParams;
     if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
         cdict->dictBuffer = NULL;
         cdict->dictContent = dictBuffer;
@@ -2629,17 +3606,17 @@
         void* const internalBuffer = ZSTD_malloc(dictSize, cdict->customMem);
         cdict->dictBuffer = internalBuffer;
         cdict->dictContent = internalBuffer;
-        if (!internalBuffer) return ERROR(memory_allocation);
+        RETURN_ERROR_IF(!internalBuffer, memory_allocation);
         memcpy(internalBuffer, dictBuffer, dictSize);
     }
     cdict->dictContentSize = dictSize;
 
     /* Reset the state to no dictionary */
     ZSTD_reset_compressedBlockState(&cdict->cBlockState);
-    {   void* const end = ZSTD_reset_matchState(
-                &cdict->matchState,
-                (U32*)cdict->workspace + HUF_WORKSPACE_SIZE_U32,
-                &cParams, ZSTDcrp_continue, /* forCCtx */ 0);
+    {   void* const end = ZSTD_reset_matchState(&cdict->matchState,
+                            (U32*)cdict->workspace + HUF_WORKSPACE_SIZE_U32,
+                            &cParams,
+                             ZSTDcrp_continue, ZSTD_resetTarget_CDict);
         assert(end == (char*)cdict->workspace + cdict->workspaceSize);
         (void)end;
     }
@@ -2654,8 +3631,8 @@
         {   size_t const dictID = ZSTD_compress_insertDictionary(
                     &cdict->cBlockState, &cdict->matchState, &params,
                     cdict->dictContent, cdict->dictContentSize,
-                    dictContentType, cdict->workspace);
-            if (ZSTD_isError(dictID)) return dictID;
+                    dictContentType, ZSTD_dtlm_full, cdict->workspace);
+            FORWARD_IF_ERROR(dictID);
             assert(dictID <= (size_t)(U32)-1);
             cdict->dictID = (U32)dictID;
         }
@@ -2669,7 +3646,7 @@
                                       ZSTD_dictContentType_e dictContentType,
                                       ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
 {
-    DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (U32)dictContentType);
+    DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (unsigned)dictContentType);
     if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
 
     {   ZSTD_CDict* const cdict = (ZSTD_CDict*)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
@@ -2750,7 +3727,7 @@
     void* ptr;
     if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
     DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
-        (U32)workspaceSize, (U32)neededSize, (U32)(workspaceSize < neededSize));
+        (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
     if (workspaceSize < neededSize) return NULL;
 
     if (dictLoadMethod == ZSTD_dlm_byCopy) {
@@ -2775,7 +3752,7 @@
 ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
 {
     assert(cdict != NULL);
-    return cdict->cParams;
+    return cdict->matchState.cParams;
 }
 
 /* ZSTD_compressBegin_usingCDict_advanced() :
@@ -2785,7 +3762,7 @@
     ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
 {
     DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
-    if (cdict==NULL) return ERROR(dictionary_wrong);
+    RETURN_ERROR_IF(cdict==NULL, dictionary_wrong);
     {   ZSTD_CCtx_params params = cctx->requestedParams;
         params.cParams = ZSTD_getCParamsFromCDict(cdict);
         /* Increase window log to fit the entire dictionary and source if the
@@ -2799,7 +3776,7 @@
         }
         params.fParams = fParams;
         return ZSTD_compressBegin_internal(cctx,
-                                           NULL, 0, ZSTD_dct_auto,
+                                           NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                            cdict,
                                            params, pledgedSrcSize,
                                            ZSTDb_not_buffered);
@@ -2813,7 +3790,7 @@
 {
     ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
     DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
-    return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, 0);
+    return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
 }
 
 size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
@@ -2821,7 +3798,7 @@
                                 const void* src, size_t srcSize,
                                 const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
 {
-    CHECK_F (ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize));   /* will check if cdict != NULL */
+    FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize));   /* will check if cdict != NULL */
     return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
 }
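
For repeated compressions against the same dictionary, digesting it once
into a ZSTD_CDict amortizes the ZSTD_dtlm_full table load performed above. A
hedged usage sketch (dictBuf is assumed to hold a valid zstd dictionary):

    #include <zstd.h>

    static size_t compress_with_cdict(void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize,
                                const void* dictBuf, size_t dictSize,
                                      int level)
    {
        ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
        ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, level);
        size_t cSize = 0;   /* 0 signals failure in this sketch */
        if (cctx && cdict)
            cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
                                             src, srcSize, cdict);
        ZSTD_freeCDict(cdict);   /* both free functions accept NULL */
        ZSTD_freeCCtx(cctx);
        return cSize;
    }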
 
@@ -2880,16 +3857,17 @@
 static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
                     const void* const dict, size_t const dictSize, ZSTD_dictContentType_e const dictContentType,
                     const ZSTD_CDict* const cdict,
-                    ZSTD_CCtx_params const params, unsigned long long const pledgedSrcSize)
+                    ZSTD_CCtx_params params, unsigned long long const pledgedSrcSize)
 {
-    DEBUGLOG(4, "ZSTD_resetCStream_internal (disableLiteralCompression=%i)",
-                params.disableLiteralCompression);
+    DEBUGLOG(4, "ZSTD_resetCStream_internal");
+    /* Finalize the compression parameters */
+    params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize);
     /* params are supposed to be fully validated at this point */
     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
 
-    CHECK_F( ZSTD_compressBegin_internal(cctx,
-                                         dict, dictSize, dictContentType,
+    FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
+                                         dict, dictSize, dictContentType, ZSTD_dtlm_fast,
                                          cdict,
                                          params, pledgedSrcSize,
                                          ZSTDb_buffered) );
@@ -2906,14 +3884,17 @@
 
 /* ZSTD_resetCStream():
  * pledgedSrcSize == 0 means "unknown" */
-size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
+size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
 {
-    ZSTD_CCtx_params params = zcs->requestedParams;
-    DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (U32)pledgedSrcSize);
-    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
-    params.fParams.contentSizeFlag = 1;
-    params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, 0);
-    return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
+    /* temporary : 0 interpreted as "unknown" during transition period.
+     * Users who wish to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
+     * 0 will be interpreted as "empty" in the future.
+     */
+    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+    DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
+    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
+    return 0;
 }
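
ZSTD_resetCStream() is thus reduced to a shim over the v1.4 parameter API:
reset the session (parameters survive) and re-pledge the source size.
Written out by hand on the caller side, the equivalent is (sketch):

    static size_t reset_for_next_frame(ZSTD_CStream* zcs,
                                       unsigned long long srcSize)
    {
        size_t const err = ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
        if (ZSTD_isError(err)) return err;
        /* pass ZSTD_CONTENTSIZE_UNKNOWN when the size is not known */
        return ZSTD_CCtx_setPledgedSrcSize(zcs, srcSize);
    }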
 
 /*! ZSTD_initCStream_internal() :
@@ -2925,31 +3906,18 @@
                     ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
 {
     DEBUGLOG(4, "ZSTD_initCStream_internal");
+    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+    zcs->requestedParams = params;
     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
-
-    if (dict && dictSize >= 8) {
-        DEBUGLOG(4, "loading dictionary of size %u", (U32)dictSize);
-        if (zcs->staticSize) {   /* static CCtx : never uses malloc */
-            /* incompatible with internal cdict creation */
-            return ERROR(memory_allocation);
-        }
-        ZSTD_freeCDict(zcs->cdictLocal);
-        zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
-                                            ZSTD_dlm_byCopy, ZSTD_dct_auto,
-                                            params.cParams, zcs->customMem);
-        zcs->cdict = zcs->cdictLocal;
-        if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
+    if (dict) {
+        FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
     } else {
-        if (cdict) {
-            params.cParams = ZSTD_getCParamsFromCDict(cdict);  /* cParams are enforced from cdict; it includes windowLog */
-        }
-        ZSTD_freeCDict(zcs->cdictLocal);
-        zcs->cdictLocal = NULL;
-        zcs->cdict = cdict;
+        /* Dictionary is cleared if !cdict */
+        FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
     }
-
-    return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
+    return 0;
 }
 
 /* ZSTD_initCStream_usingCDict_advanced() :
@@ -2960,22 +3928,20 @@
                                             unsigned long long pledgedSrcSize)
 {
     DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
-    if (!cdict) return ERROR(dictionary_wrong); /* cannot handle NULL cdict (does not know what to do) */
-    {   ZSTD_CCtx_params params = zcs->requestedParams;
-        params.cParams = ZSTD_getCParamsFromCDict(cdict);
-        params.fParams = fParams;
-        return ZSTD_initCStream_internal(zcs,
-                                NULL, 0, cdict,
-                                params, pledgedSrcSize);
-    }
+    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
+    zcs->requestedParams.fParams = fParams;
+    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
+    return 0;
 }
 
 /* note : cdict must outlive compression session */
 size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
 {
-    ZSTD_frameParameters const fParams = { 0 /* contentSizeFlag */, 0 /* checksum */, 0 /* hideDictID */ };
     DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
-    return ZSTD_initCStream_usingCDict_advanced(zcs, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);  /* note : will check that cdict != NULL */
+    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
+    return 0;
 }
 
 
@@ -2985,43 +3951,66 @@
  * dict is loaded with default parameters ZSTD_dm_auto and ZSTD_dlm_byCopy. */
 size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
                                  const void* dict, size_t dictSize,
-                                 ZSTD_parameters params, unsigned long long pledgedSrcSize)
+                                 ZSTD_parameters params, unsigned long long pss)
 {
-    DEBUGLOG(4, "ZSTD_initCStream_advanced: pledgedSrcSize=%u, flag=%u",
-                (U32)pledgedSrcSize, params.fParams.contentSizeFlag);
-    CHECK_F( ZSTD_checkCParams(params.cParams) );
-    if ((pledgedSrcSize==0) && (params.fParams.contentSizeFlag==0)) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* for compatibility with older programs relying on this behavior. Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. This line will be removed in the future. */
-    {   ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
-        return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL /*cdict*/, cctxParams, pledgedSrcSize);
-    }
+    /* for compatibility with older programs relying on this behavior.
+     * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
+     * This line will be removed in the future.
+     */
+    U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+    DEBUGLOG(4, "ZSTD_initCStream_advanced");
+    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
+    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
+    zcs->requestedParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
+    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
+    return 0;
 }
 
 size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
 {
-    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
-    ZSTD_CCtx_params const cctxParams =
-            ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
-    return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN);
+    DEBUGLOG(4, "ZSTD_initCStream_usingDict");
+    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
+    return 0;
 }
 
 size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
 {
-    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;  /* temporary : 0 interpreted as "unknown" during transition period. Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. `0` will be interpreted as "empty" in the future */
-    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0);
-    ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
-    return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, cctxParams, pledgedSrcSize);
+    /* temporary : 0 interpreted as "unknown" during transition period.
+     * Users who wish to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
+     * 0 will be interpreted as "empty" in the future.
+     */
+    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+    DEBUGLOG(4, "ZSTD_initCStream_srcSize");
+    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
+    return 0;
 }
 
 size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
 {
     DEBUGLOG(4, "ZSTD_initCStream");
-    return ZSTD_initCStream_srcSize(zcs, compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN);
+    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) );
+    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
+    return 0;
 }
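
Every ZSTD_initCStream*() entry point above now decomposes into the same
reset-style calls, which is also the recommended pattern for new code. A
sketch of the plain-level case:

    /* Advanced-API equivalent of ZSTD_initCStream(zcs, level) (sketch) */
    static size_t init_stream(ZSTD_CStream* zcs, int level)
    {
        size_t err = ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
        if (ZSTD_isError(err)) return err;
        err = ZSTD_CCtx_refCDict(zcs, NULL);   /* drop any previous dict */
        if (ZSTD_isError(err)) return err;
        return ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, level);
    }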
 
 /*======   Compression   ======*/
 
-MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity,
-                           const void* src, size_t srcSize)
+static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
+{
+    size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
+    if (hintInSize==0) hintInSize = cctx->blockSize;
+    return hintInSize;
+}
+
+static size_t ZSTD_limitCopy(void* dst, size_t dstCapacity,
+                       const void* src, size_t srcSize)
 {
     size_t const length = MIN(dstCapacity, srcSize);
     if (length) memcpy(dst, src, length);
@@ -3029,13 +4018,13 @@
 }
 
 /** ZSTD_compressStream_generic():
- *  internal function for all *compressStream*() variants and *compress_generic()
+ *  internal function for all *compressStream*() variants
  *  non-static, because can be called from zstdmt_compress.c
  * @return : hint size for next input */
-size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
-                                   ZSTD_outBuffer* output,
-                                   ZSTD_inBuffer* input,
-                                   ZSTD_EndDirective const flushMode)
+static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
+                                          ZSTD_outBuffer* output,
+                                          ZSTD_inBuffer* input,
+                                          ZSTD_EndDirective const flushMode)
 {
     const char* const istart = (const char*)input->src;
     const char* const iend = istart + input->size;
@@ -3046,7 +4035,7 @@
     U32 someMoreWork = 1;
 
     /* check expectations */
-    DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (U32)flushMode);
+    DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
     assert(zcs->inBuff != NULL);
     assert(zcs->inBuffSize > 0);
     assert(zcs->outBuff !=  NULL);
@@ -3058,8 +4047,7 @@
         switch(zcs->streamStage)
         {
         case zcss_init:
-            /* call ZSTD_initCStream() first ! */
-            return ERROR(init_missing);
+            RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
 
         case zcss_load:
             if ( (flushMode == ZSTD_e_end)
@@ -3068,12 +4056,12 @@
                 /* shortcut to compression pass directly into output buffer */
                 size_t const cSize = ZSTD_compressEnd(zcs,
                                                 op, oend-op, ip, iend-ip);
-                DEBUGLOG(4, "ZSTD_compressEnd : %u", (U32)cSize);
-                if (ZSTD_isError(cSize)) return cSize;
+                DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
+                FORWARD_IF_ERROR(cSize);
                 ip = iend;
                 op += cSize;
                 zcs->frameEnded = 1;
-                ZSTD_startNewCompression(zcs);
+                ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
                 someMoreWork = 0; break;
             }
             /* complete loading into inBuffer */
@@ -3110,14 +4098,14 @@
                                     zcs->inBuff + zcs->inToCompress, iSize) :
                         ZSTD_compressContinue(zcs, cDst, oSize,
                                     zcs->inBuff + zcs->inToCompress, iSize);
-                if (ZSTD_isError(cSize)) return cSize;
+                FORWARD_IF_ERROR(cSize);
                 zcs->frameEnded = lastBlock;
                 /* prepare next block */
                 zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
                 if (zcs->inBuffTarget > zcs->inBuffSize)
                     zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
                 DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
-                         (U32)zcs->inBuffTarget, (U32)zcs->inBuffSize);
+                         (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
                 if (!lastBlock)
                     assert(zcs->inBuffTarget <= zcs->inBuffSize);
                 zcs->inToCompress = zcs->inBuffPos;
@@ -3126,7 +4114,7 @@
                     if (zcs->frameEnded) {
                         DEBUGLOG(5, "Frame completed directly in outBuffer");
                         someMoreWork = 0;
-                        ZSTD_startNewCompression(zcs);
+                        ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
                     }
                     break;
                 }
@@ -3138,10 +4126,10 @@
         case zcss_flush:
             DEBUGLOG(5, "flush stage");
             {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
-                size_t const flushed = ZSTD_limitCopy(op, oend-op,
+                size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
                             zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
                 DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
-                            (U32)toFlush, (U32)(oend-op), (U32)flushed);
+                            (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
                 op += flushed;
                 zcs->outBuffFlushedSize += flushed;
                 if (toFlush!=flushed) {
@@ -3154,7 +4142,7 @@
                 if (zcs->frameEnded) {
                     DEBUGLOG(5, "Frame completed on flush");
                     someMoreWork = 0;
-                    ZSTD_startNewCompression(zcs);
+                    ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
                     break;
                 }
                 zcs->streamStage = zcss_load;
@@ -3169,63 +4157,67 @@
     input->pos = ip - istart;
     output->pos = op - ostart;
     if (zcs->frameEnded) return 0;
-    {   size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
-        if (hintInSize==0) hintInSize = zcs->blockSize;
-        return hintInSize;
+    return ZSTD_nextInputSizeHint(zcs);
+}
+
+static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
+{
+#ifdef ZSTD_MULTITHREAD
+    if (cctx->appliedParams.nbWorkers >= 1) {
+        assert(cctx->mtctx != NULL);
+        return ZSTDMT_nextInputSizeHint(cctx->mtctx);
     }
+#endif
+    return ZSTD_nextInputSizeHint(cctx);
 }
 
 size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
 {
-    /* check conditions */
-    if (output->pos > output->size) return ERROR(GENERIC);
-    if (input->pos  > input->size)  return ERROR(GENERIC);
-
-    return ZSTD_compressStream_generic(zcs, output, input, ZSTD_e_continue);
+    FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) );
+    return ZSTD_nextInputSizeHint_MTorST(zcs);
 }
 
 
-size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
-                              ZSTD_outBuffer* output,
-                              ZSTD_inBuffer* input,
-                              ZSTD_EndDirective endOp)
+size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
+                             ZSTD_outBuffer* output,
+                             ZSTD_inBuffer* input,
+                             ZSTD_EndDirective endOp)
 {
-    DEBUGLOG(5, "ZSTD_compress_generic, endOp=%u ", (U32)endOp);
+    DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
     /* check conditions */
-    if (output->pos > output->size) return ERROR(GENERIC);
-    if (input->pos  > input->size)  return ERROR(GENERIC);
+    RETURN_ERROR_IF(output->pos > output->size, GENERIC);
+    RETURN_ERROR_IF(input->pos  > input->size, GENERIC);
     assert(cctx!=NULL);
 
     /* transparent initialization stage */
     if (cctx->streamStage == zcss_init) {
         ZSTD_CCtx_params params = cctx->requestedParams;
         ZSTD_prefixDict const prefixDict = cctx->prefixDict;
-        memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));  /* single usage */
-        assert(prefixDict.dict==NULL || cctx->cdict==NULL);   /* only one can be set */
-        DEBUGLOG(4, "ZSTD_compress_generic : transparent init stage");
+        FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) ); /* Init the local dict if present. */
+        memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));   /* single usage */
+        assert(prefixDict.dict==NULL || cctx->cdict==NULL);    /* only one can be set */
+        DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
         if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = input->size + 1;  /* auto-fix pledgedSrcSize */
         params.cParams = ZSTD_getCParamsFromCCtxParams(
                 &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);
 
 #ifdef ZSTD_MULTITHREAD
         if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {
             params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */
         }
         if (params.nbWorkers > 0) {
             /* mt context creation */
-            if (cctx->mtctx == NULL || (params.nbWorkers != ZSTDMT_getNbWorkers(cctx->mtctx))) {
-                DEBUGLOG(4, "ZSTD_compress_generic: creating new mtctx for nbWorkers=%u",
+            if (cctx->mtctx == NULL) {
+                DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u",
                             params.nbWorkers);
-                if (cctx->mtctx != NULL)
-                    DEBUGLOG(4, "ZSTD_compress_generic: previous nbWorkers was %u",
-                                ZSTDMT_getNbWorkers(cctx->mtctx));
-                ZSTDMT_freeCCtx(cctx->mtctx);
                 cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem);
-                if (cctx->mtctx == NULL) return ERROR(memory_allocation);
+                RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation);
             }
             /* mt compression */
             DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
-            CHECK_F( ZSTDMT_initCStream_internal(
+            FORWARD_IF_ERROR( ZSTDMT_initCStream_internal(
                         cctx->mtctx,
                         prefixDict.dict, prefixDict.dictSize, ZSTD_dct_rawContent,
                         cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
@@ -3233,35 +4225,47 @@
             cctx->appliedParams.nbWorkers = params.nbWorkers;
         } else
 #endif
-        {   CHECK_F( ZSTD_resetCStream_internal(cctx,
+        {   FORWARD_IF_ERROR( ZSTD_resetCStream_internal(cctx,
                             prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
                             cctx->cdict,
                             params, cctx->pledgedSrcSizePlusOne-1) );
             assert(cctx->streamStage == zcss_load);
             assert(cctx->appliedParams.nbWorkers == 0);
     }   }
+    /* end of transparent initialization stage */
 
     /* compression stage */
 #ifdef ZSTD_MULTITHREAD
     if (cctx->appliedParams.nbWorkers > 0) {
+        int const forceMaxProgress = (endOp == ZSTD_e_flush || endOp == ZSTD_e_end);
+        size_t flushMin;
+        assert(forceMaxProgress || endOp == ZSTD_e_continue /* Protection for a new flush type */);
         if (cctx->cParamsChanged) {
             ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);
             cctx->cParamsChanged = 0;
         }
-        {   size_t const flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
+        do {
+            flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
             if ( ZSTD_isError(flushMin)
               || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
-                ZSTD_startNewCompression(cctx);
+                ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
             }
-            return flushMin;
-    }   }
+            FORWARD_IF_ERROR(flushMin);
+        } while (forceMaxProgress && flushMin != 0 && output->pos < output->size);
+        DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic");
+        /* Either we don't require maximum forward progress, we've finished the
+         * flush, or we are out of output space.
+         */
+        assert(!forceMaxProgress || flushMin == 0 || output->pos == output->size);
+        return flushMin;
+    }
 #endif
-    CHECK_F( ZSTD_compressStream_generic(cctx, output, input, endOp) );
-    DEBUGLOG(5, "completed ZSTD_compress_generic");
+    FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) );
+    DEBUGLOG(5, "completed ZSTD_compressStream2");
     return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
 }
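
A caller-side sketch of the ZSTD_compressStream2() loop, modelled on the
upstream streaming example; the file I/O is illustrative, and
ZSTD_CStreamInSize()/ZSTD_CStreamOutSize() are the usual buffer sizes:

    #include <stdio.h>
    #include <zstd.h>

    static int stream_compress(ZSTD_CCtx* cctx, FILE* fin, FILE* fout,
                               char* inBuf, size_t inCap,
                               char* outBuf, size_t outCap)
    {
        for (;;) {
            size_t const readSz = fread(inBuf, 1, inCap, fin);
            int const lastChunk = (readSz < inCap);
            ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end
                                                     : ZSTD_e_continue;
            ZSTD_inBuffer input = { inBuf, readSz, 0 };
            int finished = 0;
            while (!finished) {
                ZSTD_outBuffer output = { outBuf, outCap, 0 };
                size_t const remaining =
                    ZSTD_compressStream2(cctx, &output, &input, mode);
                if (ZSTD_isError(remaining)) return -1;
                fwrite(outBuf, 1, output.pos, fout);
                /* done when the frame is flushed (end)
                 * or the input is consumed (continue) */
                finished = lastChunk ? (remaining == 0)
                                     : (input.pos == input.size);
            }
            if (lastChunk) return 0;
        }
    }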
 
-size_t ZSTD_compress_generic_simpleArgs (
+size_t ZSTD_compressStream2_simpleArgs (
                             ZSTD_CCtx* cctx,
                             void* dst, size_t dstCapacity, size_t* dstPos,
                       const void* src, size_t srcSize, size_t* srcPos,
@@ -3269,13 +4273,33 @@
 {
     ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
     ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
-    /* ZSTD_compress_generic() will check validity of dstPos and srcPos */
-    size_t const cErr = ZSTD_compress_generic(cctx, &output, &input, endOp);
+    /* ZSTD_compressStream2() will check validity of dstPos and srcPos */
+    size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
     *dstPos = output.pos;
     *srcPos = input.pos;
     return cErr;
 }
 
+size_t ZSTD_compress2(ZSTD_CCtx* cctx,
+                      void* dst, size_t dstCapacity,
+                      const void* src, size_t srcSize)
+{
+    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
+    {   size_t oPos = 0;
+        size_t iPos = 0;
+        size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
+                                        dst, dstCapacity, &oPos,
+                                        src, srcSize, &iPos,
+                                        ZSTD_e_end);
+        FORWARD_IF_ERROR(result);
+        if (result != 0) {  /* compression not completed, due to lack of output space */
+            assert(oPos == dstCapacity);
+            RETURN_ERROR(dstSize_tooSmall);
+        }
+        assert(iPos == srcSize);   /* all input is expected to be consumed */
+        return oPos;
+    }
+}
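
ZSTD_compress2() is the one-shot face of the same machinery: it resets the
session, then drives a single ZSTD_e_end pass. A hedged usage sketch, sized
with ZSTD_compressBound() so the dstSize_tooSmall branch above is never hit:

    #include <stdlib.h>
    #include <zstd.h>

    static size_t compress2_demo(const void* src, size_t srcSize,
                                 void** dstOut)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        size_t const dstCapacity = ZSTD_compressBound(srcSize);
        void* const dst = malloc(dstCapacity);
        size_t cSize = 0;   /* 0 signals failure in this sketch */
        if (cctx && dst) {
            ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
            ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
            cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
        }
        ZSTD_freeCCtx(cctx);
        *dstOut = dst;   /* caller frees */
        return cSize;
    }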
 
 /*======   Finalize   ======*/
 
@@ -3284,21 +4308,21 @@
 size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
 {
     ZSTD_inBuffer input = { NULL, 0, 0 };
-    if (output->pos > output->size) return ERROR(GENERIC);
-    CHECK_F( ZSTD_compressStream_generic(zcs, output, &input, ZSTD_e_flush) );
-    return zcs->outBuffContentSize - zcs->outBuffFlushedSize;  /* remaining to flush */
+    return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
 }
 
 
 size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
 {
     ZSTD_inBuffer input = { NULL, 0, 0 };
-    if (output->pos > output->size) return ERROR(GENERIC);
-    CHECK_F( ZSTD_compressStream_generic(zcs, output, &input, ZSTD_e_end) );
+    size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
+    FORWARD_IF_ERROR( remainingToFlush );
+    if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush;   /* minimal estimation */
+    /* single thread mode : attempt to calculate remaining to flush more precisely */
     {   size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
-        size_t const checksumSize = zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4;
-        size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize + lastBlockSize + checksumSize;
-        DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (U32)toFlush);
+        size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
+        size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
+        DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
         return toFlush;
     }
 }
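
Since a nonzero ZSTD_endStream() return is the (possibly minimal) estimate
of bytes still to flush, callers drain by looping until it reports 0. A
sketch reusing the buffers from the streaming example above:

    #include <stdio.h>
    #include <zstd.h>

    static int finish_frame(ZSTD_CStream* zcs, FILE* fout,
                            char* outBuf, size_t outCap)
    {
        size_t remaining;
        do {
            ZSTD_outBuffer output = { outBuf, outCap, 0 };
            remaining = ZSTD_endStream(zcs, &output);
            if (ZSTD_isError(remaining)) return -1;
            fwrite(outBuf, 1, output.pos, fout);
        } while (remaining != 0);
        return 0;
    }
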
@@ -3308,122 +4332,123 @@
 
 #define ZSTD_MAX_CLEVEL     22
 int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
+int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
 
 static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
-{   /* "default" - guarantees a monotonically increasing memory budget */
+{   /* "default" - for any srcSize > 256 KB */
     /* W,  C,  H,  S,  L, TL, strat */
     { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */
-    { 19, 13, 14,  1,  7,  1, ZSTD_fast    },  /* level  1 */
-    { 19, 15, 16,  1,  6,  1, ZSTD_fast    },  /* level  2 */
-    { 20, 16, 17,  1,  5,  8, ZSTD_dfast   },  /* level  3 */
-    { 20, 17, 18,  1,  5,  8, ZSTD_dfast   },  /* level  4 */
-    { 20, 17, 18,  2,  5, 16, ZSTD_greedy  },  /* level  5 */
-    { 21, 17, 19,  2,  5, 16, ZSTD_lazy    },  /* level  6 */
-    { 21, 18, 19,  3,  5, 16, ZSTD_lazy    },  /* level  7 */
-    { 21, 18, 20,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
-    { 21, 19, 20,  3,  5, 16, ZSTD_lazy2   },  /* level  9 */
-    { 21, 19, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
-    { 22, 20, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
-    { 22, 20, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */
-    { 22, 21, 22,  4,  5, 32, ZSTD_btlazy2 },  /* level 13 */
-    { 22, 21, 22,  5,  5, 32, ZSTD_btlazy2 },  /* level 14 */
-    { 22, 22, 22,  6,  5, 32, ZSTD_btlazy2 },  /* level 15 */
-    { 22, 21, 22,  4,  5, 48, ZSTD_btopt   },  /* level 16 */
-    { 23, 22, 22,  4,  4, 48, ZSTD_btopt   },  /* level 17 */
-    { 23, 22, 22,  5,  3, 64, ZSTD_btopt   },  /* level 18 */
-    { 23, 23, 22,  7,  3,128, ZSTD_btopt   },  /* level 19 */
-    { 25, 25, 23,  7,  3,128, ZSTD_btultra },  /* level 20 */
-    { 26, 26, 24,  7,  3,256, ZSTD_btultra },  /* level 21 */
-    { 27, 27, 25,  9,  3,512, ZSTD_btultra },  /* level 22 */
+    { 19, 13, 14,  1,  7,  0, ZSTD_fast    },  /* level  1 */
+    { 20, 15, 16,  1,  6,  0, ZSTD_fast    },  /* level  2 */
+    { 21, 16, 17,  1,  5,  1, ZSTD_dfast   },  /* level  3 */
+    { 21, 18, 18,  1,  5,  1, ZSTD_dfast   },  /* level  4 */
+    { 21, 18, 19,  2,  5,  2, ZSTD_greedy  },  /* level  5 */
+    { 21, 19, 19,  3,  5,  4, ZSTD_greedy  },  /* level  6 */
+    { 21, 19, 19,  3,  5,  8, ZSTD_lazy    },  /* level  7 */
+    { 21, 19, 19,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
+    { 21, 19, 20,  4,  5, 16, ZSTD_lazy2   },  /* level  9 */
+    { 22, 20, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
+    { 22, 21, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
+    { 22, 21, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */
+    { 22, 21, 22,  5,  5, 32, ZSTD_btlazy2 },  /* level 13 */
+    { 22, 22, 23,  5,  5, 32, ZSTD_btlazy2 },  /* level 14 */
+    { 22, 23, 23,  6,  5, 32, ZSTD_btlazy2 },  /* level 15 */
+    { 22, 22, 22,  5,  5, 48, ZSTD_btopt   },  /* level 16 */
+    { 23, 23, 22,  5,  4, 64, ZSTD_btopt   },  /* level 17 */
+    { 23, 23, 22,  6,  3, 64, ZSTD_btultra },  /* level 18 */
+    { 23, 24, 22,  7,  3,256, ZSTD_btultra2},  /* level 19 */
+    { 25, 25, 23,  7,  3,256, ZSTD_btultra2},  /* level 20 */
+    { 26, 26, 24,  7,  3,512, ZSTD_btultra2},  /* level 21 */
+    { 27, 27, 25,  9,  3,999, ZSTD_btultra2},  /* level 22 */
 },
 {   /* for srcSize <= 256 KB */
     /* W,  C,  H,  S,  L, TL, strat */
     { 18, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
-    { 18, 13, 14,  1,  6,  1, ZSTD_fast    },  /* level  1 */
-    { 18, 14, 13,  1,  5,  8, ZSTD_dfast   },  /* level  2 */
-    { 18, 16, 15,  1,  5,  8, ZSTD_dfast   },  /* level  3 */
-    { 18, 15, 17,  1,  5,  8, ZSTD_greedy  },  /* level  4.*/
-    { 18, 16, 17,  4,  5,  8, ZSTD_greedy  },  /* level  5.*/
-    { 18, 16, 17,  3,  5,  8, ZSTD_lazy    },  /* level  6.*/
-    { 18, 17, 17,  4,  4,  8, ZSTD_lazy    },  /* level  7 */
-    { 18, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
-    { 18, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
-    { 18, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
-    { 18, 18, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 11.*/
-    { 18, 18, 17,  5,  4,  8, ZSTD_btlazy2 },  /* level 12.*/
-    { 18, 19, 17,  7,  4,  8, ZSTD_btlazy2 },  /* level 13 */
-    { 18, 18, 18,  4,  4, 16, ZSTD_btopt   },  /* level 14.*/
-    { 18, 18, 18,  4,  3, 16, ZSTD_btopt   },  /* level 15.*/
-    { 18, 19, 18,  6,  3, 32, ZSTD_btopt   },  /* level 16.*/
-    { 18, 19, 18,  8,  3, 64, ZSTD_btopt   },  /* level 17.*/
-    { 18, 19, 18,  9,  3,128, ZSTD_btopt   },  /* level 18.*/
-    { 18, 19, 18, 10,  3,256, ZSTD_btopt   },  /* level 19.*/
-    { 18, 19, 18, 11,  3,512, ZSTD_btultra },  /* level 20.*/
-    { 18, 19, 18, 12,  3,512, ZSTD_btultra },  /* level 21.*/
-    { 18, 19, 18, 13,  3,512, ZSTD_btultra },  /* level 22.*/
+    { 18, 13, 14,  1,  6,  0, ZSTD_fast    },  /* level  1 */
+    { 18, 14, 14,  1,  5,  1, ZSTD_dfast   },  /* level  2 */
+    { 18, 16, 16,  1,  4,  1, ZSTD_dfast   },  /* level  3 */
+    { 18, 16, 17,  2,  5,  2, ZSTD_greedy  },  /* level  4.*/
+    { 18, 18, 18,  3,  5,  2, ZSTD_greedy  },  /* level  5.*/
+    { 18, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6.*/
+    { 18, 18, 19,  4,  4,  4, ZSTD_lazy    },  /* level  7 */
+    { 18, 18, 19,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
+    { 18, 18, 19,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
+    { 18, 18, 19,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
+    { 18, 18, 19,  5,  4, 12, ZSTD_btlazy2 },  /* level 11.*/
+    { 18, 19, 19,  7,  4, 12, ZSTD_btlazy2 },  /* level 12.*/
+    { 18, 18, 19,  4,  4, 16, ZSTD_btopt   },  /* level 13 */
+    { 18, 18, 19,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
+    { 18, 18, 19,  6,  3,128, ZSTD_btopt   },  /* level 15.*/
+    { 18, 19, 19,  6,  3,128, ZSTD_btultra },  /* level 16.*/
+    { 18, 19, 19,  8,  3,256, ZSTD_btultra },  /* level 17.*/
+    { 18, 19, 19,  6,  3,128, ZSTD_btultra2},  /* level 18.*/
+    { 18, 19, 19,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
+    { 18, 19, 19, 10,  3,512, ZSTD_btultra2},  /* level 20.*/
+    { 18, 19, 19, 12,  3,512, ZSTD_btultra2},  /* level 21.*/
+    { 18, 19, 19, 13,  3,999, ZSTD_btultra2},  /* level 22.*/
 },
 {   /* for srcSize <= 128 KB */
     /* W,  C,  H,  S,  L, TL, strat */
-    { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* level  0 - not used */
-    { 17, 12, 13,  1,  6,  1, ZSTD_fast    },  /* level  1 */
-    { 17, 13, 16,  1,  5,  1, ZSTD_fast    },  /* level  2 */
-    { 17, 16, 16,  2,  5,  8, ZSTD_dfast   },  /* level  3 */
-    { 17, 13, 15,  3,  4,  8, ZSTD_greedy  },  /* level  4 */
-    { 17, 15, 17,  4,  4,  8, ZSTD_greedy  },  /* level  5 */
-    { 17, 16, 17,  3,  4,  8, ZSTD_lazy    },  /* level  6 */
-    { 17, 15, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  7 */
+    { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
+    { 17, 12, 13,  1,  6,  0, ZSTD_fast    },  /* level  1 */
+    { 17, 13, 15,  1,  5,  0, ZSTD_fast    },  /* level  2 */
+    { 17, 15, 16,  2,  5,  1, ZSTD_dfast   },  /* level  3 */
+    { 17, 17, 17,  2,  4,  1, ZSTD_dfast   },  /* level  4 */
+    { 17, 16, 17,  3,  4,  2, ZSTD_greedy  },  /* level  5 */
+    { 17, 17, 17,  3,  4,  4, ZSTD_lazy    },  /* level  6 */
+    { 17, 17, 17,  3,  4,  8, ZSTD_lazy2   },  /* level  7 */
     { 17, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
     { 17, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
     { 17, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
-    { 17, 17, 17,  7,  4,  8, ZSTD_lazy2   },  /* level 11 */
-    { 17, 17, 17,  8,  4,  8, ZSTD_lazy2   },  /* level 12 */
-    { 17, 18, 17,  6,  4,  8, ZSTD_btlazy2 },  /* level 13.*/
-    { 17, 17, 17,  7,  3,  8, ZSTD_btopt   },  /* level 14.*/
-    { 17, 17, 17,  7,  3, 16, ZSTD_btopt   },  /* level 15.*/
-    { 17, 18, 17,  7,  3, 32, ZSTD_btopt   },  /* level 16.*/
-    { 17, 18, 17,  7,  3, 64, ZSTD_btopt   },  /* level 17.*/
-    { 17, 18, 17,  7,  3,256, ZSTD_btopt   },  /* level 18.*/
-    { 17, 18, 17,  8,  3,256, ZSTD_btopt   },  /* level 19.*/
-    { 17, 18, 17,  9,  3,256, ZSTD_btultra },  /* level 20.*/
-    { 17, 18, 17, 10,  3,256, ZSTD_btultra },  /* level 21.*/
-    { 17, 18, 17, 11,  3,512, ZSTD_btultra },  /* level 22.*/
+    { 17, 17, 17,  5,  4,  8, ZSTD_btlazy2 },  /* level 11 */
+    { 17, 18, 17,  7,  4, 12, ZSTD_btlazy2 },  /* level 12 */
+    { 17, 18, 17,  3,  4, 12, ZSTD_btopt   },  /* level 13.*/
+    { 17, 18, 17,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
+    { 17, 18, 17,  6,  3,256, ZSTD_btopt   },  /* level 15.*/
+    { 17, 18, 17,  6,  3,128, ZSTD_btultra },  /* level 16.*/
+    { 17, 18, 17,  8,  3,256, ZSTD_btultra },  /* level 17.*/
+    { 17, 18, 17, 10,  3,512, ZSTD_btultra },  /* level 18.*/
+    { 17, 18, 17,  5,  3,256, ZSTD_btultra2},  /* level 19.*/
+    { 17, 18, 17,  7,  3,512, ZSTD_btultra2},  /* level 20.*/
+    { 17, 18, 17,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
+    { 17, 18, 17, 11,  3,999, ZSTD_btultra2},  /* level 22.*/
 },
 {   /* for srcSize <= 16 KB */
     /* W,  C,  H,  S,  L, TL, strat */
     { 14, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
-    { 14, 14, 14,  1,  6,  1, ZSTD_fast    },  /* level  1 */
-    { 14, 14, 14,  1,  4,  1, ZSTD_fast    },  /* level  2 */
-    { 14, 14, 14,  1,  4,  6, ZSTD_dfast   },  /* level  3.*/
-    { 14, 14, 14,  4,  4,  6, ZSTD_greedy  },  /* level  4.*/
-    { 14, 14, 14,  3,  4,  6, ZSTD_lazy    },  /* level  5.*/
-    { 14, 14, 14,  4,  4,  6, ZSTD_lazy2   },  /* level  6 */
-    { 14, 14, 14,  5,  4,  6, ZSTD_lazy2   },  /* level  7 */
-    { 14, 14, 14,  6,  4,  6, ZSTD_lazy2   },  /* level  8.*/
-    { 14, 15, 14,  6,  4,  6, ZSTD_btlazy2 },  /* level  9.*/
-    { 14, 15, 14,  3,  3,  6, ZSTD_btopt   },  /* level 10.*/
-    { 14, 15, 14,  6,  3,  8, ZSTD_btopt   },  /* level 11.*/
-    { 14, 15, 14,  6,  3, 16, ZSTD_btopt   },  /* level 12.*/
-    { 14, 15, 14,  6,  3, 24, ZSTD_btopt   },  /* level 13.*/
-    { 14, 15, 15,  6,  3, 48, ZSTD_btopt   },  /* level 14.*/
-    { 14, 15, 15,  6,  3, 64, ZSTD_btopt   },  /* level 15.*/
-    { 14, 15, 15,  6,  3, 96, ZSTD_btopt   },  /* level 16.*/
-    { 14, 15, 15,  6,  3,128, ZSTD_btopt   },  /* level 17.*/
-    { 14, 15, 15,  6,  3,256, ZSTD_btopt   },  /* level 18.*/
-    { 14, 15, 15,  7,  3,256, ZSTD_btopt   },  /* level 19.*/
-    { 14, 15, 15,  8,  3,256, ZSTD_btultra },  /* level 20.*/
-    { 14, 15, 15,  9,  3,256, ZSTD_btultra },  /* level 21.*/
-    { 14, 15, 15, 10,  3,256, ZSTD_btultra },  /* level 22.*/
+    { 14, 14, 15,  1,  5,  0, ZSTD_fast    },  /* level  1 */
+    { 14, 14, 15,  1,  4,  0, ZSTD_fast    },  /* level  2 */
+    { 14, 14, 15,  2,  4,  1, ZSTD_dfast   },  /* level  3 */
+    { 14, 14, 14,  4,  4,  2, ZSTD_greedy  },  /* level  4 */
+    { 14, 14, 14,  3,  4,  4, ZSTD_lazy    },  /* level  5.*/
+    { 14, 14, 14,  4,  4,  8, ZSTD_lazy2   },  /* level  6 */
+    { 14, 14, 14,  6,  4,  8, ZSTD_lazy2   },  /* level  7 */
+    { 14, 14, 14,  8,  4,  8, ZSTD_lazy2   },  /* level  8.*/
+    { 14, 15, 14,  5,  4,  8, ZSTD_btlazy2 },  /* level  9.*/
+    { 14, 15, 14,  9,  4,  8, ZSTD_btlazy2 },  /* level 10.*/
+    { 14, 15, 14,  3,  4, 12, ZSTD_btopt   },  /* level 11.*/
+    { 14, 15, 14,  4,  3, 24, ZSTD_btopt   },  /* level 12.*/
+    { 14, 15, 14,  5,  3, 32, ZSTD_btultra },  /* level 13.*/
+    { 14, 15, 15,  6,  3, 64, ZSTD_btultra },  /* level 14.*/
+    { 14, 15, 15,  7,  3,256, ZSTD_btultra },  /* level 15.*/
+    { 14, 15, 15,  5,  3, 48, ZSTD_btultra2},  /* level 16.*/
+    { 14, 15, 15,  6,  3,128, ZSTD_btultra2},  /* level 17.*/
+    { 14, 15, 15,  7,  3,256, ZSTD_btultra2},  /* level 18.*/
+    { 14, 15, 15,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
+    { 14, 15, 15,  8,  3,512, ZSTD_btultra2},  /* level 20.*/
+    { 14, 15, 15,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
+    { 14, 15, 15, 10,  3,999, ZSTD_btultra2},  /* level 22.*/
 },
 };
 
 /*! ZSTD_getCParams() :
-*  @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
-*   Size values are optional, provide 0 if not known or unused */
+ * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
+ *  Size values are optional, provide 0 if not known or unused */
 ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
 {
     size_t const addedSize = srcSizeHint ? 0 : 500;
-    U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : (U64)-1;
-    U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);   /* intentional underflow for srcSizeHint == 0 */
+    U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : ZSTD_CONTENTSIZE_UNKNOWN;  /* intentional overflow for srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN */
+    U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
     int row = compressionLevel;
     DEBUGLOG(5, "ZSTD_getCParams (cLevel=%i)", compressionLevel);
     if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT;   /* 0 == default */
@@ -3431,13 +4456,14 @@
     if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
     {   ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
         if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel);   /* acceleration factor */
-        return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize); }
-
+        return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize);               /* refine parameters based on srcSize & dictSize */
+    }
 }
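
Because tableID sums three boolean comparisons, an unknown rSize (all
comparisons false) lands in table 0, while progressively smaller inputs walk
down to the more compact tables. For instance, a 100 KB source selects
tableID 2, the "<= 128 KB" table (sketch):

    #include <zstd.h>

    static void show_cparams(void)
    {
        /* 100 KB: tableID = 1 (<=256 KB) + 1 (<=128 KB) + 0 (<=16 KB) = 2 */
        ZSTD_compressionParameters const cp =
            ZSTD_getCParams(19, 100 * 1024, 0 /* no dictionary */);
        /* cp now holds row 19 of that table, further refined by
         * ZSTD_adjustCParams_internal() for the small input */
        (void)cp;
    }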
 
 /*! ZSTD_getParams() :
-*   same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
-*   All fields of `ZSTD_frameParameters` are set to default (0) */
+ *  same idea as ZSTD_getCParams()
+ * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
+ *  Fields of `ZSTD_frameParameters` are set to default values */
 ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
     ZSTD_parameters params;
     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSizeHint, dictSize);
diff --git a/vendor/github.com/DataDog/zstd/zstd_compress_internal.h b/vendor/github.com/DataDog/zstd/zstd_compress_internal.h
index 81f12ca..5495899 100644
--- a/vendor/github.com/DataDog/zstd/zstd_compress_internal.h
+++ b/vendor/github.com/DataDog/zstd/zstd_compress_internal.h
@@ -27,17 +27,19 @@
 extern "C" {
 #endif
 
+
 /*-*************************************
 *  Constants
 ***************************************/
 #define kSearchStrength      8
 #define HASH_READ_SIZE       8
-#define ZSTD_DUBT_UNSORTED_MARK 1   /* For btlazy2 strategy, index 1 now means "unsorted".
+#define ZSTD_DUBT_UNSORTED_MARK 1   /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means "unsorted".
                                        It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
                                        It's not a big deal though : candidate will just be sorted again.
-                                       Additionnally, candidate position 1 will be lost.
+                                       Additionally, candidate position 1 will be lost.
                                        But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
-                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be misdhandled after table re-use with a different strategy */
+                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy.
+                                       This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
 
 
 /*-*************************************
@@ -53,14 +55,30 @@
 } ZSTD_prefixDict;
 
 typedef struct {
-    U32 hufCTable[HUF_CTABLE_SIZE_U32(255)];
+    void* dictBuffer;
+    void const* dict;
+    size_t dictSize;
+    ZSTD_dictContentType_e dictContentType;
+    ZSTD_CDict* cdict;
+} ZSTD_localDict;
+
+typedef struct {
+    U32 CTable[HUF_CTABLE_SIZE_U32(255)];
+    HUF_repeat repeatMode;
+} ZSTD_hufCTables_t;
+
+typedef struct {
     FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
     FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
     FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
-    HUF_repeat hufCTable_repeatMode;
     FSE_repeat offcode_repeatMode;
     FSE_repeat matchlength_repeatMode;
     FSE_repeat litlength_repeatMode;
+} ZSTD_fseCTables_t;
+
+typedef struct {
+    ZSTD_hufCTables_t huf;
+    ZSTD_fseCTables_t fse;
 } ZSTD_entropyCTables_t;
 
 typedef struct {
@@ -76,26 +94,28 @@
     U32 rep[ZSTD_REP_NUM];
 } ZSTD_optimal_t;
 
+typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;
+
 typedef struct {
     /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
-    U32* litFreq;               /* table of literals statistics, of size 256 */
-    U32* litLengthFreq;         /* table of litLength statistics, of size (MaxLL+1) */
-    U32* matchLengthFreq;       /* table of matchLength statistics, of size (MaxML+1) */
-    U32* offCodeFreq;           /* table of offCode statistics, of size (MaxOff+1) */
-    ZSTD_match_t* matchTable;   /* list of found matches, of size ZSTD_OPT_NUM+1 */
-    ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
+    unsigned* litFreq;           /* table of literals statistics, of size 256 */
+    unsigned* litLengthFreq;     /* table of litLength statistics, of size (MaxLL+1) */
+    unsigned* matchLengthFreq;   /* table of matchLength statistics, of size (MaxML+1) */
+    unsigned* offCodeFreq;       /* table of offCode statistics, of size (MaxOff+1) */
+    ZSTD_match_t* matchTable;    /* list of found matches, of size ZSTD_OPT_NUM+1 */
+    ZSTD_optimal_t* priceTable;  /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
 
     U32  litSum;                 /* nb of literals */
     U32  litLengthSum;           /* nb of litLength codes */
     U32  matchLengthSum;         /* nb of matchLength codes */
     U32  offCodeSum;             /* nb of offset codes */
-    /* begin updated by ZSTD_setLog2Prices */
-    U32  log2litSum;             /* pow2 to compare log2(litfreq) to */
-    U32  log2litLengthSum;       /* pow2 to compare log2(llfreq) to */
-    U32  log2matchLengthSum;     /* pow2 to compare log2(mlfreq) to */
-    U32  log2offCodeSum;         /* pow2 to compare log2(offreq) to */
-    /* end : updated by ZSTD_setLog2Prices */
-    U32  staticPrices;           /* prices follow a pre-defined cost structure, statistics are irrelevant */
+    U32  litSumBasePrice;        /* to compare to log2(litfreq) */
+    U32  litLengthSumBasePrice;  /* to compare to log2(llfreq)  */
+    U32  matchLengthSumBasePrice;/* to compare to log2(mlfreq)  */
+    U32  offCodeSumBasePrice;    /* to compare to log2(offreq)  */
+    ZSTD_OptPrice_e priceType;   /* prices can be determined dynamically, or follow a pre-defined cost structure */
+    const ZSTD_entropyCTables_t* symbolCosts;  /* pre-calculated dictionary statistics */
+    ZSTD_literalCompressionMode_e literalCompressionMode;
 } optState_t;
 
 typedef struct {
@@ -108,20 +128,22 @@
     BYTE const* base;       /* All regular indexes relative to this position */
     BYTE const* dictBase;   /* extDict indexes relative to this position */
     U32 dictLimit;          /* below that point, need extDict */
-    U32 lowLimit;           /* below that point, no more data */
+    U32 lowLimit;           /* below that point, no more valid data */
 } ZSTD_window_t;
 
-typedef struct {
-    ZSTD_window_t window;      /* State for window round buffer management */
-    U32 loadedDictEnd;         /* index of end of dictionary */
-    U32 nextToUpdate;          /* index from which to continue table update */
-    U32 nextToUpdate3;         /* index from which to continue table update */
-    U32 hashLog3;              /* dispatch table : larger == faster, more memory */
+typedef struct ZSTD_matchState_t ZSTD_matchState_t;
+struct ZSTD_matchState_t {
+    ZSTD_window_t window;   /* State for window round buffer management */
+    U32 loadedDictEnd;      /* index of end of dictionary, within context's referential. When dict referential is copied into active context (i.e. not attached), this is effectively the same value as dictSize, since the referential starts from zero */
+    U32 nextToUpdate;       /* index from which to continue table update */
+    U32 hashLog3;           /* dispatch table : larger == faster, more memory */
     U32* hashTable;
     U32* hashTable3;
     U32* chainTable;
     optState_t opt;         /* optimal parser state */
-} ZSTD_matchState_t;
+    const ZSTD_matchState_t* dictMatchState;
+    ZSTD_compressionParameters cParams;
+};
 
 typedef struct {
     ZSTD_compressedBlockState_t* prevCBlock;
@@ -147,7 +169,7 @@
     U32 hashLog;            /* Log size of hashTable */
     U32 bucketSizeLog;      /* Log bucket size for collision resolution, at most 8 */
     U32 minMatchLength;     /* Minimum match length */
-    U32 hashEveryLog;       /* Log number of entries to skip */
+    U32 hashRateLog;        /* Log number of entries to skip */
     U32 windowLog;          /* Window log for the LDM */
 } ldmParams_t;
 
@@ -161,7 +183,7 @@
   rawSeq* seq;     /* The start of the sequences */
   size_t pos;      /* The position where reading stopped. <= size. */
   size_t size;     /* The number of sequences. <= capacity. */
-  size_t capacity; /* The capacity of the `seq` pointer */
+  size_t capacity; /* The capacity starting from the `seq` pointer */
 } rawSeqStore_t;
 
 struct ZSTD_CCtx_params_s {
@@ -170,14 +192,20 @@
     ZSTD_frameParameters fParams;
 
     int compressionLevel;
-    int disableLiteralCompression;
     int forceWindow;           /* force back-references to respect limit of
                                 * 1<<wLog, even for dictionary */
+    size_t targetCBlockSize;   /* Tries to keep compressed block size close to targetCBlockSize.
+                                * No target when targetCBlockSize == 0.
+                                * There is no guarantee on compressed block size */
+
+    ZSTD_dictAttachPref_e attachDictPref;
+    ZSTD_literalCompressionMode_e literalCompressionMode;
 
     /* Multithreading: used to pass parameters to mtctx */
-    unsigned nbWorkers;
-    unsigned jobSize;
-    unsigned overlapSizeLog;
+    int nbWorkers;
+    size_t jobSize;
+    int overlapLog;
+    int rsyncable;
 
     /* Long distance matching parameters */
     ldmParams_t ldmParams;
@@ -193,6 +221,8 @@
     ZSTD_CCtx_params requestedParams;
     ZSTD_CCtx_params appliedParams;
     U32   dictID;
+
+    int workSpaceOversizedDuration;
     void* workSpace;
     size_t workSpaceSize;
     size_t blockSize;
@@ -225,7 +255,7 @@
     U32    frameEnded;
 
     /* Dictionary */
-    ZSTD_CDict* cdictLocal;
+    ZSTD_localDict localDict;
     const ZSTD_CDict* cdict;
     ZSTD_prefixDict prefixDict;   /* single-usage dictionary */
 
@@ -235,11 +265,15 @@
 #endif
 };
 
+typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
+
+typedef enum { ZSTD_noDict = 0, ZSTD_extDict = 1, ZSTD_dictMatchState = 2 } ZSTD_dictMode_e;
+
 
 typedef size_t (*ZSTD_blockCompressor) (
         ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict);
+        void const* src, size_t srcSize);
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode);
 
 
 MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
@@ -280,17 +314,19 @@
 */
 MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t mlBase)
 {
-#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG >= 6)
+#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
     static const BYTE* g_start = NULL;
     if (g_start==NULL) g_start = (const BYTE*)literals;  /* note : index only works for compression within a single segment */
     {   U32 const pos = (U32)((const BYTE*)literals - g_start);
-        DEBUGLOG(6, "Cpos%7u :%3u literals, match%3u bytes at dist.code%7u",
+        DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
                pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offsetCode);
     }
 #endif
+    assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
     /* copy Literals */
-    assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + 128 KB);
-    ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
+    assert(seqStorePtr->maxNbLit <= 128 KB);
+    assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
+    ZSTD_wildcopy(seqStorePtr->lit, literals, litLength, ZSTD_no_overlap);
     seqStorePtr->lit += litLength;
 
     /* literal Length */
@@ -420,6 +456,11 @@
     const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
     size_t const matchLength = ZSTD_count(ip, match, vEnd);
     if (match + matchLength != mEnd) return matchLength;
+    DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
+    DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match);
+    DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip);
+    DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
+    DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
     return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
 }
 
@@ -464,9 +505,70 @@
     }
 }
 
+/** ZSTD_ipow() :
+ * Return base^exponent.
+ */
+static U64 ZSTD_ipow(U64 base, U64 exponent)
+{
+    U64 power = 1;
+    while (exponent) {
+      if (exponent & 1) power *= base;
+      exponent >>= 1;
+      base *= base;
+    }
+    return power;
+}
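
As a quick worked trace of the square-and-multiply loop above: ZSTD_ipow(3, 5) walks the exponent bits 101b. Iteration 1 (bit set): power = 3, base squares to 9. Iteration 2 (bit clear): power unchanged, base squares to 81. Iteration 3 (bit set): power = 3 * 81 = 243 = 3^5. Three iterations instead of five naive multiplications, with all arithmetic wrapping modulo 2^64, which is exactly what the rolling hash below relies on.
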
+
+#define ZSTD_ROLL_HASH_CHAR_OFFSET 10
+
+/** ZSTD_rollingHash_append() :
+ * Add the buffer to the hash value.
+ */
+static U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size)
+{
+    BYTE const* istart = (BYTE const*)buf;
+    size_t pos;
+    for (pos = 0; pos < size; ++pos) {
+        hash *= prime8bytes;
+        hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET;
+    }
+    return hash;
+}
+
+/** ZSTD_rollingHash_compute() :
+ * Compute the rolling hash value of the buffer.
+ */
+MEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size)
+{
+    return ZSTD_rollingHash_append(0, buf, size);
+}
+
+/** ZSTD_rollingHash_primePower() :
+ * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash
+ * over a window of length bytes.
+ */
+MEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length)
+{
+    return ZSTD_ipow(prime8bytes, length - 1);
+}
+
+/** ZSTD_rollingHash_rotate() :
+ * Rotate the rolling hash by one byte.
+ */
+MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower)
+{
+    hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower;
+    hash *= prime8bytes;
+    hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET;
+    return hash;
+}
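
Taken together, ZSTD_rollingHash_compute() and ZSTD_rollingHash_rotate() form a classic polynomial rolling hash: rotating one byte at a time over a sliding window yields the same value as recomputing from scratch. A minimal sketch, not part of the patch, assuming it sits in the same translation unit (the helpers above are static) and that assert() is available:

static void ZSTD_rollingHash_demo(const BYTE* data, size_t dataSize, U32 windowLen)
{
    U64 const primePower = ZSTD_rollingHash_primePower(windowLen);
    U64 hash = ZSTD_rollingHash_compute(data, windowLen);   /* hash of data[0..windowLen) */
    size_t pos;
    for (pos = windowLen; pos < dataSize; pos++) {
        /* slide right by one byte : drop data[pos-windowLen], absorb data[pos] */
        hash = ZSTD_rollingHash_rotate(hash, data[pos - windowLen], data[pos], primePower);
        assert(hash == ZSTD_rollingHash_compute(data + pos - windowLen + 1, windowLen));
    }
    (void)hash;
}
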
+
 /*-*************************************
 *  Round buffer management
 ***************************************/
+#if (ZSTD_WINDOWLOG_MAX_64 > 31)
+# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX"
+#endif
 /* Max current allowed */
 #define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
 /* Maximum chunk size before overflow correction needs to be called again */
@@ -497,6 +599,20 @@
 }
 
 /**
+ * ZSTD_matchState_dictMode():
+ * Inspects the provided matchState and figures out what dictMode should be
+ * passed to the compressor.
+ */
+MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
+{
+    return ZSTD_window_hasExtDict(ms->window) ?
+        ZSTD_extDict :
+        ms->dictMatchState != NULL ?
+            ZSTD_dictMatchState :
+            ZSTD_noDict;
+}
+
+/**
  * ZSTD_window_needOverflowCorrection():
  * Returns non-zero if the indices are getting too large and need overflow
  * protection.
@@ -563,31 +679,83 @@
  * ZSTD_window_enforceMaxDist():
  * Updates lowLimit so that:
  *    (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
- * This allows a simple check that index >= lowLimit to see if index is valid.
- * This must be called before a block compression call, with srcEnd as the block
- * source end.
- * If loadedDictEndPtr is not NULL, we set it to zero once we update lowLimit.
- * This is because dictionaries are allowed to be referenced as long as the last
- * byte of the dictionary is in the window, but once they are out of range,
- * they cannot be referenced. If loadedDictEndPtr is NULL, we use
- * loadedDictEnd == 0.
+ *
+ * It ensures index is valid as long as index >= lowLimit.
+ * This must be called before a block compression call.
+ *
+ * loadedDictEnd is only defined if a dictionary is in use for the current compression.
+ * As the name implies, loadedDictEnd represents the index at the end of the dictionary.
+ * The value lies within the context's referential; it can be compared directly to blockEndIdx.
+ *
+ * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0.
+ * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit.
+ * This is because dictionaries are allowed to be referenced fully
+ * as long as the last byte of the dictionary is in the window.
+ * Once input has progressed beyond window size, dictionary cannot be referenced anymore.
+ *
+ * In normal dict mode, the dictionary lies between lowLimit and dictLimit.
+ * In dictMatchState mode, lowLimit and dictLimit are the same,
+ * and the dictionary is below them.
+ * forceWindow and dictMatchState are therefore incompatible.
  */
-MEM_STATIC void ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
-                                           void const* srcEnd, U32 maxDist,
-                                           U32* loadedDictEndPtr)
+MEM_STATIC void
+ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
+                     const void* blockEnd,
+                           U32   maxDist,
+                           U32*  loadedDictEndPtr,
+                     const ZSTD_matchState_t** dictMatchStatePtr)
 {
-    U32 const current = (U32)((BYTE const*)srcEnd - window->base);
-    U32 loadedDictEnd = loadedDictEndPtr != NULL ? *loadedDictEndPtr : 0;
-    if (current > maxDist + loadedDictEnd) {
-        U32 const newLowLimit = current - maxDist;
+    U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
+    U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
+    DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
+                (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
+
+    /* - When there is no dictionary : loadedDictEnd == 0.
+         In which case, the test (blockEndIdx > maxDist) is merely there to avoid
+         overflowing the next operation `newLowLimit = blockEndIdx - maxDist`.
+       - When there is a standard dictionary :
+         the index referential is copied from the dictionary,
+         which means it starts from 0.
+         In that case, loadedDictEnd == dictSize,
+         and it makes sense to compare `blockEndIdx > maxDist + dictSize`
+         since `blockEndIdx` also starts from zero.
+       - When there is an attached dictionary :
+         loadedDictEnd is expressed within the referential of the context,
+         so it can be directly compared against blockEndIdx.
+    */
+    if (blockEndIdx > maxDist + loadedDictEnd) {
+        U32 const newLowLimit = blockEndIdx - maxDist;
         if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
         if (window->dictLimit < window->lowLimit) {
-            DEBUGLOG(5, "Update dictLimit from %u to %u", window->dictLimit,
-                     window->lowLimit);
+            DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
+                        (unsigned)window->dictLimit, (unsigned)window->lowLimit);
             window->dictLimit = window->lowLimit;
         }
-        if (loadedDictEndPtr)
-            *loadedDictEndPtr = 0;
+        /* On reaching window size, dictionaries are invalidated */
+        if (loadedDictEndPtr) *loadedDictEndPtr = 0;
+        if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
+    }
+}
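
A hypothetical walkthrough of the branch above: with maxDist = 1<<17 (a 128 KB window) and a standard dictionary where loadedDictEnd = 4096, every index stays valid while blockEndIdx <= 131072 + 4096. On the first block that pushes blockEndIdx past that sum, newLowLimit = blockEndIdx - maxDist lands beyond the dictionary, lowLimit (and dictLimit, if it lagged behind) is raised to it, and both *loadedDictEndPtr and *dictMatchStatePtr are cleared, so subsequent blocks can no longer reference the dictionary.
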
+
+/* Similar to ZSTD_window_enforceMaxDist(),
+ * but only invalidates the dictionary
+ * when input progresses beyond the window size. */
+MEM_STATIC void
+ZSTD_checkDictValidity(ZSTD_window_t* window,
+                       const void* blockEnd,
+                             U32   maxDist,
+                             U32*  loadedDictEndPtr,
+                       const ZSTD_matchState_t** dictMatchStatePtr)
+{
+    U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
+    U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
+    DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
+                (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
+
+    if (loadedDictEnd && (blockEndIdx > maxDist + loadedDictEnd)) {
+        /* On reaching window size, dictionaries are invalidated */
+        if (loadedDictEndPtr) *loadedDictEndPtr = 0;
+        if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
     }
 }
 
@@ -603,12 +771,12 @@
 {
     BYTE const* const ip = (BYTE const*)src;
     U32 contiguous = 1;
+    DEBUGLOG(5, "ZSTD_window_update");
     /* Check if blocks follow each other */
     if (src != window->nextSrc) {
         /* not contiguous */
         size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
-        DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u",
-                 window->dictLimit);
+        DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
         window->lowLimit = window->dictLimit;
         assert(distanceFromBase == (size_t)(U32)distanceFromBase);  /* should never overflow */
         window->dictLimit = (U32)distanceFromBase;
@@ -625,10 +793,44 @@
         ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
         U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
         window->lowLimit = lowLimitMax;
+        DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
     }
     return contiguous;
 }
 
+
+/* debug functions */
+#if (DEBUGLEVEL>=2)
+
+MEM_STATIC double ZSTD_fWeight(U32 rawStat)
+{
+    U32 const fp_accuracy = 8;
+    U32 const fp_multiplier = (1 << fp_accuracy);
+    U32 const newStat = rawStat + 1;
+    U32 const hb = ZSTD_highbit32(newStat);
+    U32 const BWeight = hb * fp_multiplier;
+    U32 const FWeight = (newStat << fp_accuracy) >> hb;
+    U32 const weight = BWeight + FWeight;
+    assert(hb + fp_accuracy < 31);
+    return (double)weight / fp_multiplier;
+}
+
+/* display a table content,
+ * listing each element, its frequency, and its predicted bit cost */
+MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
+{
+    unsigned u, sum;
+    for (u=0, sum=0; u<=max; u++) sum += table[u];
+    DEBUGLOG(2, "total nb elts: %u", sum);
+    for (u=0; u<=max; u++) {
+        DEBUGLOG(2, "%2u: %5u  (%.2f)",
+                u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) );
+    }
+}
+
+#endif
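
To make ZSTD_fWeight() concrete with one worked value: rawStat = 255 gives newStat = 256, hb = ZSTD_highbit32(256) = 8, BWeight = 8 * 256 = 2048, FWeight = (256 << 8) >> 8 = 256, hence weight = 2304 and the function returns 2304 / 256 = 9.0, i.e. log2(256) + 1. The +1 offset cancels in ZSTD_debugTable(), where each element's printed cost ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) approximates the Shannon cost log2(sum / table[u]) in bits.
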
+
+
 #if defined (__cplusplus)
 }
 #endif
@@ -640,7 +842,7 @@
  * ============================================================== */
 
 /* ZSTD_getCParamsFromCCtxParams() :
- * cParams are built depending on compressionLevel, src size hints, 
+ * cParams are built depending on compressionLevel, src size hints,
  * LDM and manually set compression parameters.
  */
 ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
@@ -656,12 +858,7 @@
                      const ZSTD_CDict* cdict,
                      ZSTD_CCtx_params  params, unsigned long long pledgedSrcSize);
 
-/*! ZSTD_compressStream_generic() :
- *  Private use only. To be called from zstdmt_compress.c in single-thread mode. */
-size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
-                                   ZSTD_outBuffer* output,
-                                   ZSTD_inBuffer* input,
-                                   ZSTD_EndDirective const flushMode);
+void ZSTD_resetSeqStore(seqStore_t* ssPtr);
 
 /*! ZSTD_getCParamsFromCDict() :
  *  as the name implies */
@@ -672,6 +869,7 @@
 size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
                                     const void* dict, size_t dictSize,
                                     ZSTD_dictContentType_e dictContentType,
+                                    ZSTD_dictTableLoadMethod_e dtlm,
                                     const ZSTD_CDict* cdict,
                                     ZSTD_CCtx_params params,
                                     unsigned long long pledgedSrcSize);
@@ -688,7 +886,7 @@
 /* ZSTD_writeLastEmptyBlock() :
  * output an empty Block with end-of-frame mark to complete a frame
  * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
- *           or an error code if `dstCapcity` is too small (<ZSTD_blockHeaderSize)
+ *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
  */
 size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
 
diff --git a/vendor/github.com/DataDog/zstd/zstd_ddict.c b/vendor/github.com/DataDog/zstd/zstd_ddict.c
new file mode 100644
index 0000000..0af3d23
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_ddict.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* zstd_ddict.c :
+ * concentrates all logic that needs to know the internals of ZSTD_DDict object */
+
+/*-*******************************************************
+*  Dependencies
+*********************************************************/
+#include <string.h>      /* memcpy, memmove, memset */
+#include "cpu.h"         /* bmi2 */
+#include "mem.h"         /* low level memory routines */
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include "zstd_decompress_internal.h"
+#include "zstd_ddict.h"
+
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
+#  include "zstd_legacy.h"
+#endif
+
+
+
+/*-*******************************************************
+*  Types
+*********************************************************/
+struct ZSTD_DDict_s {
+    void* dictBuffer;
+    const void* dictContent;
+    size_t dictSize;
+    ZSTD_entropyDTables_t entropy;
+    U32 dictID;
+    U32 entropyPresent;
+    ZSTD_customMem cMem;
+};  /* typedef'd to ZSTD_DDict within "zstd.h" */
+
+const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict)
+{
+    assert(ddict != NULL);
+    return ddict->dictContent;
+}
+
+size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict)
+{
+    assert(ddict != NULL);
+    return ddict->dictSize;
+}
+
+void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
+{
+    DEBUGLOG(4, "ZSTD_copyDDictParameters");
+    assert(dctx != NULL);
+    assert(ddict != NULL);
+    dctx->dictID = ddict->dictID;
+    dctx->prefixStart = ddict->dictContent;
+    dctx->virtualStart = ddict->dictContent;
+    dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
+    dctx->previousDstEnd = dctx->dictEnd;
+    if (ddict->entropyPresent) {
+        dctx->litEntropy = 1;
+        dctx->fseEntropy = 1;
+        dctx->LLTptr = ddict->entropy.LLTable;
+        dctx->MLTptr = ddict->entropy.MLTable;
+        dctx->OFTptr = ddict->entropy.OFTable;
+        dctx->HUFptr = ddict->entropy.hufTable;
+        dctx->entropy.rep[0] = ddict->entropy.rep[0];
+        dctx->entropy.rep[1] = ddict->entropy.rep[1];
+        dctx->entropy.rep[2] = ddict->entropy.rep[2];
+    } else {
+        dctx->litEntropy = 0;
+        dctx->fseEntropy = 0;
+    }
+}
+
+
+static size_t
+ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict,
+                           ZSTD_dictContentType_e dictContentType)
+{
+    ddict->dictID = 0;
+    ddict->entropyPresent = 0;
+    if (dictContentType == ZSTD_dct_rawContent) return 0;
+
+    if (ddict->dictSize < 8) {
+        if (dictContentType == ZSTD_dct_fullDict)
+            return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
+        return 0;   /* pure content mode */
+    }
+    {   U32 const magic = MEM_readLE32(ddict->dictContent);
+        if (magic != ZSTD_MAGIC_DICTIONARY) {
+            if (dictContentType == ZSTD_dct_fullDict)
+                return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
+            return 0;   /* pure content mode */
+        }
+    }
+    ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);
+
+    /* load entropy tables */
+    RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy(
+            &ddict->entropy, ddict->dictContent, ddict->dictSize)),
+        dictionary_corrupted);
+    ddict->entropyPresent = 1;
+    return 0;
+}
+
+
+static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
+                                      const void* dict, size_t dictSize,
+                                      ZSTD_dictLoadMethod_e dictLoadMethod,
+                                      ZSTD_dictContentType_e dictContentType)
+{
+    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
+        ddict->dictBuffer = NULL;
+        ddict->dictContent = dict;
+        if (!dict) dictSize = 0;
+    } else {
+        void* const internalBuffer = ZSTD_malloc(dictSize, ddict->cMem);
+        ddict->dictBuffer = internalBuffer;
+        ddict->dictContent = internalBuffer;
+        if (!internalBuffer) return ERROR(memory_allocation);
+        memcpy(internalBuffer, dict, dictSize);
+    }
+    ddict->dictSize = dictSize;
+    ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
+
+    /* parse dictionary content */
+    FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) );
+
+    return 0;
+}
+
+ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
+                                      ZSTD_dictLoadMethod_e dictLoadMethod,
+                                      ZSTD_dictContentType_e dictContentType,
+                                      ZSTD_customMem customMem)
+{
+    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+
+    {   ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
+        if (ddict == NULL) return NULL;
+        ddict->cMem = customMem;
+        {   size_t const initResult = ZSTD_initDDict_internal(ddict,
+                                            dict, dictSize,
+                                            dictLoadMethod, dictContentType);
+            if (ZSTD_isError(initResult)) {
+                ZSTD_freeDDict(ddict);
+                return NULL;
+        }   }
+        return ddict;
+    }
+}
+
+/*! ZSTD_createDDict() :
+*   Create a digested dictionary, to start decompression without startup delay.
+*   `dict` content is copied inside DDict.
+*   Consequently, `dict` can be released after `ZSTD_DDict` creation */
+ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
+{
+    ZSTD_customMem const allocator = { NULL, NULL, NULL };
+    return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);
+}
+
+/*! ZSTD_createDDict_byReference() :
+ *  Create a digested dictionary, to start decompression without startup delay.
+ *  Dictionary content is simply referenced, it will be accessed during decompression.
+ *  Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
+ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
+{
+    ZSTD_customMem const allocator = { NULL, NULL, NULL };
+    return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
+}
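
A hypothetical usage sketch for the two constructors above, built only on public zstd.h entry points; the function name and buffer parameters are illustrative, and error handling is abbreviated:

#include <zstd.h>

size_t decompress_with_ddict(void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize,
                             const void* dictBuf, size_t dictSize)
{
    ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);  /* copies dictBuf */
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    size_t result = 0;   /* 0 also stands for allocation failure in this sketch */
    if (ddict && dctx)
        result = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ddict);
    ZSTD_freeDCtx(dctx);     /* both free functions accept NULL */
    ZSTD_freeDDict(ddict);
    return result;           /* check with ZSTD_isError() */
}
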
+
+
+const ZSTD_DDict* ZSTD_initStaticDDict(
+                                void* sBuffer, size_t sBufferSize,
+                                const void* dict, size_t dictSize,
+                                ZSTD_dictLoadMethod_e dictLoadMethod,
+                                ZSTD_dictContentType_e dictContentType)
+{
+    size_t const neededSpace = sizeof(ZSTD_DDict)
+                             + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+    ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
+    assert(sBuffer != NULL);
+    assert(dict != NULL);
+    if ((size_t)sBuffer & 7) return NULL;   /* 8-aligned */
+    if (sBufferSize < neededSpace) return NULL;
+    if (dictLoadMethod == ZSTD_dlm_byCopy) {
+        memcpy(ddict+1, dict, dictSize);  /* local copy */
+        dict = ddict+1;
+    }
+    if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
+                                              dict, dictSize,
+                                              ZSTD_dlm_byRef, dictContentType) ))
+        return NULL;
+    return ddict;
+}
+
+
+size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
+{
+    if (ddict==NULL) return 0;   /* support free on NULL */
+    {   ZSTD_customMem const cMem = ddict->cMem;
+        ZSTD_free(ddict->dictBuffer, cMem);
+        ZSTD_free(ddict, cMem);
+        return 0;
+    }
+}
+
+/*! ZSTD_estimateDDictSize() :
+ *  Estimate amount of memory that will be needed to create a dictionary for decompression.
+ *  Note : dictionaries created by reference, using ZSTD_dlm_byRef, are smaller */
+size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
+{
+    return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+}
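
Pairing the two functions above, a minimal sketch of fully caller-managed allocation; dictBuf/dictSize are hypothetical names, and malloc() is used for its alignment guarantee, matching the 8-byte check in ZSTD_initStaticDDict():

#include <stdlib.h>   /* malloc, free */

size_t const need = ZSTD_estimateDDictSize(dictSize, ZSTD_dlm_byCopy);
void* const storage = malloc(need);   /* malloc() memory is suitably (8-byte) aligned */
const ZSTD_DDict* const ddict = (storage == NULL) ? NULL :
        ZSTD_initStaticDDict(storage, need, dictBuf, dictSize,
                             ZSTD_dlm_byCopy, ZSTD_dct_auto);
/* ... decompress using ddict ... */
free(storage);   /* no ZSTD_freeDDict() for a static DDict */
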
+
+size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
+{
+    if (ddict==NULL) return 0;   /* support sizeof on NULL */
+    return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
+}
+
+/*! ZSTD_getDictID_fromDDict() :
+ *  Provides the dictID of the dictionary loaded into `ddict`.
+ *  If @return == 0, the dictionary is not conformant to the Zstandard specification, or is empty.
+ *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
+{
+    if (ddict==NULL) return 0;
+    return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_ddict.h b/vendor/github.com/DataDog/zstd/zstd_ddict.h
new file mode 100644
index 0000000..0479d11
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_ddict.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+#ifndef ZSTD_DDICT_H
+#define ZSTD_DDICT_H
+
+/*-*******************************************************
+ *  Dependencies
+ *********************************************************/
+#include <stddef.h>   /* size_t */
+#include "zstd.h"     /* ZSTD_DDict, and several public functions */
+
+
+/*-*******************************************************
+ *  Interface
+ *********************************************************/
+
+/* note: several prototypes are already published in `zstd.h` :
+ * ZSTD_createDDict()
+ * ZSTD_createDDict_byReference()
+ * ZSTD_createDDict_advanced()
+ * ZSTD_freeDDict()
+ * ZSTD_initStaticDDict()
+ * ZSTD_sizeof_DDict()
+ * ZSTD_estimateDDictSize()
+ * ZSTD_getDictID_fromDict()
+ */
+
+const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict);
+size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict);
+
+void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+
+
+#endif /* ZSTD_DDICT_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_decompress.c b/vendor/github.com/DataDog/zstd/zstd_decompress.c
index 3ec6a1c..e42872a 100644
--- a/vendor/github.com/DataDog/zstd/zstd_decompress.c
+++ b/vendor/github.com/DataDog/zstd/zstd_decompress.c
@@ -37,7 +37,18 @@
  *  It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().
  */
 #ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
-#  define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_DEFAULTMAX) + 1)
+#  define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1)
+#endif
+
+/*!
+ *  NO_FORWARD_PROGRESS_MAX :
+ *  maximum allowed nb of calls to ZSTD_decompressStream()
+ *  without any forward progress
+ *  (defined as: no byte read from input, and no byte flushed to output)
+ *  before triggering an error.
+ */
+#ifndef ZSTD_NO_FORWARD_PROGRESS_MAX
+#  define ZSTD_NO_FORWARD_PROGRESS_MAX 16
 #endif
 
 
@@ -45,120 +56,25 @@
 *  Dependencies
 *********************************************************/
 #include <string.h>      /* memcpy, memmove, memset */
-#include "cpu.h"
+#include "cpu.h"         /* bmi2 */
 #include "mem.h"         /* low level memory routines */
 #define FSE_STATIC_LINKING_ONLY
 #include "fse.h"
 #define HUF_STATIC_LINKING_ONLY
 #include "huf.h"
-#include "zstd_internal.h"
+#include "zstd_internal.h"  /* blockProperties_t */
+#include "zstd_decompress_internal.h"   /* ZSTD_DCtx */
+#include "zstd_ddict.h"  /* ZSTD_DDictDictContent */
+#include "zstd_decompress_block.h"   /* ZSTD_decompressBlock_internal */
 
 #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
 #  include "zstd_legacy.h"
 #endif
 
 
-/*-*************************************
-*  Errors
-***************************************/
-#define ZSTD_isError ERR_isError   /* for inlining */
-#define FSE_isError  ERR_isError
-#define HUF_isError  ERR_isError
-
-
-/*_*******************************************************
-*  Memory operations
-**********************************************************/
-static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
-
-
 /*-*************************************************************
 *   Context management
 ***************************************************************/
-typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
-               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
-               ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
-               ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;
-
-typedef enum { zdss_init=0, zdss_loadHeader,
-               zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
-
-
-typedef struct {
-    U32 fastMode;
-    U32 tableLog;
-} ZSTD_seqSymbol_header;
-
-typedef struct {
-    U16  nextState;
-    BYTE nbAdditionalBits;
-    BYTE nbBits;
-    U32  baseValue;
-} ZSTD_seqSymbol;
-
-#define SEQSYMBOL_TABLE_SIZE(log)   (1 + (1 << (log)))
-
-typedef struct {
-    ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)];
-    ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)];
-    ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)];
-    HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)];  /* can accommodate HUF_decompress4X */
-    U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-    U32 rep[ZSTD_REP_NUM];
-} ZSTD_entropyDTables_t;
-
-struct ZSTD_DCtx_s
-{
-    const ZSTD_seqSymbol* LLTptr;
-    const ZSTD_seqSymbol* MLTptr;
-    const ZSTD_seqSymbol* OFTptr;
-    const HUF_DTable* HUFptr;
-    ZSTD_entropyDTables_t entropy;
-    const void* previousDstEnd;   /* detect continuity */
-    const void* base;             /* start of current segment */
-    const void* vBase;            /* virtual start of previous segment if it was just before current one */
-    const void* dictEnd;          /* end of previous segment */
-    size_t expected;
-    ZSTD_frameHeader fParams;
-    U64 decodedSize;
-    blockType_e bType;            /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
-    ZSTD_dStage stage;
-    U32 litEntropy;
-    U32 fseEntropy;
-    XXH64_state_t xxhState;
-    size_t headerSize;
-    U32 dictID;
-    ZSTD_format_e format;
-    const BYTE* litPtr;
-    ZSTD_customMem customMem;
-    size_t litSize;
-    size_t rleSize;
-    size_t staticSize;
-    int bmi2;                     /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
-
-    /* streaming */
-    ZSTD_DDict* ddictLocal;
-    const ZSTD_DDict* ddict;
-    ZSTD_dStreamStage streamStage;
-    char*  inBuff;
-    size_t inBuffSize;
-    size_t inPos;
-    size_t maxWindowSize;
-    char*  outBuff;
-    size_t outBuffSize;
-    size_t outStart;
-    size_t outEnd;
-    size_t lhSize;
-    void* legacyContext;
-    U32 previousLegacyVersion;
-    U32 legacyVersion;
-    U32 hostageByte;
-
-    /* workspace */
-    BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
-    BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
-};  /* typedef'd to ZSTD_DCtx within "zstd.h" */
-
 size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
 {
     if (dctx==NULL) return 0;   /* support sizeof NULL */
@@ -173,8 +89,8 @@
 static size_t ZSTD_startingInputLength(ZSTD_format_e format)
 {
     size_t const startingInputLength = (format==ZSTD_f_zstd1_magicless) ?
-                    ZSTD_frameHeaderSize_prefix - ZSTD_frameIdSize :
-                    ZSTD_frameHeaderSize_prefix;
+                    ZSTD_FRAMEHEADERSIZE_PREFIX - ZSTD_FRAMEIDSIZE :
+                    ZSTD_FRAMEHEADERSIZE_PREFIX;
     ZSTD_STATIC_ASSERT(ZSTD_FRAMEHEADERSIZE_PREFIX >= ZSTD_FRAMEIDSIZE);
     /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
     assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
@@ -188,10 +104,16 @@
     dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
     dctx->ddict       = NULL;
     dctx->ddictLocal  = NULL;
+    dctx->dictEnd     = NULL;
+    dctx->ddictIsCold = 0;
+    dctx->dictUses = ZSTD_dont_use;
     dctx->inBuff      = NULL;
     dctx->inBuffSize  = 0;
     dctx->outBuffSize = 0;
     dctx->streamStage = zdss_init;
+    dctx->legacyContext = NULL;
+    dctx->previousLegacyVersion = 0;
+    dctx->noForwardProgress = 0;
     dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
 }
 
@@ -215,8 +137,6 @@
     {   ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_malloc(sizeof(*dctx), customMem);
         if (!dctx) return NULL;
         dctx->customMem = customMem;
-        dctx->legacyContext = NULL;
-        dctx->previousLegacyVersion = 0;
         ZSTD_initDCtx_internal(dctx);
         return dctx;
     }
@@ -228,13 +148,20 @@
     return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);
 }
 
+static void ZSTD_clearDict(ZSTD_DCtx* dctx)
+{
+    ZSTD_freeDDict(dctx->ddictLocal);
+    dctx->ddictLocal = NULL;
+    dctx->ddict = NULL;
+    dctx->dictUses = ZSTD_dont_use;
+}
+
 size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
 {
     if (dctx==NULL) return 0;   /* support free on NULL */
-    if (dctx->staticSize) return ERROR(memory_allocation);   /* not compatible with static DCtx */
+    RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx");
     {   ZSTD_customMem const cMem = dctx->customMem;
-        ZSTD_freeDDict(dctx->ddictLocal);
-        dctx->ddictLocal = NULL;
+        ZSTD_clearDict(dctx);
         ZSTD_free(dctx->inBuff, cMem);
         dctx->inBuff = NULL;
 #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
@@ -265,10 +192,10 @@
  *  Note 3 : Skippable Frame Identifiers are considered valid. */
 unsigned ZSTD_isFrame(const void* buffer, size_t size)
 {
-    if (size < ZSTD_frameIdSize) return 0;
+    if (size < ZSTD_FRAMEIDSIZE) return 0;
     {   U32 const magic = MEM_readLE32(buffer);
         if (magic == ZSTD_MAGICNUMBER) return 1;
-        if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
+        if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
     }
 #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
     if (ZSTD_isLegacy(buffer, size)) return 1;
@@ -284,7 +211,7 @@
 static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
 {
     size_t const minInputSize = ZSTD_startingInputLength(format);
-    if (srcSize < minInputSize) return ERROR(srcSize_wrong);
+    RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong);
 
     {   BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
         U32 const dictID= fhd & 3;
@@ -298,38 +225,41 @@
 
 /** ZSTD_frameHeaderSize() :
  *  srcSize must be >= ZSTD_frameHeaderSize_prefix.
- * @return : size of the Frame Header */
+ * @return : size of the Frame Header,
+ *           or an error code (if srcSize is too small) */
 size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
 {
     return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
 }
 
 
-/** ZSTD_getFrameHeader_internal() :
+/** ZSTD_getFrameHeader_advanced() :
  *  decode Frame Header, or require larger `srcSize`.
  *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
  * @return : 0, `zfhPtr` is correctly filled,
  *          >0, `srcSize` is too small, value is wanted `srcSize` amount,
  *           or an error code, which can be tested using ZSTD_isError() */
-static size_t ZSTD_getFrameHeader_internal(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
+size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
 {
     const BYTE* ip = (const BYTE*)src;
     size_t const minInputSize = ZSTD_startingInputLength(format);
 
+    memset(zfhPtr, 0, sizeof(*zfhPtr));   /* not strictly necessary, but static analyzers do not understand that zfhPtr will only be read if the return value is zero, since these are 2 different signals */
     if (srcSize < minInputSize) return minInputSize;
+    RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");
 
     if ( (format != ZSTD_f_zstd1_magicless)
       && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
-        if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
+        if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
             /* skippable frame */
-            if (srcSize < ZSTD_skippableHeaderSize)
-                return ZSTD_skippableHeaderSize; /* magic number + frame length */
+            if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
+                return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */
             memset(zfhPtr, 0, sizeof(*zfhPtr));
-            zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_frameIdSize);
+            zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
             zfhPtr->frameType = ZSTD_skippableFrame;
             return 0;
         }
-        return ERROR(prefix_unknown);
+        RETURN_ERROR(prefix_unknown);
     }
 
     /* ensure there is enough `srcSize` to fully read/decode frame header */
@@ -347,14 +277,13 @@
         U64 windowSize = 0;
         U32 dictID = 0;
         U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
-        if ((fhdByte & 0x08) != 0)
-            return ERROR(frameParameter_unsupported); /* reserved bits, must be zero */
+        RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported,
+                        "reserved bits, must be zero");
 
         if (!singleSegment) {
             BYTE const wlByte = ip[pos++];
             U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
-            if (windowLog > ZSTD_WINDOWLOG_MAX)
-                return ERROR(frameParameter_windowTooLarge);
+            RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge);
             windowSize = (1ULL << windowLog);
             windowSize += (windowSize >> 3) * (wlByte&7);
         }
@@ -394,7 +323,7 @@
  *           or an error code, which can be tested using ZSTD_isError() */
 size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
 {
-    return ZSTD_getFrameHeader_internal(zfhPtr, src, srcSize, ZSTD_f_zstd1);
+    return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
 }
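
The tri-state contract of ZSTD_getFrameHeader_advanced() carries over to this public wrapper. A short sketch of probing a growing input buffer (buf/have are hypothetical names):

ZSTD_frameHeader zfh;
size_t const hSize = ZSTD_getFrameHeader(&zfh, buf, have);
if (ZSTD_isError(hSize)) {
    /* not a zstd (or skippable) frame, or header malformed */
} else if (hSize > 0) {
    /* too small : hSize is the total srcSize wanted; retry once have >= hSize */
} else {
    /* success : zfh.frameContentSize, zfh.windowSize, zfh.dictID are now valid */
}
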
 
 
@@ -421,6 +350,23 @@
     }   }
 }
 
+static size_t readSkippableFrameSize(void const* src, size_t srcSize)
+{
+    size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
+    U32 sizeU32;
+
+    RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong);
+
+    sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
+    RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
+                    frameParameter_unsupported);
+    {
+        size_t const skippableSize = skippableHeaderSize + sizeU32;
+        RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong);
+        return skippableSize;
+    }
+}
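
For reference, a skippable frame is an 8-byte header followed by an opaque payload: a 4-byte little-endian magic anywhere in the range ZSTD_MAGIC_SKIPPABLE_START through ZSTD_MAGIC_SKIPPABLE_START+15 (hence the mask test elsewhere in this file), then the 4-byte little-endian payload size that this function adds to the header size. A sketch of emitting such a header, assuming MEM_writeLE32() from mem.h and a hypothetical payloadSize:

BYTE header[ZSTD_SKIPPABLEHEADERSIZE];              /* 8 bytes */
MEM_writeLE32(header, ZSTD_MAGIC_SKIPPABLE_START);  /* 0x184D2A50 ; low 4 bits are free */
MEM_writeLE32(header + 4, (U32)payloadSize);        /* nb of opaque bytes that follow */
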
+
 /** ZSTD_findDecompressedSize() :
  *  compatible with legacy mode
  *  `srcSize` must be the exact length of some number of ZSTD compressed and/or
@@ -430,18 +376,15 @@
 {
     unsigned long long totalDstSize = 0;
 
-    while (srcSize >= ZSTD_frameHeaderSize_prefix) {
+    while (srcSize >= ZSTD_FRAMEHEADERSIZE_PREFIX) {
         U32 const magicNumber = MEM_readLE32(src);
 
-        if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
-            size_t skippableSize;
-            if (srcSize < ZSTD_skippableHeaderSize)
-                return ERROR(srcSize_wrong);
-            skippableSize = MEM_readLE32((const BYTE *)src + ZSTD_frameIdSize)
-                          + ZSTD_skippableHeaderSize;
-            if (srcSize < skippableSize) {
+        if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+            size_t const skippableSize = readSkippableFrameSize(src, srcSize);
+            if (ZSTD_isError(skippableSize)) {
                 return ZSTD_CONTENTSIZE_ERROR;
             }
+            assert(skippableSize <= srcSize);
 
             src = (const BYTE *)src + skippableSize;
             srcSize -= skippableSize;
@@ -471,9 +414,9 @@
 }
 
 /** ZSTD_getDecompressedSize() :
-*   compatible with legacy mode
-*   @return : decompressed size if known, 0 otherwise
-              note : 0 can mean any of the following :
+ *  compatible with legacy mode
+ * @return : decompressed size if known, 0 otherwise
+             note : 0 can mean any of the following :
                    - frame content is empty
                    - decompressed size field is not present in frame header
                    - frame header unknown / not supported
@@ -487,1255 +430,98 @@
 
 
 /** ZSTD_decodeFrameHeader() :
-*   `headerSize` must be the size provided by ZSTD_frameHeaderSize().
-*   @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
+ * `headerSize` must be the size provided by ZSTD_frameHeaderSize().
+ * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
 static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
 {
-    size_t const result = ZSTD_getFrameHeader_internal(&(dctx->fParams), src, headerSize, dctx->format);
+    size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
     if (ZSTD_isError(result)) return result;    /* invalid header */
-    if (result>0) return ERROR(srcSize_wrong);  /* headerSize too small */
-    if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID))
-        return ERROR(dictionary_wrong);
+    RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small");
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+    /* Skip the dictID check in fuzzing mode, because it makes the search
+     * harder.
+     */
+    RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),
+                    dictionary_wrong);
+#endif
     if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
     return 0;
 }
 
-
-/*-*************************************************************
- *   Block decoding
- ***************************************************************/
-
-/*! ZSTD_getcBlockSize() :
-*   Provides the size of compressed block from block header `src` */
-size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
-                          blockProperties_t* bpPtr)
+static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret)
 {
-    if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
-    {   U32 const cBlockHeader = MEM_readLE24(src);
-        U32 const cSize = cBlockHeader >> 3;
-        bpPtr->lastBlock = cBlockHeader & 1;
-        bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
-        bpPtr->origSize = cSize;   /* only useful for RLE */
-        if (bpPtr->blockType == bt_rle) return 1;
-        if (bpPtr->blockType == bt_reserved) return ERROR(corruption_detected);
-        return cSize;
-    }
+    ZSTD_frameSizeInfo frameSizeInfo;
+    frameSizeInfo.compressedSize = ret;
+    frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
+    return frameSizeInfo;
 }
 
-
-static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
-                          const void* src, size_t srcSize)
+static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)
 {
-    if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
-    memcpy(dst, src, srcSize);
-    return srcSize;
-}
+    ZSTD_frameSizeInfo frameSizeInfo;
+    memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));
 
-
-static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
-                         const void* src, size_t srcSize,
-                               size_t regenSize)
-{
-    if (srcSize != 1) return ERROR(srcSize_wrong);
-    if (regenSize > dstCapacity) return ERROR(dstSize_tooSmall);
-    memset(dst, *(const BYTE*)src, regenSize);
-    return regenSize;
-}
-
-/*! ZSTD_decodeLiteralsBlock() :
- * @return : nb of bytes read from src (< srcSize )
- *  note : symbol not declared but exposed for fullbench */
-size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
-                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
-{
-    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
-
-    {   const BYTE* const istart = (const BYTE*) src;
-        symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
-
-        switch(litEncType)
-        {
-        case set_repeat:
-            if (dctx->litEntropy==0) return ERROR(dictionary_corrupted);
-            /* fall-through */
-        case set_compressed:
-            if (srcSize < 5) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
-            {   size_t lhSize, litSize, litCSize;
-                U32 singleStream=0;
-                U32 const lhlCode = (istart[0] >> 2) & 3;
-                U32 const lhc = MEM_readLE32(istart);
-                switch(lhlCode)
-                {
-                case 0: case 1: default:   /* note : default is impossible, since lhlCode into [0..3] */
-                    /* 2 - 2 - 10 - 10 */
-                    singleStream = !lhlCode;
-                    lhSize = 3;
-                    litSize  = (lhc >> 4) & 0x3FF;
-                    litCSize = (lhc >> 14) & 0x3FF;
-                    break;
-                case 2:
-                    /* 2 - 2 - 14 - 14 */
-                    lhSize = 4;
-                    litSize  = (lhc >> 4) & 0x3FFF;
-                    litCSize = lhc >> 18;
-                    break;
-                case 3:
-                    /* 2 - 2 - 18 - 18 */
-                    lhSize = 5;
-                    litSize  = (lhc >> 4) & 0x3FFFF;
-                    litCSize = (lhc >> 22) + (istart[4] << 10);
-                    break;
-                }
-                if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
-                if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
-
-                if (HUF_isError((litEncType==set_repeat) ?
-                                    ( singleStream ?
-                                        HUF_decompress1X_usingDTable_bmi2(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2) :
-                                        HUF_decompress4X_usingDTable_bmi2(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2) ) :
-                                    ( singleStream ?
-                                        HUF_decompress1X2_DCtx_wksp_bmi2(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize,
-                                                                         dctx->entropy.workspace, sizeof(dctx->entropy.workspace), dctx->bmi2) :
-                                        HUF_decompress4X_hufOnly_wksp_bmi2(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize,
-                                                                           dctx->entropy.workspace, sizeof(dctx->entropy.workspace), dctx->bmi2))))
-                    return ERROR(corruption_detected);
-
-                dctx->litPtr = dctx->litBuffer;
-                dctx->litSize = litSize;
-                dctx->litEntropy = 1;
-                if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
-                memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
-                return litCSize + lhSize;
-            }
-
-        case set_basic:
-            {   size_t litSize, lhSize;
-                U32 const lhlCode = ((istart[0]) >> 2) & 3;
-                switch(lhlCode)
-                {
-                case 0: case 2: default:   /* note : default is impossible, since lhlCode into [0..3] */
-                    lhSize = 1;
-                    litSize = istart[0] >> 3;
-                    break;
-                case 1:
-                    lhSize = 2;
-                    litSize = MEM_readLE16(istart) >> 4;
-                    break;
-                case 3:
-                    lhSize = 3;
-                    litSize = MEM_readLE24(istart) >> 4;
-                    break;
-                }
-
-                if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */
-                    if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
-                    memcpy(dctx->litBuffer, istart+lhSize, litSize);
-                    dctx->litPtr = dctx->litBuffer;
-                    dctx->litSize = litSize;
-                    memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
-                    return lhSize+litSize;
-                }
-                /* direct reference into compressed stream */
-                dctx->litPtr = istart+lhSize;
-                dctx->litSize = litSize;
-                return lhSize+litSize;
-            }
-
-        case set_rle:
-            {   U32 const lhlCode = ((istart[0]) >> 2) & 3;
-                size_t litSize, lhSize;
-                switch(lhlCode)
-                {
-                case 0: case 2: default:   /* note : default is impossible, since lhlCode into [0..3] */
-                    lhSize = 1;
-                    litSize = istart[0] >> 3;
-                    break;
-                case 1:
-                    lhSize = 2;
-                    litSize = MEM_readLE16(istart) >> 4;
-                    break;
-                case 3:
-                    lhSize = 3;
-                    litSize = MEM_readLE24(istart) >> 4;
-                    if (srcSize<4) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
-                    break;
-                }
-                if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
-                memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
-                dctx->litPtr = dctx->litBuffer;
-                dctx->litSize = litSize;
-                return lhSize+1;
-            }
-        default:
-            return ERROR(corruption_detected);   /* impossible */
-        }
-    }
-}
-
-/* Default FSE distribution tables.
- * These are pre-calculated FSE decoding tables using default distributions as defined in specification :
- * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#default-distributions
- * They were generated programmatically with following method :
- * - start from default distributions, present in /lib/common/zstd_internal.h
- * - generate tables normally, using ZSTD_buildFSETable()
- * - printout the content of tables
- * - pretify output, report below, test with fuzzer to ensure it's correct */
-
-/* Default FSE distribution table for Literal Lengths */
-static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
-     {  1,  1,  1, LL_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
-     /* nextState, nbAddBits, nbBits, baseVal */
-     {  0,  0,  4,    0},  { 16,  0,  4,    0},
-     { 32,  0,  5,    1},  {  0,  0,  5,    3},
-     {  0,  0,  5,    4},  {  0,  0,  5,    6},
-     {  0,  0,  5,    7},  {  0,  0,  5,    9},
-     {  0,  0,  5,   10},  {  0,  0,  5,   12},
-     {  0,  0,  6,   14},  {  0,  1,  5,   16},
-     {  0,  1,  5,   20},  {  0,  1,  5,   22},
-     {  0,  2,  5,   28},  {  0,  3,  5,   32},
-     {  0,  4,  5,   48},  { 32,  6,  5,   64},
-     {  0,  7,  5,  128},  {  0,  8,  6,  256},
-     {  0, 10,  6, 1024},  {  0, 12,  6, 4096},
-     { 32,  0,  4,    0},  {  0,  0,  4,    1},
-     {  0,  0,  5,    2},  { 32,  0,  5,    4},
-     {  0,  0,  5,    5},  { 32,  0,  5,    7},
-     {  0,  0,  5,    8},  { 32,  0,  5,   10},
-     {  0,  0,  5,   11},  {  0,  0,  6,   13},
-     { 32,  1,  5,   16},  {  0,  1,  5,   18},
-     { 32,  1,  5,   22},  {  0,  2,  5,   24},
-     { 32,  3,  5,   32},  {  0,  3,  5,   40},
-     {  0,  6,  4,   64},  { 16,  6,  4,   64},
-     { 32,  7,  5,  128},  {  0,  9,  6,  512},
-     {  0, 11,  6, 2048},  { 48,  0,  4,    0},
-     { 16,  0,  4,    1},  { 32,  0,  5,    2},
-     { 32,  0,  5,    3},  { 32,  0,  5,    5},
-     { 32,  0,  5,    6},  { 32,  0,  5,    8},
-     { 32,  0,  5,    9},  { 32,  0,  5,   11},
-     { 32,  0,  5,   12},  {  0,  0,  6,   15},
-     { 32,  1,  5,   18},  { 32,  1,  5,   20},
-     { 32,  2,  5,   24},  { 32,  2,  5,   28},
-     { 32,  3,  5,   40},  { 32,  4,  5,   48},
-     {  0, 16,  6,65536},  {  0, 15,  6,32768},
-     {  0, 14,  6,16384},  {  0, 13,  6, 8192},
-};   /* LL_defaultDTable */
-
-/* Default FSE distribution table for Offset Codes */
-static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
-    {  1,  1,  1, OF_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
-    /* nextState, nbAddBits, nbBits, baseVal */
-    {  0,  0,  5,    0},     {  0,  6,  4,   61},
-    {  0,  9,  5,  509},     {  0, 15,  5,32765},
-    {  0, 21,  5,2097149},   {  0,  3,  5,    5},
-    {  0,  7,  4,  125},     {  0, 12,  5, 4093},
-    {  0, 18,  5,262141},    {  0, 23,  5,8388605},
-    {  0,  5,  5,   29},     {  0,  8,  4,  253},
-    {  0, 14,  5,16381},     {  0, 20,  5,1048573},
-    {  0,  2,  5,    1},     { 16,  7,  4,  125},
-    {  0, 11,  5, 2045},     {  0, 17,  5,131069},
-    {  0, 22,  5,4194301},   {  0,  4,  5,   13},
-    { 16,  8,  4,  253},     {  0, 13,  5, 8189},
-    {  0, 19,  5,524285},    {  0,  1,  5,    1},
-    { 16,  6,  4,   61},     {  0, 10,  5, 1021},
-    {  0, 16,  5,65533},     {  0, 28,  5,268435453},
-    {  0, 27,  5,134217725}, {  0, 26,  5,67108861},
-    {  0, 25,  5,33554429},  {  0, 24,  5,16777213},
-};   /* OF_defaultDTable */
-
-
-/* Default FSE distribution table for Match Lengths */
-static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
-    {  1,  1,  1, ML_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
-    /* nextState, nbAddBits, nbBits, baseVal */
-    {  0,  0,  6,    3},  {  0,  0,  4,    4},
-    { 32,  0,  5,    5},  {  0,  0,  5,    6},
-    {  0,  0,  5,    8},  {  0,  0,  5,    9},
-    {  0,  0,  5,   11},  {  0,  0,  6,   13},
-    {  0,  0,  6,   16},  {  0,  0,  6,   19},
-    {  0,  0,  6,   22},  {  0,  0,  6,   25},
-    {  0,  0,  6,   28},  {  0,  0,  6,   31},
-    {  0,  0,  6,   34},  {  0,  1,  6,   37},
-    {  0,  1,  6,   41},  {  0,  2,  6,   47},
-    {  0,  3,  6,   59},  {  0,  4,  6,   83},
-    {  0,  7,  6,  131},  {  0,  9,  6,  515},
-    { 16,  0,  4,    4},  {  0,  0,  4,    5},
-    { 32,  0,  5,    6},  {  0,  0,  5,    7},
-    { 32,  0,  5,    9},  {  0,  0,  5,   10},
-    {  0,  0,  6,   12},  {  0,  0,  6,   15},
-    {  0,  0,  6,   18},  {  0,  0,  6,   21},
-    {  0,  0,  6,   24},  {  0,  0,  6,   27},
-    {  0,  0,  6,   30},  {  0,  0,  6,   33},
-    {  0,  1,  6,   35},  {  0,  1,  6,   39},
-    {  0,  2,  6,   43},  {  0,  3,  6,   51},
-    {  0,  4,  6,   67},  {  0,  5,  6,   99},
-    {  0,  8,  6,  259},  { 32,  0,  4,    4},
-    { 48,  0,  4,    4},  { 16,  0,  4,    5},
-    { 32,  0,  5,    7},  { 32,  0,  5,    8},
-    { 32,  0,  5,   10},  { 32,  0,  5,   11},
-    {  0,  0,  6,   14},  {  0,  0,  6,   17},
-    {  0,  0,  6,   20},  {  0,  0,  6,   23},
-    {  0,  0,  6,   26},  {  0,  0,  6,   29},
-    {  0,  0,  6,   32},  {  0, 16,  6,65539},
-    {  0, 15,  6,32771},  {  0, 14,  6,16387},
-    {  0, 13,  6, 8195},  {  0, 12,  6, 4099},
-    {  0, 11,  6, 2051},  {  0, 10,  6, 1027},
-};   /* ML_defaultDTable */
-
-
-static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddBits)
-{
-    void* ptr = dt;
-    ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
-    ZSTD_seqSymbol* const cell = dt + 1;
-
-    DTableH->tableLog = 0;
-    DTableH->fastMode = 0;
-
-    cell->nbBits = 0;
-    cell->nextState = 0;
-    assert(nbAddBits < 255);
-    cell->nbAdditionalBits = (BYTE)nbAddBits;
-    cell->baseValue = baseValue;
-}
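In other words, an RLE table is a degenerate FSE table:

/* Resulting table: tableLog == 0, so every state lookup consumes 0 bits and
 * always returns this single cell — the same (baseValue, nbAdditionalBits)
 * pair is emitted for every sequence of the block. */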
-
-
-/* ZSTD_buildFSETable() :
- * generate FSE decoding table for one symbol (ll, ml or off) */
-static void
-ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
-    const short* normalizedCounter, unsigned maxSymbolValue,
-    const U32* baseValue, const U32* nbAdditionalBits,
-    unsigned tableLog)
-{
-    ZSTD_seqSymbol* const tableDecode = dt+1;
-    U16 symbolNext[MaxSeq+1];
-
-    U32 const maxSV1 = maxSymbolValue + 1;
-    U32 const tableSize = 1 << tableLog;
-    U32 highThreshold = tableSize-1;
-
-    /* Sanity Checks */
-    assert(maxSymbolValue <= MaxSeq);
-    assert(tableLog <= MaxFSELog);
-
-    /* Init, lay down lowprob symbols */
-    {   ZSTD_seqSymbol_header DTableH;
-        DTableH.tableLog = tableLog;
-        DTableH.fastMode = 1;
-        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
-            U32 s;
-            for (s=0; s<maxSV1; s++) {
-                if (normalizedCounter[s]==-1) {
-                    tableDecode[highThreshold--].baseValue = s;
-                    symbolNext[s] = 1;
-                } else {
-                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
-                    symbolNext[s] = normalizedCounter[s];
-        }   }   }
-        memcpy(dt, &DTableH, sizeof(DTableH));
-    }
-
-    /* Spread symbols */
-    {   U32 const tableMask = tableSize-1;
-        U32 const step = FSE_TABLESTEP(tableSize);
-        U32 s, position = 0;
-        for (s=0; s<maxSV1; s++) {
-            int i;
-            for (i=0; i<normalizedCounter[s]; i++) {
-                tableDecode[position].baseValue = s;
-                position = (position + step) & tableMask;
-                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
-        }   }
-        assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
-    }
-
-    /* Build Decoding table */
-    {   U32 u;
-        for (u=0; u<tableSize; u++) {
-            U32 const symbol = tableDecode[u].baseValue;
-            U32 const nextState = symbolNext[symbol]++;
-            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
-            tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
-            assert(nbAdditionalBits[symbol] < 255);
-            tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];
-            tableDecode[u].baseValue = baseValue[symbol];
-    }   }
-}
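The spread step terminates with position == 0 because FSE_TABLESTEP(tableSize), defined in fse.h as (tableSize>>1) + (tableSize>>3) + 3, is odd and hence coprime with the power-of-two tableSize. A minimal standalone sketch checking the tableLog == 6 case (step = 32 + 8 + 3 = 43):

#include <assert.h>

int main(void)
{
    unsigned const tableSize = 1u << 6;
    unsigned const mask = tableSize - 1;
    unsigned const step = (tableSize>>1) + (tableSize>>3) + 3;  /* 43, odd => coprime with 64 */
    char seen[1u << 6] = {0};
    unsigned position = 0, n;
    for (n = 0; n < tableSize; n++) {
        assert(!seen[position]);   /* every cell visited exactly once */
        seen[position] = 1;
        position = (position + step) & mask;
    }
    assert(position == 0);         /* mirrors the assert in ZSTD_buildFSETable() */
    return 0;
}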
-
-
-/*! ZSTD_buildSeqTable() :
- * @return : nb bytes read from src,
- *           or an error code if it fails */
-static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
-                                 symbolEncodingType_e type, U32 max, U32 maxLog,
-                                 const void* src, size_t srcSize,
-                                 const U32* baseValue, const U32* nbAdditionalBits,
-                                 const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable)
-{
-    switch(type)
-    {
-    case set_rle :
-        if (!srcSize) return ERROR(srcSize_wrong);
-        if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
-        {   U32 const symbol = *(const BYTE*)src;
-            U32 const baseline = baseValue[symbol];
-            U32 const nbBits = nbAdditionalBits[symbol];
-            ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
-        }
-        *DTablePtr = DTableSpace;
-        return 1;
-    case set_basic :
-        *DTablePtr = defaultTable;
-        return 0;
-    case set_repeat:
-        if (!flagRepeatTable) return ERROR(corruption_detected);
-        return 0;
-    case set_compressed :
-        {   U32 tableLog;
-            S16 norm[MaxSeq+1];
-            size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
-            if (FSE_isError(headerSize)) return ERROR(corruption_detected);
-            if (tableLog > maxLog) return ERROR(corruption_detected);
-            ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);
-            *DTablePtr = DTableSpace;
-            return headerSize;
-        }
-    default :   /* impossible */
-        assert(0);
-        return ERROR(GENERIC);
-    }
-}
-
-static const U32 LL_base[MaxLL+1] = {
-                 0,    1,    2,     3,     4,     5,     6,      7,
-                 8,    9,   10,    11,    12,    13,    14,     15,
-                16,   18,   20,    22,    24,    28,    32,     40,
-                48,   64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
-                0x2000, 0x4000, 0x8000, 0x10000 };
-
-static const U32 OF_base[MaxOff+1] = {
-                 0,        1,       1,       5,     0xD,     0x1D,     0x3D,     0x7D,
-                 0xFD,   0x1FD,   0x3FD,   0x7FD,   0xFFD,   0x1FFD,   0x3FFD,   0x7FFD,
-                 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
-                 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
-
-static const U32 OF_bits[MaxOff+1] = {
-                     0,  1,  2,  3,  4,  5,  6,  7,
-                     8,  9, 10, 11, 12, 13, 14, 15,
-                    16, 17, 18, 19, 20, 21, 22, 23,
-                    24, 25, 26, 27, 28, 29, 30, 31 };
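These two tables pair up: offset code n contributes OF_bits[n] extra bits on top of the baseline OF_base[n], and for n >= 2 one gets OF_base[n] == (1<<n) - 3, since the three lowest offset values are reserved for repcodes. A worked example:

/* Offset code 10 : OF_base[10] = 0x3FD = 1021, OF_bits[10] = 10,
 * so the decoded offset field spans 1021 + [0, 1023] = 1021..2044. */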
-
-static const U32 ML_base[MaxML+1] = {
-                     3,  4,  5,    6,     7,     8,     9,    10,
-                    11, 12, 13,   14,    15,    16,    17,    18,
-                    19, 20, 21,   22,    23,    24,    25,    26,
-                    27, 28, 29,   30,    31,    32,    33,    34,
-                    35, 37, 39,   41,    43,    47,    51,    59,
-                    67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
-                    0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
-
-
-size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
-                             const void* src, size_t srcSize)
-{
-    const BYTE* const istart = (const BYTE* const)src;
-    const BYTE* const iend = istart + srcSize;
-    const BYTE* ip = istart;
-    DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
-
-    /* check */
-    if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
-
-    /* SeqHead */
-    {   int nbSeq = *ip++;
-        if (!nbSeq) { *nbSeqPtr=0; return 1; }
-        if (nbSeq > 0x7F) {
-            if (nbSeq == 0xFF) {
-                if (ip+2 > iend) return ERROR(srcSize_wrong);
-                nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
-            } else {
-                if (ip >= iend) return ERROR(srcSize_wrong);
-                nbSeq = ((nbSeq-0x80)<<8) + *ip++;
-            }
-        }
-        *nbSeqPtr = nbSeq;
-    }
-
-    /* FSE table descriptors */
-    if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */
-    {   symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
-        symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
-        symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
-        ip++;
-
-        /* Build DTables */
-        {   size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
-                                                      LLtype, MaxLL, LLFSELog,
-                                                      ip, iend-ip,
-                                                      LL_base, LL_bits,
-                                                      LL_defaultDTable, dctx->fseEntropy);
-            if (ZSTD_isError(llhSize)) return ERROR(corruption_detected);
-            ip += llhSize;
-        }
-
-        {   size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
-                                                      OFtype, MaxOff, OffFSELog,
-                                                      ip, iend-ip,
-                                                      OF_base, OF_bits,
-                                                      OF_defaultDTable, dctx->fseEntropy);
-            if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected);
-            ip += ofhSize;
-        }
-
-        {   size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
-                                                      MLtype, MaxML, MLFSELog,
-                                                      ip, iend-ip,
-                                                      ML_base, ML_bits,
-                                                      ML_defaultDTable, dctx->fseEntropy);
-            if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected);
-            ip += mlhSize;
-        }
-    }
-
-    return ip-istart;
-}
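For reference, the SeqHead parsing above inverts a short variable-length encoding of nbSeq. A hypothetical encoder sketch (writeNbSeq is not part of zstd; LONGNBSEQ and MEM_writeLE16 come from zstd_internal.h and mem.h):

static size_t writeNbSeq(BYTE* op, unsigned nbSeq)
{
    if (nbSeq < 128) { op[0] = (BYTE)nbSeq; return 1; }   /* 1-byte form */
    if (nbSeq < LONGNBSEQ) {                              /* 2-byte form */
        op[0] = (BYTE)((nbSeq >> 8) + 0x80);
        op[1] = (BYTE)nbSeq;
        return 2;
    }
    op[0] = 0xFF;                                         /* 3-byte form */
    MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));
    return 3;
}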
-
-
-typedef struct {
-    size_t litLength;
-    size_t matchLength;
-    size_t offset;
-    const BYTE* match;
-} seq_t;
-
-typedef struct {
-    size_t state;
-    const ZSTD_seqSymbol* table;
-} ZSTD_fseState;
-
-typedef struct {
-    BIT_DStream_t DStream;
-    ZSTD_fseState stateLL;
-    ZSTD_fseState stateOffb;
-    ZSTD_fseState stateML;
-    size_t prevOffset[ZSTD_REP_NUM];
-    const BYTE* prefixStart;
-    const BYTE* dictEnd;
-    size_t pos;
-} seqState_t;
-
-
-FORCE_NOINLINE
-size_t ZSTD_execSequenceLast7(BYTE* op,
-                              BYTE* const oend, seq_t sequence,
-                              const BYTE** litPtr, const BYTE* const litLimit,
-                              const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
-{
-    BYTE* const oLitEnd = op + sequence.litLength;
-    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
-    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
-    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
-    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
-    const BYTE* match = oLitEnd - sequence.offset;
-
-    /* check */
-    if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
-    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
-    if (oLitEnd <= oend_w) return ERROR(GENERIC);   /* Precondition */
-
-    /* copy literals */
-    if (op < oend_w) {
-        ZSTD_wildcopy(op, *litPtr, oend_w - op);
-        *litPtr += oend_w - op;
-        op = oend_w;
-    }
-    while (op < oLitEnd) *op++ = *(*litPtr)++;
-
-    /* copy Match */
-    if (sequence.offset > (size_t)(oLitEnd - base)) {
-        /* offset beyond prefix */
-        if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
-        match = dictEnd - (base-match);
-        if (match + sequence.matchLength <= dictEnd) {
-            memmove(oLitEnd, match, sequence.matchLength);
-            return sequenceLength;
-        }
-        /* span extDict & currentPrefixSegment */
-        {   size_t const length1 = dictEnd - match;
-            memmove(oLitEnd, match, length1);
-            op = oLitEnd + length1;
-            sequence.matchLength -= length1;
-            match = base;
-    }   }
-    while (op < oMatchEnd) *op++ = *match++;
-    return sequenceLength;
-}
-
-
-HINT_INLINE
-size_t ZSTD_execSequence(BYTE* op,
-                         BYTE* const oend, seq_t sequence,
-                         const BYTE** litPtr, const BYTE* const litLimit,
-                         const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
-{
-    BYTE* const oLitEnd = op + sequence.litLength;
-    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
-    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
-    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
-    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
-    const BYTE* match = oLitEnd - sequence.offset;
-
-    /* check */
-    if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
-    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
-    if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
-
-    /* copy Literals */
-    ZSTD_copy8(op, *litPtr);
-    if (sequence.litLength > 8)
-        ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
-    op = oLitEnd;
-    *litPtr = iLitEnd;   /* update for next sequence */
-
-    /* copy Match */
-    if (sequence.offset > (size_t)(oLitEnd - base)) {
-        /* offset beyond prefix -> go into extDict */
-        if (sequence.offset > (size_t)(oLitEnd - vBase))
-            return ERROR(corruption_detected);
-        match = dictEnd + (match - base);
-        if (match + sequence.matchLength <= dictEnd) {
-            memmove(oLitEnd, match, sequence.matchLength);
-            return sequenceLength;
-        }
-        /* span extDict & currentPrefixSegment */
-        {   size_t const length1 = dictEnd - match;
-            memmove(oLitEnd, match, length1);
-            op = oLitEnd + length1;
-            sequence.matchLength -= length1;
-            match = base;
-            if (op > oend_w || sequence.matchLength < MINMATCH) {
-              U32 i;
-              for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
-              return sequenceLength;
-            }
-    }   }
-    /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
-
-    /* match within prefix */
-    if (sequence.offset < 8) {
-        /* close range match, overlap */
-        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
-        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
-        int const sub2 = dec64table[sequence.offset];
-        op[0] = match[0];
-        op[1] = match[1];
-        op[2] = match[2];
-        op[3] = match[3];
-        match += dec32table[sequence.offset];
-        ZSTD_copy4(op+4, match);
-        match -= sub2;
-    } else {
-        ZSTD_copy8(op, match);
-    }
-    op += 8; match += 8;
-
-    if (oMatchEnd > oend-(16-MINMATCH)) {
-        if (op < oend_w) {
-            ZSTD_wildcopy(op, match, oend_w - op);
-            match += oend_w - op;
-            op = oend_w;
-        }
-        while (op < oMatchEnd) *op++ = *match++;
-    } else {
-        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
-    }
-    return sequenceLength;
-}
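The offset < 8 branch is subtle enough to deserve a trace; take offset == 1 (a pure byte run): match starts at op-1, so the four scalar copies replicate match[0] into op[0..3]; match += dec32table[1] (== 1) then points match at op[0], letting ZSTD_copy4 fill op[4..7] with the same byte; finally match -= dec64table[1] (== 8) leaves op - match == 8 after both pointers advance, so the trailing ZSTD_wildcopy runs with non-overlapping 8-byte copies.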
-
-
-HINT_INLINE
-size_t ZSTD_execSequenceLong(BYTE* op,
-                             BYTE* const oend, seq_t sequence,
-                             const BYTE** litPtr, const BYTE* const litLimit,
-                             const BYTE* const prefixStart, const BYTE* const dictStart, const BYTE* const dictEnd)
-{
-    BYTE* const oLitEnd = op + sequence.litLength;
-    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
-    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
-    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
-    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
-    const BYTE* match = sequence.match;
-
-    /* check */
-    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
-    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
-    if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd);
-
-    /* copy Literals */
-    ZSTD_copy8(op, *litPtr);  /* note : op <= oLitEnd <= oend_w == oend - 8 */
-    if (sequence.litLength > 8)
-        ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
-    op = oLitEnd;
-    *litPtr = iLitEnd;   /* update for next sequence */
-
-    /* copy Match */
-    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
-        /* offset beyond prefix */
-        if (sequence.offset > (size_t)(oLitEnd - dictStart)) return ERROR(corruption_detected);
-        if (match + sequence.matchLength <= dictEnd) {
-            memmove(oLitEnd, match, sequence.matchLength);
-            return sequenceLength;
-        }
-        /* span extDict & currentPrefixSegment */
-        {   size_t const length1 = dictEnd - match;
-            memmove(oLitEnd, match, length1);
-            op = oLitEnd + length1;
-            sequence.matchLength -= length1;
-            match = prefixStart;
-            if (op > oend_w || sequence.matchLength < MINMATCH) {
-              U32 i;
-              for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
-              return sequenceLength;
-            }
-    }   }
-    assert(op <= oend_w);
-    assert(sequence.matchLength >= MINMATCH);
-
-    /* match within prefix */
-    if (sequence.offset < 8) {
-        /* close range match, overlap */
-        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
-        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
-        int const sub2 = dec64table[sequence.offset];
-        op[0] = match[0];
-        op[1] = match[1];
-        op[2] = match[2];
-        op[3] = match[3];
-        match += dec32table[sequence.offset];
-        ZSTD_copy4(op+4, match);
-        match -= sub2;
-    } else {
-        ZSTD_copy8(op, match);
-    }
-    op += 8; match += 8;
-
-    if (oMatchEnd > oend-(16-MINMATCH)) {
-        if (op < oend_w) {
-            ZSTD_wildcopy(op, match, oend_w - op);
-            match += oend_w - op;
-            op = oend_w;
-        }
-        while (op < oMatchEnd) *op++ = *match++;
-    } else {
-        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
-    }
-    return sequenceLength;
-}
-
-static void
-ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
-{
-    const void* ptr = dt;
-    const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;
-    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
-    DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
-                (U32)DStatePtr->state, DTableH->tableLog);
-    BIT_reloadDStream(bitD);
-    DStatePtr->table = dt + 1;
-}
-
-FORCE_INLINE_TEMPLATE void
-ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
-{
-    ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state];
-    U32 const nbBits = DInfo.nbBits;
-    size_t const lowBits = BIT_readBits(bitD, nbBits);
-    DStatePtr->state = DInfo.nextState + lowBits;
-}
-
-/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
- * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
- * bits before reloading. This value is the maximum number of bits we read
- * after reloading when we are decoding long offsets.
- */
-#define LONG_OFFSETS_MAX_EXTRA_BITS_32                       \
-    (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32       \
-        ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32  \
-        : 0)
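Concretely, with ZSTD_WINDOWLOG_MAX_32 == 30 and STREAM_ACCUMULATOR_MIN_32 == 25 this macro evaluates to 30 - 25 = 5, the value pinned down by the ZSTD_STATIC_ASSERT in ZSTD_decodeSequence() below.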
-
-typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
-
-FORCE_INLINE_TEMPLATE seq_t
-ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
-{
-    seq_t seq;
-    U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
-    U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
-    U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
-    U32 const totalBits = llBits+mlBits+ofBits;
-    U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
-    U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
-    U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;
-
-    /* sequence */
-    {   size_t offset;
-        if (!ofBits)
-            offset = 0;
-        else {
-            ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
-            ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
-            assert(ofBits <= MaxOff);
-            if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
-                U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
-                offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
-                BIT_reloadDStream(&seqState->DStream);
-                if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
-                assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32);   /* to avoid another reload */
-            } else {
-                offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/);   /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
-                if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
-            }
-        }
-
-        if (ofBits <= 1) {
-            offset += (llBase==0);
-            if (offset) {
-                size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
-                temp += !temp;   /* 0 is not valid; input is corrupted; force offset to 1 */
-                if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
-                seqState->prevOffset[1] = seqState->prevOffset[0];
-                seqState->prevOffset[0] = offset = temp;
-            } else {  /* offset == 0 */
-                offset = seqState->prevOffset[0];
-            }
-        } else {
-            seqState->prevOffset[2] = seqState->prevOffset[1];
-            seqState->prevOffset[1] = seqState->prevOffset[0];
-            seqState->prevOffset[0] = offset;
-        }
-        seq.offset = offset;
-    }
-
-    seq.matchLength = mlBase
-                    + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/) : 0);  /* <=  16 bits */
-    if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
-        BIT_reloadDStream(&seqState->DStream);
-    if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
-        BIT_reloadDStream(&seqState->DStream);
-    /* Ensure there are enough bits to read the rest of the data in 64-bit mode. */
-    ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
-
-    seq.litLength = llBase
-                  + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits/*>0*/) : 0);    /* <=  16 bits */
-    if (MEM_32bits())
-        BIT_reloadDStream(&seqState->DStream);
-
-    DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
-                (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
-
-    /* ANS state update */
-    ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream);    /* <=  9 bits */
-    ZSTD_updateFseState(&seqState->stateML, &seqState->DStream);    /* <=  9 bits */
-    if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
-    ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream);  /* <=  8 bits */
-
-    return seq;
-}
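The ofBits <= 1 branch implements the repcode rules; a worked example with prevOffset = {11, 7, 5}:

/* With a literal length > 0 (llBase != 0):
 *   raw value 0 -> reuse prevOffset[0] = 11 (history unchanged);
 *   raw value 1 -> offset 7, history becomes {7, 11, 5};
 *   raw value 2 -> offset 5, history becomes {5, 11, 7}.
 * When llBase == 0 the raw value is shifted up by one, and the extra
 * case offset == 3 yields prevOffset[0] - 1 = 10. */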
-
-FORCE_INLINE_TEMPLATE size_t
-ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
-                               void* dst, size_t maxDstSize,
-                         const void* seqStart, size_t seqSize, int nbSeq,
-                         const ZSTD_longOffset_e isLongOffset)
-{
-    const BYTE* ip = (const BYTE*)seqStart;
-    const BYTE* const iend = ip + seqSize;
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* const oend = ostart + maxDstSize;
-    BYTE* op = ostart;
-    const BYTE* litPtr = dctx->litPtr;
-    const BYTE* const litEnd = litPtr + dctx->litSize;
-    const BYTE* const base = (const BYTE*) (dctx->base);
-    const BYTE* const vBase = (const BYTE*) (dctx->vBase);
-    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
-    DEBUGLOG(5, "ZSTD_decompressSequences");
-
-    /* Regen sequences */
-    if (nbSeq) {
-        seqState_t seqState;
-        dctx->fseEntropy = 1;
-        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
-        CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
-        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
-        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
-        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
-
-        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) {
-            nbSeq--;
-            {   seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
-                size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
-                DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
-                if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
-                op += oneSeqSize;
-        }   }
-
-        /* check if reached exact end */
-        DEBUGLOG(5, "ZSTD_decompressSequences: after decode loop, remaining nbSeq : %i", nbSeq);
-        if (nbSeq) return ERROR(corruption_detected);
-        /* save reps for next block */
-        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
-    }
-
-    /* last literal segment */
-    {   size_t const lastLLSize = litEnd - litPtr;
-        if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
-        memcpy(op, litPtr, lastLLSize);
-        op += lastLLSize;
-    }
-
-    return op-ostart;
-}
-
-static size_t
-ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
-                                 void* dst, size_t maxDstSize,
-                           const void* seqStart, size_t seqSize, int nbSeq,
-                           const ZSTD_longOffset_e isLongOffset)
-{
-    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-
-
-
-FORCE_INLINE_TEMPLATE seq_t
-ZSTD_decodeSequenceLong(seqState_t* seqState, ZSTD_longOffset_e const longOffsets)
-{
-    seq_t seq;
-    U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
-    U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
-    U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
-    U32 const totalBits = llBits+mlBits+ofBits;
-    U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
-    U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
-    U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;
-
-    /* sequence */
-    {   size_t offset;
-        if (!ofBits)
-            offset = 0;
-        else {
-            ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
-            ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
-            assert(ofBits <= MaxOff);
-            if (MEM_32bits() && longOffsets) {
-                U32 const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN_32-1);
-                offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
-                if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream);
-                if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
-            } else {
-                offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits);   /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
-                if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
-            }
-        }
-
-        if (ofBits <= 1) {
-            offset += (llBase==0);
-            if (offset) {
-                size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
-                temp += !temp;   /* 0 is not valid; input is corrupted; force offset to 1 */
-                if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
-                seqState->prevOffset[1] = seqState->prevOffset[0];
-                seqState->prevOffset[0] = offset = temp;
-            } else {
-                offset = seqState->prevOffset[0];
-            }
-        } else {
-            seqState->prevOffset[2] = seqState->prevOffset[1];
-            seqState->prevOffset[1] = seqState->prevOffset[0];
-            seqState->prevOffset[0] = offset;
-        }
-        seq.offset = offset;
-    }
-
-    seq.matchLength = mlBase + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0);  /* <=  16 bits */
-    if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
-        BIT_reloadDStream(&seqState->DStream);
-    if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
-        BIT_reloadDStream(&seqState->DStream);
-    /* Verify that there are enough bits to read the rest of the data in 64-bit mode. */
-    ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
-
-    seq.litLength = llBase + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0);    /* <=  16 bits */
-    if (MEM_32bits())
-        BIT_reloadDStream(&seqState->DStream);
-
-    {   size_t const pos = seqState->pos + seq.litLength;
-        const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
-        seq.match = matchBase + pos - seq.offset;  /* note : this operation can overflow when seq.offset is too large, which can only happen when the input is corrupted.
-                                                    * No consequence though : no memory access will occur; an overly large offset will be detected in ZSTD_execSequenceLong() */
-        seqState->pos = pos + seq.matchLength;
-    }
-
-    /* ANS state update */
-    ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream);    /* <=  9 bits */
-    ZSTD_updateFseState(&seqState->stateML, &seqState->DStream);    /* <=  9 bits */
-    if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
-    ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream);  /* <=  8 bits */
-
-    return seq;
-}
-
-FORCE_INLINE_TEMPLATE size_t
-ZSTD_decompressSequencesLong_body(
-                               ZSTD_DCtx* dctx,
-                               void* dst, size_t maxDstSize,
-                         const void* seqStart, size_t seqSize, int nbSeq,
-                         const ZSTD_longOffset_e isLongOffset)
-{
-    const BYTE* ip = (const BYTE*)seqStart;
-    const BYTE* const iend = ip + seqSize;
-    BYTE* const ostart = (BYTE* const)dst;
-    BYTE* const oend = ostart + maxDstSize;
-    BYTE* op = ostart;
-    const BYTE* litPtr = dctx->litPtr;
-    const BYTE* const litEnd = litPtr + dctx->litSize;
-    const BYTE* const prefixStart = (const BYTE*) (dctx->base);
-    const BYTE* const dictStart = (const BYTE*) (dctx->vBase);
-    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
-
-    /* Regen sequences */
-    if (nbSeq) {
-#define STORED_SEQS 4
-#define STOSEQ_MASK (STORED_SEQS-1)
-#define ADVANCED_SEQS 4
-        seq_t sequences[STORED_SEQS];
-        int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
-        seqState_t seqState;
-        int seqNb;
-        dctx->fseEntropy = 1;
-        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
-        seqState.prefixStart = prefixStart;
-        seqState.pos = (size_t)(op-prefixStart);
-        seqState.dictEnd = dictEnd;
-        CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
-        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
-        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
-        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
-
-        /* prepare in advance */
-        for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
-            sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
-        }
-        if (seqNb<seqAdvance) return ERROR(corruption_detected);
-
-        /* decode and decompress */
-        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
-            seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
-            size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
-            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
-            PREFETCH(sequence.match);  /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
-            sequences[seqNb&STOSEQ_MASK] = sequence;
-            op += oneSeqSize;
-        }
-        if (seqNb<nbSeq) return ERROR(corruption_detected);
-
-        /* finish queue */
-        seqNb -= seqAdvance;
-        for ( ; seqNb<nbSeq ; seqNb++) {
-            size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb&STOSEQ_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
-            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
-            op += oneSeqSize;
-        }
-
-        /* save reps for next block */
-        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
-#undef STORED_SEQS
-#undef STOSEQ_MASK
-#undef ADVANCED_SEQS
-    }
-
-    /* last literal segment */
-    {   size_t const lastLLSize = litEnd - litPtr;
-        if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
-        memcpy(op, litPtr, lastLLSize);
-        op += lastLLSize;
-    }
-
-    return op-ostart;
-}
-
-static size_t
-ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
-                                 void* dst, size_t maxDstSize,
-                           const void* seqStart, size_t seqSize, int nbSeq,
-                           const ZSTD_longOffset_e isLongOffset)
-{
-    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-
-
-
-#if DYNAMIC_BMI2
-
-static TARGET_ATTRIBUTE("bmi2") size_t
-ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
-                                 void* dst, size_t maxDstSize,
-                           const void* seqStart, size_t seqSize, int nbSeq,
-                           const ZSTD_longOffset_e isLongOffset)
-{
-    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-
-static TARGET_ATTRIBUTE("bmi2") size_t
-ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
-                                 void* dst, size_t maxDstSize,
-                           const void* seqStart, size_t seqSize, int nbSeq,
-                           const ZSTD_longOffset_e isLongOffset)
-{
-    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+    if (ZSTD_isLegacy(src, srcSize))
+        return ZSTD_findFrameSizeInfoLegacy(src, srcSize);
 #endif
 
-typedef size_t (*ZSTD_decompressSequences_t)(
-    ZSTD_DCtx *dctx, void *dst, size_t maxDstSize,
-    const void *seqStart, size_t seqSize, int nbSeq,
-    const ZSTD_longOffset_e isLongOffset);
+    if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
+        && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+        frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize);
+        assert(ZSTD_isError(frameSizeInfo.compressedSize) ||
+               frameSizeInfo.compressedSize <= srcSize);
+        return frameSizeInfo;
+    } else {
+        const BYTE* ip = (const BYTE*)src;
+        const BYTE* const ipstart = ip;
+        size_t remainingSize = srcSize;
+        size_t nbBlocks = 0;
+        ZSTD_frameHeader zfh;
 
-static size_t ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
-                                const void* seqStart, size_t seqSize, int nbSeq,
-                                const ZSTD_longOffset_e isLongOffset)
-{
-    DEBUGLOG(5, "ZSTD_decompressSequences");
-#if DYNAMIC_BMI2
-    if (dctx->bmi2) {
-        return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-    }
-#endif
-  return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-
-static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
-                                void* dst, size_t maxDstSize,
-                                const void* seqStart, size_t seqSize, int nbSeq,
-                                const ZSTD_longOffset_e isLongOffset)
-{
-    DEBUGLOG(5, "ZSTD_decompressSequencesLong");
-#if DYNAMIC_BMI2
-    if (dctx->bmi2) {
-        return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-    }
-#endif
-  return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
-}
-
-/* ZSTD_getLongOffsetsShare() :
- * condition : offTable must be valid
- * @return : "share" of long offsets (arbitrarily defined as > (1<<23))
- *           compared to maximum possible of (1<<OffFSELog) */
-static unsigned
-ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
-{
-    const void* ptr = offTable;
-    U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
-    const ZSTD_seqSymbol* table = offTable + 1;
-    U32 const max = 1 << tableLog;
-    U32 u, total = 0;
-    DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
-
-    assert(max <= (1 << OffFSELog));  /* max not too large */
-    for (u=0; u<max; u++) {
-        if (table[u].nbAdditionalBits > 22) total += 1;
-    }
-
-    assert(tableLog <= OffFSELog);
-    total <<= (OffFSELog - tableLog);  /* scale to OffFSELog */
-
-    return total;
-}
-
-
-static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
-                            void* dst, size_t dstCapacity,
-                      const void* src, size_t srcSize, const int frame)
-{   /* blockType == blockCompressed */
-    const BYTE* ip = (const BYTE*)src;
-    /* isLongOffset must be true if there are long offsets.
-     * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
-     * We don't expect that to be the case in 64-bit mode.
-     * In block mode, window size is not known, so we have to be conservative. (note: but it could be evaluated from current-lowLimit)
-     */
-    ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN)));
-    DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
-
-    if (srcSize >= ZSTD_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);
-
-    /* Decode literals section */
-    {   size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
-        DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
-        if (ZSTD_isError(litCSize)) return litCSize;
-        ip += litCSize;
-        srcSize -= litCSize;
-    }
-
-    /* Build Decoding Tables */
-    {   int nbSeq;
-        size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
-        if (ZSTD_isError(seqHSize)) return seqHSize;
-        ip += seqHSize;
-        srcSize -= seqHSize;
-
-        if ( (!frame || dctx->fParams.windowSize > (1<<24))
-          && (nbSeq>0) ) {  /* could probably use a larger nbSeq limit */
-            U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
-            U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
-            if (shareLongOffsets >= minShare)
-                return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
+        /* Extract Frame Header */
+        {   size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
+            if (ZSTD_isError(ret))
+                return ZSTD_errorFrameSizeInfo(ret);
+            if (ret > 0)
+                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
         }
 
-        return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
+        ip += zfh.headerSize;
+        remainingSize -= zfh.headerSize;
+
+        /* Iterate over each block */
+        while (1) {
+            blockProperties_t blockProperties;
+            size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+            if (ZSTD_isError(cBlockSize))
+                return ZSTD_errorFrameSizeInfo(cBlockSize);
+
+            if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
+                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
+
+            ip += ZSTD_blockHeaderSize + cBlockSize;
+            remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
+            nbBlocks++;
+
+            if (blockProperties.lastBlock) break;
+        }
+
+        /* Final frame content checksum */
+        if (zfh.checksumFlag) {
+            if (remainingSize < 4)
+                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
+            ip += 4;
+        }
+
+        frameSizeInfo.compressedSize = ip - ipstart;
+        frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN)
+                                        ? zfh.frameContentSize
+                                        : nbBlocks * zfh.blockSizeMax;
+        return frameSizeInfo;
     }
 }
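When the frame header does not record a content size, the bound falls back to nbBlocks * blockSizeMax: e.g. a 3-block frame with blockSizeMax = 128 KB is bounded by 384 KB. This may overestimate, but never underestimates, the regenerated size, since no block can regenerate more than blockSizeMax bytes.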
 
-
-static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
-{
-    if (dst != dctx->previousDstEnd) {   /* not contiguous */
-        dctx->dictEnd = dctx->previousDstEnd;
-        dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
-        dctx->base = dst;
-        dctx->previousDstEnd = dst;
-    }
-}
-
-size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
-                            void* dst, size_t dstCapacity,
-                      const void* src, size_t srcSize)
-{
-    size_t dSize;
-    ZSTD_checkContinuity(dctx, dst);
-    dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
-    dctx->previousDstEnd = (char*)dst + dSize;
-    return dSize;
-}
-
-
-/** ZSTD_insertBlock() :
-    insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
-ZSTDLIB_API size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
-{
-    ZSTD_checkContinuity(dctx, blockStart);
-    dctx->previousDstEnd = (const char*)blockStart + blockSize;
-    return blockSize;
-}
-
-
-static size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length)
-{
-    if (length > dstCapacity) return ERROR(dstSize_tooSmall);
-    memset(dst, byte, length);
-    return length;
-}
-
 /** ZSTD_findFrameCompressedSize() :
  *  compatible with legacy mode
  *  `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
@@ -1743,55 +529,91 @@
  *  @return : the compressed size of the frame starting at `src` */
 size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
 {
-#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
-    if (ZSTD_isLegacy(src, srcSize))
-        return ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
-#endif
-    if ( (srcSize >= ZSTD_skippableHeaderSize)
-      && (MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START ) {
-        return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + ZSTD_frameIdSize);
-    } else {
-        const BYTE* ip = (const BYTE*)src;
-        const BYTE* const ipstart = ip;
-        size_t remainingSize = srcSize;
-        ZSTD_frameHeader zfh;
+    ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
+    return frameSizeInfo.compressedSize;
+}
 
-        /* Extract Frame Header */
-        {   size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
-            if (ZSTD_isError(ret)) return ret;
-            if (ret > 0) return ERROR(srcSize_wrong);
-        }
+/** ZSTD_decompressBound() :
+ *  compatible with legacy mode
+ *  `src` must point to the start of a ZSTD frame or a skippable frame
+ *  `srcSize` must be at least as large as the frame contained
+ *  @return : the maximum decompressed size of the compressed source
+ */
+unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
+{
+    unsigned long long bound = 0;
+    /* Iterate over each frame */
+    while (srcSize > 0) {
+        ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
+        size_t const compressedSize = frameSizeInfo.compressedSize;
+        unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
+        if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
+            return ZSTD_CONTENTSIZE_ERROR;
+        assert(srcSize >= compressedSize);
+        src = (const BYTE*)src + compressedSize;
+        srcSize -= compressedSize;
+        bound += decompressedBound;
+    }
+    return bound;
+}
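A usage sketch (buf/bufSize are hypothetical and must contain only complete frames; assumes <stdlib.h>):

unsigned long long const bound = ZSTD_decompressBound(buf, bufSize);
if (bound == ZSTD_CONTENTSIZE_ERROR) {
    /* invalid, corrupted, or truncated input */
} else {
    void* const dst = malloc((size_t)bound);   /* guaranteed large enough */
    if (dst != NULL) {
        size_t const dSize = ZSTD_decompress(dst, (size_t)bound, buf, bufSize);
        /* check ZSTD_isError(dSize) before using dst */
    }
}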
 
-        ip += zfh.headerSize;
-        remainingSize -= zfh.headerSize;
 
-        /* Loop on each block */
-        while (1) {
-            blockProperties_t blockProperties;
-            size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
-            if (ZSTD_isError(cBlockSize)) return cBlockSize;
+/*-*************************************************************
+ *   Frame decoding
+ ***************************************************************/
 
-            if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
-                return ERROR(srcSize_wrong);
 
-            ip += ZSTD_blockHeaderSize + cBlockSize;
-            remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
-
-            if (blockProperties.lastBlock) break;
-        }
-
-        if (zfh.checksumFlag) {   /* Final frame content checksum */
-            if (remainingSize < 4) return ERROR(srcSize_wrong);
-            ip += 4;
-            remainingSize -= 4;
-        }
-
-        return ip - ipstart;
+void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
+{
+    if (dst != dctx->previousDstEnd) {   /* not contiguous */
+        dctx->dictEnd = dctx->previousDstEnd;
+        dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
+        dctx->prefixStart = dst;
+        dctx->previousDstEnd = dst;
     }
 }
 
+/** ZSTD_insertBlock() :
+    insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
+size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
+{
+    ZSTD_checkContinuity(dctx, blockStart);
+    dctx->previousDstEnd = (const char*)blockStart + blockSize;
+    return blockSize;
+}
+
+
+static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
+                          const void* src, size_t srcSize)
+{
+    DEBUGLOG(5, "ZSTD_copyRawBlock");
+    if (dst == NULL) {
+        if (srcSize == 0) return 0;
+        RETURN_ERROR(dstBuffer_null);
+    }
+    RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall);
+    memcpy(dst, src, srcSize);
+    return srcSize;
+}
+
+static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
+                               BYTE b,
+                               size_t regenSize)
+{
+    if (dst == NULL) {
+        if (regenSize == 0) return 0;
+        RETURN_ERROR(dstBuffer_null);
+    }
+    RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall);
+    memset(dst, b, regenSize);
+    return regenSize;
+}
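Both helpers share one convention worth noting:

/* dst == NULL is tolerated only for a zero-sized payload: an empty raw or RLE
 * block regenerates nothing, so a NULL output buffer is still a valid target;
 * any non-zero size with a NULL dst fails with dstBuffer_null. */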
+
+
 /*! ZSTD_decompressFrame() :
-*   @dctx must be properly initialized */
+ * @dctx must be properly initialized
+ *  will update *srcPtr and *srcSizePtr,
+ *  to make *srcPtr progress by one frame. */
 static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
                                    void* dst, size_t dstCapacity,
                              const void** srcPtr, size_t *srcSizePtr)
@@ -1800,31 +622,34 @@
     BYTE* const ostart = (BYTE* const)dst;
     BYTE* const oend = ostart + dstCapacity;
     BYTE* op = ostart;
-    size_t remainingSize = *srcSizePtr;
+    size_t remainingSrcSize = *srcSizePtr;
+
+    DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr);
 
     /* check */
-    if (remainingSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize)
-        return ERROR(srcSize_wrong);
+    RETURN_ERROR_IF(
+        remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN+ZSTD_blockHeaderSize,
+        srcSize_wrong);
 
     /* Frame Header */
-    {   size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix);
+    {   size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_FRAMEHEADERSIZE_PREFIX);
         if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
-        if (remainingSize < frameHeaderSize+ZSTD_blockHeaderSize)
-            return ERROR(srcSize_wrong);
-        CHECK_F( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );
-        ip += frameHeaderSize; remainingSize -= frameHeaderSize;
+        RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,
+                        srcSize_wrong);
+        FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );
+        ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
     }
 
     /* Loop on each block */
     while (1) {
         size_t decodedSize;
         blockProperties_t blockProperties;
-        size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+        size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties);
         if (ZSTD_isError(cBlockSize)) return cBlockSize;
 
         ip += ZSTD_blockHeaderSize;
-        remainingSize -= ZSTD_blockHeaderSize;
-        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+        remainingSrcSize -= ZSTD_blockHeaderSize;
+        RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong);
 
         switch(blockProperties.blockType)
         {
@@ -1835,11 +660,11 @@
             decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize);
             break;
         case bt_rle :
-            decodedSize = ZSTD_generateNxBytes(op, oend-op, *ip, blockProperties.origSize);
+            decodedSize = ZSTD_setRleBlock(op, oend-op, *ip, blockProperties.origSize);
             break;
         case bt_reserved :
         default:
-            return ERROR(corruption_detected);
+            RETURN_ERROR(corruption_detected);
         }
 
         if (ZSTD_isError(decodedSize)) return decodedSize;
@@ -1847,33 +672,30 @@
             XXH64_update(&dctx->xxhState, op, decodedSize);
         op += decodedSize;
         ip += cBlockSize;
-        remainingSize -= cBlockSize;
+        remainingSrcSize -= cBlockSize;
         if (blockProperties.lastBlock) break;
     }
 
     if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
-        if ((U64)(op-ostart) != dctx->fParams.frameContentSize) {
-            return ERROR(corruption_detected);
-    }   }
+        RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize,
+                        corruption_detected);
+    }
     if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
         U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
         U32 checkRead;
-        if (remainingSize<4) return ERROR(checksum_wrong);
+        RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong);
         checkRead = MEM_readLE32(ip);
-        if (checkRead != checkCalc) return ERROR(checksum_wrong);
+        RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong);
         ip += 4;
-        remainingSize -= 4;
+        remainingSrcSize -= 4;
     }
 
     /* Allow caller to get size read */
     *srcPtr = ip;
-    *srcSizePtr = remainingSize;
+    *srcSizePtr = remainingSrcSize;
     return op-ostart;
 }
 
-static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict);
-static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict);
-
 static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
                                         void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize,
@@ -1881,26 +703,30 @@
                                   const ZSTD_DDict* ddict)
 {
     void* const dststart = dst;
+    int moreThan1Frame = 0;
+
+    DEBUGLOG(5, "ZSTD_decompressMultiFrame");
     assert(dict==NULL || ddict==NULL);  /* either dict or ddict set, not both */
 
     if (ddict) {
-        dict = ZSTD_DDictDictContent(ddict);
-        dictSize = ZSTD_DDictDictSize(ddict);
+        dict = ZSTD_DDict_dictContent(ddict);
+        dictSize = ZSTD_DDict_dictSize(ddict);
     }
 
-    while (srcSize >= ZSTD_frameHeaderSize_prefix) {
-        U32 magicNumber;
+    while (srcSize >= ZSTD_FRAMEHEADERSIZE_PREFIX) {
 
 #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
         if (ZSTD_isLegacy(src, srcSize)) {
             size_t decodedSize;
             size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
             if (ZSTD_isError(frameSize)) return frameSize;
-            /* legacy support is not compatible with static dctx */
-            if (dctx->staticSize) return ERROR(memory_allocation);
+            RETURN_ERROR_IF(dctx->staticSize, memory_allocation,
+                "legacy support is not compatible with static dctx");
 
             decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize);
+            if (ZSTD_isError(decodedSize)) return decodedSize;
 
+            assert(decodedSize <= dstCapacity);
             dst = (BYTE*)dst + decodedSize;
             dstCapacity -= decodedSize;
 
@@ -1911,45 +737,53 @@
         }
 #endif
 
-        magicNumber = MEM_readLE32(src);
-        DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
-                    (U32)magicNumber, (U32)ZSTD_MAGICNUMBER);
-        if (magicNumber != ZSTD_MAGICNUMBER) {
-            if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
-                size_t skippableSize;
-                if (srcSize < ZSTD_skippableHeaderSize)
-                    return ERROR(srcSize_wrong);
-                skippableSize = MEM_readLE32((const BYTE*)src + ZSTD_frameIdSize)
-                              + ZSTD_skippableHeaderSize;
-                if (srcSize < skippableSize) return ERROR(srcSize_wrong);
+        {   U32 const magicNumber = MEM_readLE32(src);
+            DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
+                        (unsigned)magicNumber, ZSTD_MAGICNUMBER);
+            if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+                size_t const skippableSize = readSkippableFrameSize(src, srcSize);
+                FORWARD_IF_ERROR(skippableSize);
+                assert(skippableSize <= srcSize);
 
                 src = (const BYTE *)src + skippableSize;
                 srcSize -= skippableSize;
                 continue;
-            }
-            return ERROR(prefix_unknown);
-        }
+        }   }
 
         if (ddict) {
             /* we were called from ZSTD_decompress_usingDDict */
-            CHECK_F(ZSTD_decompressBegin_usingDDict(dctx, ddict));
+            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict));
         } else {
             /* this will initialize correctly with no dict if dict == NULL, so
              * use this in all cases but ddict */
-            CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
+            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
         }
         ZSTD_checkContinuity(dctx, dst);
 
         {   const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
                                                     &src, &srcSize);
+            RETURN_ERROR_IF(
+                (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
+             && (moreThan1Frame==1),
+                srcSize_wrong,
+                "at least one frame successfully completed, but following "
+                "bytes are garbage: it's more likely to be a srcSize error, "
+                "specifying more bytes than compressed size of frame(s). This "
+                "error message replaces ERROR(prefix_unknown), which would be "
+                "confusing, as the first header is actually correct. Note that "
+                "one could be unlucky, it might be a corruption error instead, "
+                "happening right at the place where we expect zstd magic "
+                "bytes. But this is _much_ less likely than a srcSize field "
+                "error.");
             if (ZSTD_isError(res)) return res;
-            /* no need to bound check, ZSTD_decompressFrame already has */
+            assert(res <= dstCapacity);
             dst = (BYTE*)dst + res;
             dstCapacity -= res;
         }
+        moreThan1Frame = 1;
     }  /* while (srcSize >= ZSTD_FRAMEHEADERSIZE_PREFIX) */
 
-    if (srcSize) return ERROR(srcSize_wrong); /* input not entirely consumed */
+    RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed");
 
     return (BYTE*)dst - (BYTE*)dststart;
 }
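
ZSTD_decompressMultiFrame() is what makes plain frame concatenation work :
decompressing the concatenation of two frames regenerates the concatenation of
their contents, and trailing garbage after a valid frame now reports
srcSize_wrong rather than prefix_unknown. A sketch with the public API (error
checks elided; roundTripTwoFrames is a hypothetical name) :

    #include "zstd.h"

    static size_t roundTripTwoFrames(void)
    {
        char cBuf[2 * ZSTD_COMPRESSBOUND(6)];
        char out[12];
        size_t const c1 = ZSTD_compress(cBuf, sizeof(cBuf), "Hello ", 6, 1);
        size_t const c2 = ZSTD_compress(cBuf + c1, sizeof(cBuf) - c1, "world!", 6, 1);
        /* one call walks both frames and regenerates "Hello world!" (12 bytes) */
        return ZSTD_decompress(out, sizeof(out), cBuf, c1 + c2);
    }
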
@@ -1963,9 +797,26 @@
 }
 
 
+static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx)
+{
+    switch (dctx->dictUses) {
+    default:
+        assert(0 /* Impossible */);
+        /* fall-through */
+    case ZSTD_dont_use:
+        ZSTD_clearDict(dctx);
+        return NULL;
+    case ZSTD_use_indefinitely:
+        return dctx->ddict;
+    case ZSTD_use_once:
+        dctx->dictUses = ZSTD_dont_use;
+        return dctx->ddict;
+    }
+}
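
ZSTD_getDDict() is what gives the two dictionary attachment modes their
lifetime. A usage sketch (hypothetical function; note that each load/ref call
replaces whatever dictionary was set before) :

    #include "zstd.h"

    static void dictLifetimes(ZSTD_DCtx* dctx,
                              const void* dictBuf, size_t dictSize,
                              const void* prefixBuf, size_t prefixSize)
    {
        /* loadDictionary => ZSTD_use_indefinitely :
         * every subsequent frame is decoded with dictBuf */
        (void)ZSTD_DCtx_loadDictionary(dctx, dictBuf, dictSize);

        /* refPrefix => ZSTD_use_once : only the next frame sees prefixBuf;
         * the first ZSTD_getDDict() call then flips dictUses back to
         * ZSTD_dont_use, which clears the dictionary for later frames */
        (void)ZSTD_DCtx_refPrefix(dctx, prefixBuf, prefixSize);
    }
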
+
 size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
 {
-    return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
+    return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx));
 }
 
 
@@ -1974,12 +825,13 @@
 #if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
     size_t regenSize;
     ZSTD_DCtx* const dctx = ZSTD_createDCtx();
-    if (dctx==NULL) return ERROR(memory_allocation);
+    RETURN_ERROR_IF(dctx==NULL, memory_allocation);
     regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
     ZSTD_freeDCtx(dctx);
     return regenSize;
 #else   /* stack mode */
     ZSTD_DCtx dctx;
+    ZSTD_initDCtx_internal(&dctx);
     return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
 #endif
 }
@@ -2021,9 +873,9 @@
  *            or an error code, which can be tested using ZSTD_isError() */
 size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
 {
-    DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (U32)srcSize);
+    DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
     /* Sanity check */
-    if (srcSize != dctx->expected) return ERROR(srcSize_wrong);  /* not allowed */
+    RETURN_ERROR_IF(srcSize != dctx->expected, srcSize_wrong, "not allowed");
     if (dstCapacity) ZSTD_checkContinuity(dctx, dst);
 
     switch (dctx->stage)
@@ -2031,10 +883,10 @@
     case ZSTDds_getFrameHeaderSize :
         assert(src != NULL);
         if (dctx->format == ZSTD_f_zstd1) {  /* allows header */
-            assert(srcSize >= ZSTD_frameIdSize);  /* to read skippable magic number */
-            if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {        /* skippable frame */
+            assert(srcSize >= ZSTD_FRAMEIDSIZE);  /* to read skippable magic number */
+            if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {        /* skippable frame */
                 memcpy(dctx->headerBuffer, src, srcSize);
-                dctx->expected = ZSTD_skippableHeaderSize - srcSize;  /* remaining to load to get full skippable frame header */
+                dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize;  /* remaining to load to get full skippable frame header */
                 dctx->stage = ZSTDds_decodeSkippableHeader;
                 return 0;
         }   }
@@ -2048,7 +900,7 @@
     case ZSTDds_decodeFrameHeader:
         assert(src != NULL);
         memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
-        CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
+        FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
         dctx->expected = ZSTD_blockHeaderSize;
         dctx->stage = ZSTDds_decodeBlockHeader;
         return 0;
@@ -2094,23 +946,23 @@
                 rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);
                 break;
             case bt_rle :
-                rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize);
+                rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize);
                 break;
             case bt_reserved :   /* should never happen */
             default:
-                return ERROR(corruption_detected);
+                RETURN_ERROR(corruption_detected);
             }
             if (ZSTD_isError(rSize)) return rSize;
-            DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (U32)rSize);
+            DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
             dctx->decodedSize += rSize;
             if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
 
             if (dctx->stage == ZSTDds_decompressLastBlock) {   /* end of frame */
-                DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (U32)dctx->decodedSize);
-                if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
-                    if (dctx->decodedSize != dctx->fParams.frameContentSize) {
-                        return ERROR(corruption_detected);
-                }   }
+                DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize);
+                RETURN_ERROR_IF(
+                    dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
+                 && dctx->decodedSize != dctx->fParams.frameContentSize,
+                    corruption_detected);
                 if (dctx->fParams.checksumFlag) {  /* another round for frame checksum */
                     dctx->expected = 4;
                     dctx->stage = ZSTDds_checkChecksum;
@@ -2130,8 +982,8 @@
         assert(srcSize == 4);  /* guaranteed by dctx->expected */
         {   U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
             U32 const check32 = MEM_readLE32(src);
-            DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", h32, check32);
-            if (check32 != h32) return ERROR(checksum_wrong);
+            DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
+            RETURN_ERROR_IF(check32 != h32, checksum_wrong);
             dctx->expected = 0;
             dctx->stage = ZSTDds_getFrameHeaderSize;
             return 0;
@@ -2139,9 +991,9 @@
 
     case ZSTDds_decodeSkippableHeader:
         assert(src != NULL);
-        assert(srcSize <= ZSTD_skippableHeaderSize);
-        memcpy(dctx->headerBuffer + (ZSTD_skippableHeaderSize - srcSize), src, srcSize);   /* complete skippable header */
-        dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_frameIdSize);   /* note : dctx->expected can grow seriously large, beyond local buffer size */
+        assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE);
+        memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize);   /* complete skippable header */
+        dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE);   /* note : dctx->expected can grow seriously large, beyond local buffer size */
         dctx->stage = ZSTDds_skipFrame;
         return 0;
 
@@ -2151,7 +1003,8 @@
         return 0;
 
     default:
-        return ERROR(GENERIC);   /* impossible */
+        assert(0);   /* impossible */
+        RETURN_ERROR(GENERIC);   /* some compilers require the default case to do something */
     }
 }
 
@@ -2159,38 +1012,52 @@
 static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
 {
     dctx->dictEnd = dctx->previousDstEnd;
-    dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
-    dctx->base = dict;
+    dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
+    dctx->prefixStart = dict;
     dctx->previousDstEnd = (const char*)dict + dictSize;
     return 0;
 }
 
-/* ZSTD_loadEntropy() :
- * dict : must point at beginning of a valid zstd dictionary
+/*! ZSTD_loadDEntropy() :
+ *  dict : must point at beginning of a valid zstd dictionary.
  * @return : size of entropy tables read */
-static size_t ZSTD_loadEntropy(ZSTD_entropyDTables_t* entropy, const void* const dict, size_t const dictSize)
+size_t
+ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
+                  const void* const dict, size_t const dictSize)
 {
     const BYTE* dictPtr = (const BYTE*)dict;
     const BYTE* const dictEnd = dictPtr + dictSize;
 
-    if (dictSize <= 8) return ERROR(dictionary_corrupted);
+    RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted);
+    assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY);   /* dict must be valid */
     dictPtr += 8;   /* skip header = magic + dictID */
 
-
-    {   size_t const hSize = HUF_readDTableX4_wksp(
-            entropy->hufTable, dictPtr, dictEnd - dictPtr,
-            entropy->workspace, sizeof(entropy->workspace));
-        if (HUF_isError(hSize)) return ERROR(dictionary_corrupted);
+    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable));
+    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable));
+    ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE);
+    {   void* const workspace = &entropy->LLTable;   /* use the FSE tables' storage as temporary workspace :
+         * the tables are not built yet at this point, so their memory is free scratch space,
+         * and the static asserts above guarantee they are contiguous and large enough */
+        size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable);
+#ifdef HUF_FORCE_DECOMPRESS_X1
+        /* in minimal huffman, we always use X1 variants */
+        size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable,
+                                                dictPtr, dictEnd - dictPtr,
+                                                workspace, workspaceSize);
+#else
+        size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
+                                                dictPtr, dictEnd - dictPtr,
+                                                workspace, workspaceSize);
+#endif
+        RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted);
         dictPtr += hSize;
     }
 
     {   short offcodeNCount[MaxOff+1];
-        U32 offcodeMaxValue = MaxOff, offcodeLog;
+        unsigned offcodeMaxValue = MaxOff, offcodeLog;
         size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
-        if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
-        if (offcodeMaxValue > MaxOff) return ERROR(dictionary_corrupted);
-        if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
-        ZSTD_buildFSETable(entropy->OFTable,
+        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted);
+        RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted);
+        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted);
+        ZSTD_buildFSETable( entropy->OFTable,
                             offcodeNCount, offcodeMaxValue,
                             OF_base, OF_bits,
                             offcodeLog);
@@ -2200,10 +1067,10 @@
     {   short matchlengthNCount[MaxML+1];
         unsigned matchlengthMaxValue = MaxML, matchlengthLog;
         size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
-        if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
-        if (matchlengthMaxValue > MaxML) return ERROR(dictionary_corrupted);
-        if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
-        ZSTD_buildFSETable(entropy->MLTable,
+        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted);
+        RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted);
+        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted);
+        ZSTD_buildFSETable( entropy->MLTable,
                             matchlengthNCount, matchlengthMaxValue,
                             ML_base, ML_bits,
                             matchlengthLog);
@@ -2213,22 +1080,23 @@
     {   short litlengthNCount[MaxLL+1];
         unsigned litlengthMaxValue = MaxLL, litlengthLog;
         size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
-        if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
-        if (litlengthMaxValue > MaxLL) return ERROR(dictionary_corrupted);
-        if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
-        ZSTD_buildFSETable(entropy->LLTable,
+        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted);
+        RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted);
+        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted);
+        ZSTD_buildFSETable( entropy->LLTable,
                             litlengthNCount, litlengthMaxValue,
                             LL_base, LL_bits,
                             litlengthLog);
         dictPtr += litlengthHeaderSize;
     }
 
-    if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
+    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted);
     {   int i;
         size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
         for (i=0; i<3; i++) {
             U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
-            if (rep==0 || rep >= dictContentSize) return ERROR(dictionary_corrupted);
+            RETURN_ERROR_IF(rep==0 || rep >= dictContentSize,
+                            dictionary_corrupted);
             entropy->rep[i] = rep;
     }   }
 
@@ -2242,11 +1110,11 @@
         if (magic != ZSTD_MAGIC_DICTIONARY) {
             return ZSTD_refDictContent(dctx, dict, dictSize);   /* pure content mode */
     }   }
-    dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_frameIdSize);
+    dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
 
     /* load entropy tables */
-    {   size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize);
-        if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted);
+    {   size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize);
+        RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted);
         dict = (const char*)dict + eSize;
         dictSize -= eSize;
     }
@@ -2256,7 +1124,6 @@
     return ZSTD_refDictContent(dctx, dict, dictSize);
 }
 
-/* Note : this function cannot fail */
 size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
 {
     assert(dctx != NULL);
@@ -2264,8 +1131,8 @@
     dctx->stage = ZSTDds_getFrameHeaderSize;
     dctx->decodedSize = 0;
     dctx->previousDstEnd = NULL;
-    dctx->base = NULL;
-    dctx->vBase = NULL;
+    dctx->prefixStart = NULL;
+    dctx->virtualStart = NULL;
     dctx->dictEnd = NULL;
     dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
     dctx->litEntropy = dctx->fseEntropy = 0;
@@ -2281,201 +1148,36 @@
 
 size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
 {
-    CHECK_F( ZSTD_decompressBegin(dctx) );
+    FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) );
     if (dict && dictSize)
-        CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted);
+        RETURN_ERROR_IF(
+            ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)),
+            dictionary_corrupted);
     return 0;
 }
 
 
 /* ======   ZSTD_DDict   ====== */
 
-struct ZSTD_DDict_s {
-    void* dictBuffer;
-    const void* dictContent;
-    size_t dictSize;
-    ZSTD_entropyDTables_t entropy;
-    U32 dictID;
-    U32 entropyPresent;
-    ZSTD_customMem cMem;
-};  /* typedef'd to ZSTD_DDict within "zstd.h" */
-
-static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict)
+size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
 {
-    return ddict->dictContent;
-}
-
-static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict)
-{
-    return ddict->dictSize;
-}
-
-size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dstDCtx, const ZSTD_DDict* ddict)
-{
-    CHECK_F( ZSTD_decompressBegin(dstDCtx) );
-    if (ddict) {   /* support begin on NULL */
-        dstDCtx->dictID = ddict->dictID;
-        dstDCtx->base = ddict->dictContent;
-        dstDCtx->vBase = ddict->dictContent;
-        dstDCtx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
-        dstDCtx->previousDstEnd = dstDCtx->dictEnd;
-        if (ddict->entropyPresent) {
-            dstDCtx->litEntropy = 1;
-            dstDCtx->fseEntropy = 1;
-            dstDCtx->LLTptr = ddict->entropy.LLTable;
-            dstDCtx->MLTptr = ddict->entropy.MLTable;
-            dstDCtx->OFTptr = ddict->entropy.OFTable;
-            dstDCtx->HUFptr = ddict->entropy.hufTable;
-            dstDCtx->entropy.rep[0] = ddict->entropy.rep[0];
-            dstDCtx->entropy.rep[1] = ddict->entropy.rep[1];
-            dstDCtx->entropy.rep[2] = ddict->entropy.rep[2];
-        } else {
-            dstDCtx->litEntropy = 0;
-            dstDCtx->fseEntropy = 0;
-        }
+    DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict");
+    assert(dctx != NULL);
+    if (ddict) {
+        const char* const dictStart = (const char*)ZSTD_DDict_dictContent(ddict);
+        size_t const dictSize = ZSTD_DDict_dictSize(ddict);
+        const void* const dictEnd = dictStart + dictSize;
+        dctx->ddictIsCold = (dctx->dictEnd != dictEnd);
+        DEBUGLOG(4, "DDict is %s",
+                    dctx->ddictIsCold ? "~cold~" : "hot!");
+    }
+    FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) );
+    if (ddict) {   /* NULL ddict is equivalent to no dictionary */
+        ZSTD_copyDDictParameters(dctx, ddict);
     }
     return 0;
 }
 
-static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict, ZSTD_dictContentType_e dictContentType)
-{
-    ddict->dictID = 0;
-    ddict->entropyPresent = 0;
-    if (dictContentType == ZSTD_dct_rawContent) return 0;
-
-    if (ddict->dictSize < 8) {
-        if (dictContentType == ZSTD_dct_fullDict)
-            return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
-        return 0;   /* pure content mode */
-    }
-    {   U32 const magic = MEM_readLE32(ddict->dictContent);
-        if (magic != ZSTD_MAGIC_DICTIONARY) {
-            if (dictContentType == ZSTD_dct_fullDict)
-                return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
-            return 0;   /* pure content mode */
-        }
-    }
-    ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_frameIdSize);
-
-    /* load entropy tables */
-    CHECK_E( ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted );
-    ddict->entropyPresent = 1;
-    return 0;
-}
-
-
-static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
-                                      const void* dict, size_t dictSize,
-                                      ZSTD_dictLoadMethod_e dictLoadMethod,
-                                      ZSTD_dictContentType_e dictContentType)
-{
-    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
-        ddict->dictBuffer = NULL;
-        ddict->dictContent = dict;
-    } else {
-        void* const internalBuffer = ZSTD_malloc(dictSize, ddict->cMem);
-        ddict->dictBuffer = internalBuffer;
-        ddict->dictContent = internalBuffer;
-        if (!internalBuffer) return ERROR(memory_allocation);
-        memcpy(internalBuffer, dict, dictSize);
-    }
-    ddict->dictSize = dictSize;
-    ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
-
-    /* parse dictionary content */
-    CHECK_F( ZSTD_loadEntropy_inDDict(ddict, dictContentType) );
-
-    return 0;
-}
-
-ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
-                                      ZSTD_dictLoadMethod_e dictLoadMethod,
-                                      ZSTD_dictContentType_e dictContentType,
-                                      ZSTD_customMem customMem)
-{
-    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
-
-    {   ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
-        if (!ddict) return NULL;
-        ddict->cMem = customMem;
-
-        if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, dictLoadMethod, dictContentType) )) {
-            ZSTD_freeDDict(ddict);
-            return NULL;
-        }
-
-        return ddict;
-    }
-}
-
-/*! ZSTD_createDDict() :
-*   Create a digested dictionary, to start decompression without startup delay.
-*   `dict` content is copied inside DDict.
-*   Consequently, `dict` can be released after `ZSTD_DDict` creation */
-ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
-{
-    ZSTD_customMem const allocator = { NULL, NULL, NULL };
-    return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);
-}
-
-/*! ZSTD_createDDict_byReference() :
- *  Create a digested dictionary, to start decompression without startup delay.
- *  Dictionary content is simply referenced, it will be accessed during decompression.
- *  Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
-ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
-{
-    ZSTD_customMem const allocator = { NULL, NULL, NULL };
-    return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
-}
-
-
-const ZSTD_DDict* ZSTD_initStaticDDict(
-                                void* workspace, size_t workspaceSize,
-                                const void* dict, size_t dictSize,
-                                ZSTD_dictLoadMethod_e dictLoadMethod,
-                                ZSTD_dictContentType_e dictContentType)
-{
-    size_t const neededSpace =
-            sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
-    ZSTD_DDict* const ddict = (ZSTD_DDict*)workspace;
-    assert(workspace != NULL);
-    assert(dict != NULL);
-    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
-    if (workspaceSize < neededSpace) return NULL;
-    if (dictLoadMethod == ZSTD_dlm_byCopy) {
-        memcpy(ddict+1, dict, dictSize);  /* local copy */
-        dict = ddict+1;
-    }
-    if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, ZSTD_dlm_byRef, dictContentType) ))
-        return NULL;
-    return ddict;
-}
-
-
-size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
-{
-    if (ddict==NULL) return 0;   /* support free on NULL */
-    {   ZSTD_customMem const cMem = ddict->cMem;
-        ZSTD_free(ddict->dictBuffer, cMem);
-        ZSTD_free(ddict, cMem);
-        return 0;
-    }
-}
-
-/*! ZSTD_estimateDDictSize() :
- *  Estimate amount of memory that will be needed to create a dictionary for decompression.
- *  Note : dictionary created by reference using ZSTD_dlm_byRef are smaller */
-size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
-{
-    return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
-}
-
-size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
-{
-    if (ddict==NULL) return 0;   /* support sizeof on NULL */
-    return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
-}
-
 /*! ZSTD_getDictID_fromDict() :
  *  Provides the dictID stored within dictionary.
  *  if @return == 0, the dictionary is not conformant with Zstandard specification.
@@ -2484,21 +1186,11 @@
 {
     if (dictSize < 8) return 0;
     if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
-    return MEM_readLE32((const char*)dict + ZSTD_frameIdSize);
-}
-
-/*! ZSTD_getDictID_fromDDict() :
- *  Provides the dictID of the dictionary loaded into `ddict`.
- *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
- *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
-unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
-{
-    if (ddict==NULL) return 0;
-    return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
+    return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
 }
 
 /*! ZSTD_getDictID_fromFrame() :
- *  Provides the dictID required to decompresse frame stored within `src`.
+ *  Provides the dictID required to decompress frame stored within `src`.
  *  If @return == 0, the dictID could not be decoded.
 *  This could be for one of the following reasons :
  *  - The frame does not require a dictionary (most common case).
@@ -2560,22 +1252,24 @@
 }
 
 
-/* *** Initialization *** */
+/* ***  Initialization  *** */
 
 size_t ZSTD_DStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }
 size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }
 
-size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
+size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
+                                   const void* dict, size_t dictSize,
+                                         ZSTD_dictLoadMethod_e dictLoadMethod,
+                                         ZSTD_dictContentType_e dictContentType)
 {
-    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
-    ZSTD_freeDDict(dctx->ddictLocal);
+    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
+    ZSTD_clearDict(dctx);
     if (dict && dictSize >= 8) {
         dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
-        if (dctx->ddictLocal == NULL) return ERROR(memory_allocation);
-    } else {
-        dctx->ddictLocal = NULL;
+        RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation);
+        dctx->ddict = dctx->ddictLocal;
+        dctx->dictUses = ZSTD_use_indefinitely;
     }
-    dctx->ddict = dctx->ddictLocal;
     return 0;
 }
 
@@ -2591,7 +1285,9 @@
 
 size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
 {
-    return ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType);
+    FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType));
+    dctx->dictUses = ZSTD_use_once;
+    return 0;
 }
 
 size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
@@ -2601,28 +1297,21 @@
 
 
 /* ZSTD_initDStream_usingDict() :
- * return : expected size, aka ZSTD_frameHeaderSize_prefix.
+ * return : expected size, aka ZSTD_FRAMEHEADERSIZE_PREFIX.
  * this function cannot fail */
 size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
 {
     DEBUGLOG(4, "ZSTD_initDStream_usingDict");
-    zds->streamStage = zdss_init;
-    CHECK_F( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
-    return ZSTD_frameHeaderSize_prefix;
+    FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
+    return ZSTD_FRAMEHEADERSIZE_PREFIX;
 }
 
 /* note : this variant can't fail */
 size_t ZSTD_initDStream(ZSTD_DStream* zds)
 {
     DEBUGLOG(4, "ZSTD_initDStream");
-    return ZSTD_initDStream_usingDict(zds, NULL, 0);
-}
-
-size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
-{
-    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
-    dctx->ddict = ddict;
-    return 0;
+    return ZSTD_initDStream_usingDDict(zds, NULL);
 }
 
 /* ZSTD_initDStream_usingDDict() :
@@ -2630,51 +1319,119 @@
  * this function cannot fail */
 size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
 {
-    size_t const initResult = ZSTD_initDStream(dctx);
-    dctx->ddict = ddict;
-    return initResult;
+    FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) );
+    FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) );
+    return ZSTD_FRAMEHEADERSIZE_PREFIX;
 }
 
 /* ZSTD_resetDStream() :
- * return : expected size, aka ZSTD_frameHeaderSize_prefix.
+ * return : expected size, aka ZSTD_FRAMEHEADERSIZE_PREFIX.
  * this function cannot fail */
 size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
 {
-    DEBUGLOG(4, "ZSTD_resetDStream");
-    dctx->streamStage = zdss_loadHeader;
-    dctx->lhSize = dctx->inPos = dctx->outStart = dctx->outEnd = 0;
-    dctx->legacyVersion = 0;
-    dctx->hostageByte = 0;
-    return ZSTD_frameHeaderSize_prefix;
+    FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only));
+    return ZSTD_FRAMEHEADERSIZE_PREFIX;
 }
 
-size_t ZSTD_setDStreamParameter(ZSTD_DStream* dctx,
-                                ZSTD_DStreamParameter_e paramType, unsigned paramValue)
+
+size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
 {
-    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
-    switch(paramType)
-    {
-        default : return ERROR(parameter_unsupported);
-        case DStream_p_maxWindowSize :
-            DEBUGLOG(4, "setting maxWindowSize = %u KB", paramValue >> 10);
-            dctx->maxWindowSize = paramValue ? paramValue : (U32)(-1);
-            break;
+    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
+    ZSTD_clearDict(dctx);
+    if (ddict) {
+        dctx->ddict = ddict;
+        dctx->dictUses = ZSTD_use_indefinitely;
     }
     return 0;
 }
 
+/* ZSTD_DCtx_setMaxWindowSize() :
+ * note : no direct equivalent in ZSTD_DCtx_setParameter(),
+ * since this version sets windowSize, and the other sets windowLog */
 size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
 {
-    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
+    ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
+    size_t const min = (size_t)1 << bounds.lowerBound;
+    size_t const max = (size_t)1 << bounds.upperBound;
+    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
+    RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound);
+    RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound);
     dctx->maxWindowSize = maxWindowSize;
     return 0;
 }
 
 size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
 {
-    DEBUGLOG(4, "ZSTD_DCtx_setFormat : %u", (unsigned)format);
-    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
-    dctx->format = format;
+    return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, format);
+}
+
+ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
+{
+    ZSTD_bounds bounds = { 0, 0, 0 };
+    switch(dParam) {
+        case ZSTD_d_windowLogMax:
+            bounds.lowerBound = ZSTD_WINDOWLOG_ABSOLUTEMIN;
+            bounds.upperBound = ZSTD_WINDOWLOG_MAX;
+            return bounds;
+        case ZSTD_d_format:
+            bounds.lowerBound = (int)ZSTD_f_zstd1;
+            bounds.upperBound = (int)ZSTD_f_zstd1_magicless;
+            ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
+            return bounds;
+        default:;
+    }
+    bounds.error = ERROR(parameter_unsupported);
+    return bounds;
+}
+
+/* ZSTD_dParam_withinBounds:
+ * @return 1 if value is within dParam bounds,
+ * 0 otherwise */
+static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value)
+{
+    ZSTD_bounds const bounds = ZSTD_dParam_getBounds(dParam);
+    if (ZSTD_isError(bounds.error)) return 0;
+    if (value < bounds.lowerBound) return 0;
+    if (value > bounds.upperBound) return 0;
+    return 1;
+}
+
+#define CHECK_DBOUNDS(p,v) {                \
+    RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound); \
+}
+
+size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value)
+{
+    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
+    switch(dParam) {
+        case ZSTD_d_windowLogMax:
+            if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT;
+            CHECK_DBOUNDS(ZSTD_d_windowLogMax, value);
+            dctx->maxWindowSize = ((size_t)1) << value;
+            return 0;
+        case ZSTD_d_format:
+            CHECK_DBOUNDS(ZSTD_d_format, value);
+            dctx->format = (ZSTD_format_e)value;
+            return 0;
+        default:;
+    }
+    RETURN_ERROR(parameter_unsupported);
+}
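
The bounds machinery above validates every advanced parameter before it
reaches the dctx. A usage sketch (configureWindowLimit is a hypothetical
name) :

    #include "zstd.h"

    static size_t configureWindowLimit(ZSTD_DCtx* dctx)
    {
        /* in-bounds value : accepted */
        size_t err = ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
        if (ZSTD_isError(err)) return err;
        /* 0 is remapped to ZSTD_WINDOWLOG_LIMIT_DEFAULT before the bounds check */
        err = ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 0);
        if (ZSTD_isError(err)) return err;
        /* out-of-bounds value : rejected by CHECK_DBOUNDS (parameter_outOfBound) */
        return ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 99);
    }
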
+
+size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
+{
+    if ( (reset == ZSTD_reset_session_only)
+      || (reset == ZSTD_reset_session_and_parameters) ) {
+        dctx->streamStage = zdss_init;
+        dctx->noForwardProgress = 0;
+    }
+    if ( (reset == ZSTD_reset_parameters)
+      || (reset == ZSTD_reset_session_and_parameters) ) {
+        RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
+        ZSTD_clearDict(dctx);
+        dctx->format = ZSTD_f_zstd1;
+        dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
+    }
     return 0;
 }
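
A sketch of the three reset directives accepted above (hypothetical function,
error checks elided) :

    #include "zstd.h"

    static void resetExamples(ZSTD_DCtx* dctx)
    {
        /* session reset : always legal */
        (void)ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only);
        /* parameter reset : stage_wrong unless the session is idle */
        (void)ZSTD_DCtx_reset(dctx, ZSTD_reset_parameters);
        /* both : also restores ZSTD_f_zstd1 and the default window limit */
        (void)ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters);
    }
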
 
@@ -2690,7 +1447,8 @@
     unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
     unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
     size_t const minRBSize = (size_t) neededSize;
-    if ((unsigned long long)minRBSize != neededSize) return ERROR(frameParameter_windowTooLarge);
+    RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
+                    frameParameter_windowTooLarge);
     return minRBSize;
 }
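
A worked instance of the sizing rule above (a sketch; assumes
WILDCOPY_OVERLENGTH == 8 as in this tree, with MIN() letting a small declared
frameContentSize cap the need) :

    /* 1 MiB window, 128 KiB blocks :
     *   neededRBSize = 1048576 + 131072 + 2*8 = 1179664 bytes
     * a frame declaring frameContentSize == 100 needs only
     *   MIN(100, 1179664) == 100 bytes */
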
 
@@ -2704,13 +1462,13 @@
 
 size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
 {
-    U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;   /* note : should be user-selectable */
+    U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;   /* note : should be user-selectable, but requires an additional parameter (or a dctx) */
     ZSTD_frameHeader zfh;
     size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
     if (ZSTD_isError(err)) return err;
-    if (err>0) return ERROR(srcSize_wrong);
-    if (zfh.windowSize > windowSizeMax)
-        return ERROR(frameParameter_windowTooLarge);
+    RETURN_ERROR_IF(err>0, srcSize_wrong);
+    RETURN_ERROR_IF(zfh.windowSize > windowSizeMax,
+                    frameParameter_windowTooLarge);
     return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
 }
 
@@ -2736,16 +1494,16 @@
     U32 someMoreWork = 1;
 
     DEBUGLOG(5, "ZSTD_decompressStream");
-    if (input->pos > input->size) {  /* forbidden */
-        DEBUGLOG(5, "in: pos: %u   vs size: %u",
-                    (U32)input->pos, (U32)input->size);
-        return ERROR(srcSize_wrong);
-    }
-    if (output->pos > output->size) {  /* forbidden */
-        DEBUGLOG(5, "out: pos: %u   vs size: %u",
-                    (U32)output->pos, (U32)output->size);
-        return ERROR(dstSize_tooSmall);
-    }
+    RETURN_ERROR_IF(
+        input->pos > input->size,
+        srcSize_wrong,
+        "forbidden. in: pos: %u   vs size: %u",
+        (U32)input->pos, (U32)input->size);
+    RETURN_ERROR_IF(
+        output->pos > output->size,
+        dstSize_tooSmall,
+        "forbidden. out: pos: %u   vs size: %u",
+        (U32)output->pos, (U32)output->size);
     DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
 
     while (someMoreWork) {
@@ -2753,32 +1511,36 @@
         {
         case zdss_init :
             DEBUGLOG(5, "stage zdss_init => transparent reset ");
-            ZSTD_resetDStream(zds);   /* transparent reset on starting decoding a new frame */
+            zds->streamStage = zdss_loadHeader;
+            zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
+            zds->legacyVersion = 0;
+            zds->hostageByte = 0;
             /* fall-through */
 
         case zdss_loadHeader :
             DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
 #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
             if (zds->legacyVersion) {
-                /* legacy support is incompatible with static dctx */
-                if (zds->staticSize) return ERROR(memory_allocation);
+                RETURN_ERROR_IF(zds->staticSize, memory_allocation,
+                    "legacy support is incompatible with static dctx");
                 {   size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input);
                     if (hint==0) zds->streamStage = zdss_init;
                     return hint;
             }   }
 #endif
-            {   size_t const hSize = ZSTD_getFrameHeader_internal(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
+            {   size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
                 DEBUGLOG(5, "header size : %u", (U32)hSize);
                 if (ZSTD_isError(hSize)) {
 #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
                     U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);
                     if (legacyVersion) {
-                        const void* const dict = zds->ddict ? zds->ddict->dictContent : NULL;
-                        size_t const dictSize = zds->ddict ? zds->ddict->dictSize : 0;
+                        ZSTD_DDict const* const ddict = ZSTD_getDDict(zds);
+                        const void* const dict = ddict ? ZSTD_DDict_dictContent(ddict) : NULL;
+                        size_t const dictSize = ddict ? ZSTD_DDict_dictSize(ddict) : 0;
                         DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion);
-                        /* legacy support is incompatible with static dctx */
-                        if (zds->staticSize) return ERROR(memory_allocation);
-                        CHECK_F(ZSTD_initLegacyStream(&zds->legacyContext,
+                        RETURN_ERROR_IF(zds->staticSize, memory_allocation,
+                            "legacy support is incompatible with static dctx");
+                        FORWARD_IF_ERROR(ZSTD_initLegacyStream(&zds->legacyContext,
                                     zds->previousLegacyVersion, legacyVersion,
                                     dict, dictSize));
                         zds->legacyVersion = zds->previousLegacyVersion = legacyVersion;
@@ -2799,7 +1561,7 @@
                             zds->lhSize += remainingInput;
                         }
                         input->pos = input->size;
-                        return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) + ZSTD_blockHeaderSize;   /* remaining header bytes + next block header */
+                        return (MAX(ZSTD_FRAMEHEADERSIZE_MIN, hSize) - zds->lhSize) + ZSTD_blockHeaderSize;   /* remaining header bytes + next block header */
                     }
                     assert(ip != NULL);
                     memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
@@ -2812,7 +1574,7 @@
                 size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart);
                 if (cSize <= (size_t)(iend-istart)) {
                     /* shortcut : using single-pass mode */
-                    size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, zds->ddict);
+                    size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, ZSTD_getDDict(zds));
                     if (ZSTD_isError(decompressedSize)) return decompressedSize;
                     DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
                     ip = istart + cSize;
@@ -2825,13 +1587,13 @@
 
             /* Consume header (see ZSTDds_decodeFrameHeader) */
             DEBUGLOG(4, "Consume header");
-            CHECK_F(ZSTD_decompressBegin_usingDDict(zds, zds->ddict));
+            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)));
 
-            if ((MEM_readLE32(zds->headerBuffer) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {  /* skippable frame */
-                zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_frameIdSize);
+            if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {  /* skippable frame */
+                zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
                 zds->stage = ZSTDds_skipFrame;
             } else {
-                CHECK_F(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));
+                FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));
                 zds->expected = ZSTD_blockHeaderSize;
                 zds->stage = ZSTDds_decodeBlockHeader;
             }
@@ -2841,7 +1603,8 @@
                         (U32)(zds->fParams.windowSize >>10),
                         (U32)(zds->maxWindowSize >> 10) );
             zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
-            if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge);
+            RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
+                            frameParameter_windowTooLarge);
 
             /* Adapt buffer sizes to frame header instructions */
             {   size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
@@ -2855,14 +1618,15 @@
                     if (zds->staticSize) {  /* static DCtx */
                         DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
                         assert(zds->staticSize >= sizeof(ZSTD_DCtx));  /* controlled at init */
-                        if (bufferSize > zds->staticSize - sizeof(ZSTD_DCtx))
-                            return ERROR(memory_allocation);
+                        RETURN_ERROR_IF(
+                            bufferSize > zds->staticSize - sizeof(ZSTD_DCtx),
+                            memory_allocation);
                     } else {
                         ZSTD_free(zds->inBuff, zds->customMem);
                         zds->inBuffSize = 0;
                         zds->outBuffSize = 0;
                         zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem);
-                        if (zds->inBuff == NULL) return ERROR(memory_allocation);
+                        RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation);
                     }
                     zds->inBuffSize = neededInBuffSize;
                     zds->outBuff = zds->inBuff + zds->inBuffSize;
@@ -2904,7 +1668,9 @@
                 if (isSkipFrame) {
                     loadedSize = MIN(toLoad, (size_t)(iend-ip));
                 } else {
-                    if (toLoad > zds->inBuffSize - zds->inPos) return ERROR(corruption_detected);   /* should never happen */
+                    RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos,
+                                    corruption_detected,
+                                    "should never happen");
                     loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip);
                 }
                 ip += loadedSize;
@@ -2943,12 +1709,24 @@
             someMoreWork = 0;
             break;
 
-        default: return ERROR(GENERIC);   /* impossible */
+        default:
+            assert(0);    /* impossible */
+            RETURN_ERROR(GENERIC);   /* some compilers require the default case to do something */
     }   }
 
     /* result */
-    input->pos += (size_t)(ip-istart);
-    output->pos += (size_t)(op-ostart);
+    input->pos = (size_t)(ip - (const char*)(input->src));
+    output->pos = (size_t)(op - (char*)(output->dst));
+    if ((ip==istart) && (op==ostart)) {  /* no forward progress */
+        zds->noForwardProgress ++;
+        if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
+            RETURN_ERROR_IF(op==oend, dstSize_tooSmall);
+            RETURN_ERROR_IF(ip==iend, srcSize_wrong);
+            assert(0);
+        }
+    } else {
+        zds->noForwardProgress = 0;
+    }
     {   size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);
         if (!nextSrcSizeHint) {   /* frame fully decoded */
             if (zds->outEnd == zds->outStart) {  /* output fully flushed */
@@ -2975,13 +1753,7 @@
     }
 }
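
The noForwardProgress counter above turns a stuck caller into a hard error
instead of an infinite loop. A typical driving loop that always makes progress
(public streaming API; hypothetical function, error handling abbreviated) :

    #include <stdio.h>
    #include "zstd.h"

    static size_t streamDecodeFile(ZSTD_DStream* zds, FILE* fin, FILE* fout)
    {
        /* ZSTD_DStreamInSize()/ZSTD_DStreamOutSize() give the recommended sizes */
        static char inBuf[1 << 17], outBuf[1 << 17];
        size_t readSz;
        while ((readSz = fread(inBuf, 1, sizeof(inBuf), fin)) > 0) {
            ZSTD_inBuffer input = { inBuf, readSz, 0 };
            while (input.pos < input.size) {
                /* a fresh output buffer each round guarantees forward progress */
                ZSTD_outBuffer output = { outBuf, sizeof(outBuf), 0 };
                size_t const hint = ZSTD_decompressStream(zds, &output, &input);
                if (ZSTD_isError(hint)) return hint;
                fwrite(outBuf, 1, output.pos, fout);
            }
        }
        return 0;
    }
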
 
-
-size_t ZSTD_decompress_generic(ZSTD_DCtx* dctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
-{
-    return ZSTD_decompressStream(dctx, output, input);
-}
-
-size_t ZSTD_decompress_generic_simpleArgs (
+size_t ZSTD_decompressStream_simpleArgs (
                             ZSTD_DCtx* dctx,
                             void* dst, size_t dstCapacity, size_t* dstPos,
                       const void* src, size_t srcSize, size_t* srcPos)
@@ -2989,15 +1761,8 @@
     ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
     ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
     /* ZSTD_decompressStream() will check validity of dstPos and srcPos */
-    size_t const cErr = ZSTD_decompress_generic(dctx, &output, &input);
+    size_t const cErr = ZSTD_decompressStream(dctx, &output, &input);
     *dstPos = output.pos;
     *srcPos = input.pos;
     return cErr;
 }
-
-void ZSTD_DCtx_reset(ZSTD_DCtx* dctx)
-{
-    (void)ZSTD_initDStream(dctx);
-    dctx->format = ZSTD_f_zstd1;
-    dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
-}
diff --git a/vendor/github.com/DataDog/zstd/zstd_decompress_block.c b/vendor/github.com/DataDog/zstd/zstd_decompress_block.c
new file mode 100644
index 0000000..24f4859
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_decompress_block.c
@@ -0,0 +1,1322 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* zstd_decompress_block :
+ * this module takes care of decompressing _compressed_ blocks */
+
+/*-*******************************************************
+*  Dependencies
+*********************************************************/
+#include <string.h>      /* memcpy, memmove, memset */
+#include "compiler.h"    /* prefetch */
+#include "cpu.h"         /* bmi2 */
+#include "mem.h"         /* low level memory routines */
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include "zstd_internal.h"
+#include "zstd_decompress_internal.h"   /* ZSTD_DCtx */
+#include "zstd_ddict.h"  /* ZSTD_DDictDictContent */
+#include "zstd_decompress_block.h"
+
+/*_*******************************************************
+*  Macros
+**********************************************************/
+
+/* These two optional macros each force the use of one of the two
+ * ZSTD_decompressSequences implementations. You can't force both
+ * directions at the same time.
+ */
+#if defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+    defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+#error "Cannot force the use of the short and the long ZSTD_decompressSequences variants!"
+#endif
+
+
+/*_*******************************************************
+*  Memory operations
+**********************************************************/
+static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+
+/*-*************************************************************
+ *   Block decoding
+ ***************************************************************/
+
+/*! ZSTD_getcBlockSize() :
+ *  Provides the size of compressed block from block header `src` */
+size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
+                          blockProperties_t* bpPtr)
+{
+    RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong);
+
+    {   U32 const cBlockHeader = MEM_readLE24(src);
+        U32 const cSize = cBlockHeader >> 3;
+        bpPtr->lastBlock = cBlockHeader & 1;
+        bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
+        bpPtr->origSize = cSize;   /* only useful for RLE */
+        if (bpPtr->blockType == bt_rle) return 1;
+        RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected);
+        return cSize;
+    }
+}
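
A worked reading of the 24-bit header parsed above (sketch; assumes the
internal headers of this file are in scope, and blockHeaderExample is a
hypothetical name) :

    #include <assert.h>

    static void blockHeaderExample(void)
    {
        /* little-endian bytes 25 00 00 => cBlockHeader == 0b100101 */
        static const unsigned char header[3] = { 0x25, 0x00, 0x00 };
        blockProperties_t bp;
        size_t const cSize = ZSTD_getcBlockSize(header, sizeof(header), &bp);
        assert(bp.lastBlock == 1);              /* bit 0          */
        assert(bp.blockType == bt_compressed);  /* bits 1-2 == 10 */
        assert(cSize == 4);                     /* bits 3-23      */
    }
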
+
+
+/* Hidden declaration for fullbench */
+size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+                          const void* src, size_t srcSize);
+/*! ZSTD_decodeLiteralsBlock() :
+ * @return : nb of bytes read from src (< srcSize)
+ *  note : not declared in any header, but exposed for fullbench (hidden declaration above) */
+size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
+{
+    RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected);
+
+    {   const BYTE* const istart = (const BYTE*) src;
+        symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
+
+        switch(litEncType)
+        {
+        case set_repeat:
+            RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted);
+            /* fall-through */
+
+        case set_compressed:
+            RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
+            {   size_t lhSize, litSize, litCSize;
+                U32 singleStream=0;
+                U32 const lhlCode = (istart[0] >> 2) & 3;
+                U32 const lhc = MEM_readLE32(istart);
+                size_t hufSuccess;
+                switch(lhlCode)
+                {
+                case 0: case 1: default:   /* note : default is impossible, since lhlCode is in [0..3] */
+                    /* 2 - 2 - 10 - 10 */
+                    singleStream = !lhlCode;
+                    lhSize = 3;
+                    litSize  = (lhc >> 4) & 0x3FF;
+                    litCSize = (lhc >> 14) & 0x3FF;
+                    break;
+                case 2:
+                    /* 2 - 2 - 14 - 14 */
+                    lhSize = 4;
+                    litSize  = (lhc >> 4) & 0x3FFF;
+                    litCSize = lhc >> 18;
+                    break;
+                case 3:
+                    /* 2 - 2 - 18 - 18 */
+                    lhSize = 5;
+                    litSize  = (lhc >> 4) & 0x3FFFF;
+                    litCSize = (lhc >> 22) + (istart[4] << 10);
+                    break;
+                }
+                RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
+                RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected);
+
+                /* prefetch huffman table if cold */
+                if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
+                    PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));
+                }
+
+                if (litEncType==set_repeat) {
+                    if (singleStream) {
+                        hufSuccess = HUF_decompress1X_usingDTable_bmi2(
+                            dctx->litBuffer, litSize, istart+lhSize, litCSize,
+                            dctx->HUFptr, dctx->bmi2);
+                    } else {
+                        hufSuccess = HUF_decompress4X_usingDTable_bmi2(
+                            dctx->litBuffer, litSize, istart+lhSize, litCSize,
+                            dctx->HUFptr, dctx->bmi2);
+                    }
+                } else {
+                    if (singleStream) {
+#if defined(HUF_FORCE_DECOMPRESS_X2)
+                        hufSuccess = HUF_decompress1X_DCtx_wksp(
+                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
+                            istart+lhSize, litCSize, dctx->workspace,
+                            sizeof(dctx->workspace));
+#else
+                        hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2(
+                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
+                            istart+lhSize, litCSize, dctx->workspace,
+                            sizeof(dctx->workspace), dctx->bmi2);
+#endif
+                    } else {
+                        hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2(
+                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
+                            istart+lhSize, litCSize, dctx->workspace,
+                            sizeof(dctx->workspace), dctx->bmi2);
+                    }
+                }
+
+                RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected);
+
+                dctx->litPtr = dctx->litBuffer;
+                dctx->litSize = litSize;
+                dctx->litEntropy = 1;
+                if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
+                memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+                return litCSize + lhSize;
+            }
+
+        case set_basic:
+            {   size_t litSize, lhSize;
+                U32 const lhlCode = ((istart[0]) >> 2) & 3;
+                switch(lhlCode)
+                {
+                case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
+                    lhSize = 1;
+                    litSize = istart[0] >> 3;
+                    break;
+                case 1:
+                    lhSize = 2;
+                    litSize = MEM_readLE16(istart) >> 4;
+                    break;
+                case 3:
+                    lhSize = 3;
+                    litSize = MEM_readLE24(istart) >> 4;
+                    break;
+                }
+
+                if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */
+                    RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected);
+                    memcpy(dctx->litBuffer, istart+lhSize, litSize);
+                    dctx->litPtr = dctx->litBuffer;
+                    dctx->litSize = litSize;
+                    memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+                    return lhSize+litSize;
+                }
+                /* direct reference into compressed stream */
+                dctx->litPtr = istart+lhSize;
+                dctx->litSize = litSize;
+                return lhSize+litSize;
+            }
+
+        case set_rle:
+            {   U32 const lhlCode = ((istart[0]) >> 2) & 3;
+                size_t litSize, lhSize;
+                switch(lhlCode)
+                {
+                case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
+                    lhSize = 1;
+                    litSize = istart[0] >> 3;
+                    break;
+                case 1:
+                    lhSize = 2;
+                    litSize = MEM_readLE16(istart) >> 4;
+                    break;
+                case 3:
+                    lhSize = 3;
+                    litSize = MEM_readLE24(istart) >> 4;
+                    RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
+                    break;
+                }
+                RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
+                memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
+                dctx->litPtr = dctx->litBuffer;
+                dctx->litSize = litSize;
+                return lhSize+1;
+            }
+        default:
+            RETURN_ERROR(corruption_detected, "impossible");
+        }
+    }
+}
+
+/* Default FSE distribution tables.
+ * These are pre-calculated FSE decoding tables, using the default distributions as defined in the specification :
+ * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#default-distributions
+ * They were generated programmatically with the following method :
+ * - start from the default distributions, present in /lib/common/zstd_internal.h
+ * - generate the tables normally, using ZSTD_buildFSETable()
+ * - print out the content of the tables
+ * - prettify the output, report it below, and test with a fuzzer to ensure it's correct */
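+
+/* Illustrative sketch (not part of the original source) : the generation
+ * method described above could be reproduced along these lines. It assumes
+ * the LL_defaultNorm / LL_bits arrays from zstd_internal.h and <stdio.h>
+ * for printf() ; the helper name is hypothetical. */
+#if 0
+static void LL_printDefaultDTable(void)
+{
+    ZSTD_seqSymbol dt[SEQSYMBOL_TABLE_SIZE(LL_DEFAULTNORMLOG)];
+    U32 u;
+    ZSTD_buildFSETable(dt, LL_defaultNorm, MaxLL, LL_base, LL_bits, LL_DEFAULTNORMLOG);
+    for (u = 0; u < (1u << LL_DEFAULTNORMLOG); u++)   /* dt[0] is the header cell */
+        printf("{ %u, %u, %u, %u },\n",
+               (unsigned)dt[u+1].nextState, (unsigned)dt[u+1].nbAdditionalBits,
+               (unsigned)dt[u+1].nbBits, (unsigned)dt[u+1].baseValue);
+}
+#endif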
+
+/* Default FSE distribution table for Literal Lengths */
+static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
+     {  1,  1,  1, LL_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
+     /* nextState, nbAddBits, nbBits, baseVal */
+     {  0,  0,  4,    0},  { 16,  0,  4,    0},
+     { 32,  0,  5,    1},  {  0,  0,  5,    3},
+     {  0,  0,  5,    4},  {  0,  0,  5,    6},
+     {  0,  0,  5,    7},  {  0,  0,  5,    9},
+     {  0,  0,  5,   10},  {  0,  0,  5,   12},
+     {  0,  0,  6,   14},  {  0,  1,  5,   16},
+     {  0,  1,  5,   20},  {  0,  1,  5,   22},
+     {  0,  2,  5,   28},  {  0,  3,  5,   32},
+     {  0,  4,  5,   48},  { 32,  6,  5,   64},
+     {  0,  7,  5,  128},  {  0,  8,  6,  256},
+     {  0, 10,  6, 1024},  {  0, 12,  6, 4096},
+     { 32,  0,  4,    0},  {  0,  0,  4,    1},
+     {  0,  0,  5,    2},  { 32,  0,  5,    4},
+     {  0,  0,  5,    5},  { 32,  0,  5,    7},
+     {  0,  0,  5,    8},  { 32,  0,  5,   10},
+     {  0,  0,  5,   11},  {  0,  0,  6,   13},
+     { 32,  1,  5,   16},  {  0,  1,  5,   18},
+     { 32,  1,  5,   22},  {  0,  2,  5,   24},
+     { 32,  3,  5,   32},  {  0,  3,  5,   40},
+     {  0,  6,  4,   64},  { 16,  6,  4,   64},
+     { 32,  7,  5,  128},  {  0,  9,  6,  512},
+     {  0, 11,  6, 2048},  { 48,  0,  4,    0},
+     { 16,  0,  4,    1},  { 32,  0,  5,    2},
+     { 32,  0,  5,    3},  { 32,  0,  5,    5},
+     { 32,  0,  5,    6},  { 32,  0,  5,    8},
+     { 32,  0,  5,    9},  { 32,  0,  5,   11},
+     { 32,  0,  5,   12},  {  0,  0,  6,   15},
+     { 32,  1,  5,   18},  { 32,  1,  5,   20},
+     { 32,  2,  5,   24},  { 32,  2,  5,   28},
+     { 32,  3,  5,   40},  { 32,  4,  5,   48},
+     {  0, 16,  6,65536},  {  0, 15,  6,32768},
+     {  0, 14,  6,16384},  {  0, 13,  6, 8192},
+};   /* LL_defaultDTable */
+
+/* Default FSE distribution table for Offset Codes */
+static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
+    {  1,  1,  1, OF_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
+    /* nextState, nbAddBits, nbBits, baseVal */
+    {  0,  0,  5,    0},     {  0,  6,  4,   61},
+    {  0,  9,  5,  509},     {  0, 15,  5,32765},
+    {  0, 21,  5,2097149},   {  0,  3,  5,    5},
+    {  0,  7,  4,  125},     {  0, 12,  5, 4093},
+    {  0, 18,  5,262141},    {  0, 23,  5,8388605},
+    {  0,  5,  5,   29},     {  0,  8,  4,  253},
+    {  0, 14,  5,16381},     {  0, 20,  5,1048573},
+    {  0,  2,  5,    1},     { 16,  7,  4,  125},
+    {  0, 11,  5, 2045},     {  0, 17,  5,131069},
+    {  0, 22,  5,4194301},   {  0,  4,  5,   13},
+    { 16,  8,  4,  253},     {  0, 13,  5, 8189},
+    {  0, 19,  5,524285},    {  0,  1,  5,    1},
+    { 16,  6,  4,   61},     {  0, 10,  5, 1021},
+    {  0, 16,  5,65533},     {  0, 28,  5,268435453},
+    {  0, 27,  5,134217725}, {  0, 26,  5,67108861},
+    {  0, 25,  5,33554429},  {  0, 24,  5,16777213},
+};   /* OF_defaultDTable */
+
+
+/* Default FSE distribution table for Match Lengths */
+static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
+    {  1,  1,  1, ML_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
+    /* nextState, nbAddBits, nbBits, baseVal */
+    {  0,  0,  6,    3},  {  0,  0,  4,    4},
+    { 32,  0,  5,    5},  {  0,  0,  5,    6},
+    {  0,  0,  5,    8},  {  0,  0,  5,    9},
+    {  0,  0,  5,   11},  {  0,  0,  6,   13},
+    {  0,  0,  6,   16},  {  0,  0,  6,   19},
+    {  0,  0,  6,   22},  {  0,  0,  6,   25},
+    {  0,  0,  6,   28},  {  0,  0,  6,   31},
+    {  0,  0,  6,   34},  {  0,  1,  6,   37},
+    {  0,  1,  6,   41},  {  0,  2,  6,   47},
+    {  0,  3,  6,   59},  {  0,  4,  6,   83},
+    {  0,  7,  6,  131},  {  0,  9,  6,  515},
+    { 16,  0,  4,    4},  {  0,  0,  4,    5},
+    { 32,  0,  5,    6},  {  0,  0,  5,    7},
+    { 32,  0,  5,    9},  {  0,  0,  5,   10},
+    {  0,  0,  6,   12},  {  0,  0,  6,   15},
+    {  0,  0,  6,   18},  {  0,  0,  6,   21},
+    {  0,  0,  6,   24},  {  0,  0,  6,   27},
+    {  0,  0,  6,   30},  {  0,  0,  6,   33},
+    {  0,  1,  6,   35},  {  0,  1,  6,   39},
+    {  0,  2,  6,   43},  {  0,  3,  6,   51},
+    {  0,  4,  6,   67},  {  0,  5,  6,   99},
+    {  0,  8,  6,  259},  { 32,  0,  4,    4},
+    { 48,  0,  4,    4},  { 16,  0,  4,    5},
+    { 32,  0,  5,    7},  { 32,  0,  5,    8},
+    { 32,  0,  5,   10},  { 32,  0,  5,   11},
+    {  0,  0,  6,   14},  {  0,  0,  6,   17},
+    {  0,  0,  6,   20},  {  0,  0,  6,   23},
+    {  0,  0,  6,   26},  {  0,  0,  6,   29},
+    {  0,  0,  6,   32},  {  0, 16,  6,65539},
+    {  0, 15,  6,32771},  {  0, 14,  6,16387},
+    {  0, 13,  6, 8195},  {  0, 12,  6, 4099},
+    {  0, 11,  6, 2051},  {  0, 10,  6, 1027},
+};   /* ML_defaultDTable */
+
+
+static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddBits)
+{
+    void* ptr = dt;
+    ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
+    ZSTD_seqSymbol* const cell = dt + 1;
+
+    DTableH->tableLog = 0;
+    DTableH->fastMode = 0;
+
+    cell->nbBits = 0;
+    cell->nextState = 0;
+    assert(nbAddBits < 255);
+    cell->nbAdditionalBits = (BYTE)nbAddBits;
+    cell->baseValue = baseValue;
+}
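+
+/* Illustrative note : with tableLog==0 and nbBits==0, every subsequent
+ * ZSTD_updateFseState() reads 0 bits and lands back on cell 0, so the
+ * decoder keeps returning the same baseValue, which is exactly the RLE
+ * behaviour required by the set_rle mode. */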
+
+
+/* ZSTD_buildFSETable() :
+ * generate FSE decoding table for one symbol (ll, ml or off).
+ * cannot fail if input is valid :
+ * all inputs are presumed validated at this stage */
+void
+ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
+            const short* normalizedCounter, unsigned maxSymbolValue,
+            const U32* baseValue, const U32* nbAdditionalBits,
+            unsigned tableLog)
+{
+    ZSTD_seqSymbol* const tableDecode = dt+1;
+    U16 symbolNext[MaxSeq+1];
+
+    U32 const maxSV1 = maxSymbolValue + 1;
+    U32 const tableSize = 1 << tableLog;
+    U32 highThreshold = tableSize-1;
+
+    /* Sanity Checks */
+    assert(maxSymbolValue <= MaxSeq);
+    assert(tableLog <= MaxFSELog);
+
+    /* Init, lay down lowprob symbols */
+    {   ZSTD_seqSymbol_header DTableH;
+        DTableH.tableLog = tableLog;
+        DTableH.fastMode = 1;
+        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
+            U32 s;
+            for (s=0; s<maxSV1; s++) {
+                if (normalizedCounter[s]==-1) {
+                    tableDecode[highThreshold--].baseValue = s;
+                    symbolNext[s] = 1;
+                } else {
+                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+                    symbolNext[s] = normalizedCounter[s];
+        }   }   }
+        memcpy(dt, &DTableH, sizeof(DTableH));
+    }
+
+    /* Spread symbols */
+    {   U32 const tableMask = tableSize-1;
+        U32 const step = FSE_TABLESTEP(tableSize);
+        U32 s, position = 0;
+        for (s=0; s<maxSV1; s++) {
+            int i;
+            for (i=0; i<normalizedCounter[s]; i++) {
+                tableDecode[position].baseValue = s;
+                position = (position + step) & tableMask;
+                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
+        }   }
+        assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+    }
+
+    /* Build Decoding table */
+    {   U32 u;
+        for (u=0; u<tableSize; u++) {
+            U32 const symbol = tableDecode[u].baseValue;
+            U32 const nextState = symbolNext[symbol]++;
+            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
+            tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
+            assert(nbAdditionalBits[symbol] < 255);
+            tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];
+            tableDecode[u].baseValue = baseValue[symbol];
+    }   }
+}
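+
+/* Worked example (illustrative) : take tableLog==2, so tableSize==4,
+ * tableMask==3 and step==FSE_TABLESTEP(4)==(4>>1)+(4>>3)+3==5.
+ * With normalizedCounter=={2,1,1}, the spread loop above visits positions
+ * 0, (0+5)&3==1, (1+5)&3==2, (2+5)&3==3, then wraps back to 0 :
+ * cells {0,1} receive symbol 0, cell 2 symbol 1, cell 3 symbol 2,
+ * and `position` ends at 0, satisfying the assert in the spread step. */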
+
+
+/*! ZSTD_buildSeqTable() :
+ * @return : nb bytes read from src,
+ *           or an error code if it fails */
+static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
+                                 symbolEncodingType_e type, unsigned max, U32 maxLog,
+                                 const void* src, size_t srcSize,
+                                 const U32* baseValue, const U32* nbAdditionalBits,
+                                 const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
+                                 int ddictIsCold, int nbSeq)
+{
+    switch(type)
+    {
+    case set_rle :
+        RETURN_ERROR_IF(!srcSize, srcSize_wrong);
+        RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected);
+        {   U32 const symbol = *(const BYTE*)src;
+            U32 const baseline = baseValue[symbol];
+            U32 const nbBits = nbAdditionalBits[symbol];
+            ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
+        }
+        *DTablePtr = DTableSpace;
+        return 1;
+    case set_basic :
+        *DTablePtr = defaultTable;
+        return 0;
+    case set_repeat:
+        RETURN_ERROR_IF(!flagRepeatTable, corruption_detected);
+        /* prefetch FSE table if used */
+        if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
+            const void* const pStart = *DTablePtr;
+            size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));
+            PREFETCH_AREA(pStart, pSize);
+        }
+        return 0;
+    case set_compressed :
+        {   unsigned tableLog;
+            S16 norm[MaxSeq+1];
+            size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
+            RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected);
+            RETURN_ERROR_IF(tableLog > maxLog, corruption_detected);
+            ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);
+            *DTablePtr = DTableSpace;
+            return headerSize;
+        }
+    default :
+        assert(0);
+        RETURN_ERROR(GENERIC, "impossible");
+    }
+}
+
+size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
+                             const void* src, size_t srcSize)
+{
+    const BYTE* const istart = (const BYTE* const)src;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* ip = istart;
+    int nbSeq;
+    DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
+
+    /* check */
+    RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong);
+
+    /* SeqHead */
+    nbSeq = *ip++;
+    if (!nbSeq) {
+        *nbSeqPtr=0;
+        RETURN_ERROR_IF(srcSize != 1, srcSize_wrong);
+        return 1;
+    }
+    if (nbSeq > 0x7F) {
+        if (nbSeq == 0xFF) {
+            RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong);
+            nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
+        } else {
+            RETURN_ERROR_IF(ip >= iend, srcSize_wrong);
+            nbSeq = ((nbSeq-0x80)<<8) + *ip++;
+        }
+    }
+    *nbSeqPtr = nbSeq;
+
+    /* FSE table descriptors */
+    RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong); /* minimum possible size: 1 byte for symbol encoding types */
+    {   symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
+        symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
+        symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
+        ip++;
+
+        /* Build DTables */
+        {   size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
+                                                      LLtype, MaxLL, LLFSELog,
+                                                      ip, iend-ip,
+                                                      LL_base, LL_bits,
+                                                      LL_defaultDTable, dctx->fseEntropy,
+                                                      dctx->ddictIsCold, nbSeq);
+            RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected);
+            ip += llhSize;
+        }
+
+        {   size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
+                                                      OFtype, MaxOff, OffFSELog,
+                                                      ip, iend-ip,
+                                                      OF_base, OF_bits,
+                                                      OF_defaultDTable, dctx->fseEntropy,
+                                                      dctx->ddictIsCold, nbSeq);
+            RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected);
+            ip += ofhSize;
+        }
+
+        {   size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
+                                                      MLtype, MaxML, MLFSELog,
+                                                      ip, iend-ip,
+                                                      ML_base, ML_bits,
+                                                      ML_defaultDTable, dctx->fseEntropy,
+                                                      dctx->ddictIsCold, nbSeq);
+            RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected);
+            ip += mlhSize;
+        }
+    }
+
+    return ip-istart;
+}
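+
+/* Worked example (illustrative) of the nbSeq header encoding parsed above :
+ * - first byte 0x45 (<= 0x7F)            : nbSeq = 0x45 = 69, 1 byte consumed
+ * - first byte 0x81, next byte 0x10      : nbSeq = ((0x81-0x80)<<8) + 0x10 = 272
+ * - first byte 0xFF, next LE16 == 0x0100 : nbSeq = 0x0100 + LONGNBSEQ
+ *   (LONGNBSEQ is 0x7F00 in zstd_internal.h, giving nbSeq = 0x8000 = 32768).
+ * The 2-byte form tops out at ((0xFE-0x80)<<8)+0xFF == 0x7EFF, one below
+ * where the 3-byte form starts. */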
+
+
+typedef struct {
+    size_t litLength;
+    size_t matchLength;
+    size_t offset;
+    const BYTE* match;
+} seq_t;
+
+typedef struct {
+    size_t state;
+    const ZSTD_seqSymbol* table;
+} ZSTD_fseState;
+
+typedef struct {
+    BIT_DStream_t DStream;
+    ZSTD_fseState stateLL;
+    ZSTD_fseState stateOffb;
+    ZSTD_fseState stateML;
+    size_t prevOffset[ZSTD_REP_NUM];
+    const BYTE* prefixStart;
+    const BYTE* dictEnd;
+    size_t pos;
+} seqState_t;
+
+
+/* ZSTD_execSequenceLast7():
+ * exceptional case : decompress a match starting within the last 7 bytes of the output buffer.
+ * requires more careful checks, to ensure there is no overflow.
+ * performance does not matter though.
+ * note : this case is never supposed to be generated "naturally" by the reference encoder,
+ *        since in most cases it needs at least 8 bytes to look for a match.
+ *        but it's allowed by the specification. */
+FORCE_NOINLINE
+size_t ZSTD_execSequenceLast7(BYTE* op,
+                              BYTE* const oend, seq_t sequence,
+                              const BYTE** litPtr, const BYTE* const litLimit,
+                              const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
+{
+    BYTE* const oLitEnd = op + sequence.litLength;
+    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
+    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+    const BYTE* match = oLitEnd - sequence.offset;
+
+    /* check */
+    RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must fit within dstBuffer");
+    RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "try to read beyond literal buffer");
+
+    /* copy literals */
+    while (op < oLitEnd) *op++ = *(*litPtr)++;
+
+    /* copy Match */
+    if (sequence.offset > (size_t)(oLitEnd - base)) {
+        /* offset beyond prefix */
+        RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - vBase), corruption_detected);
+        match = dictEnd - (base-match);
+        if (match + sequence.matchLength <= dictEnd) {
+            memmove(oLitEnd, match, sequence.matchLength);
+            return sequenceLength;
+        }
+        /* span extDict & currentPrefixSegment */
+        {   size_t const length1 = dictEnd - match;
+            memmove(oLitEnd, match, length1);
+            op = oLitEnd + length1;
+            sequence.matchLength -= length1;
+            match = base;
+    }   }
+    while (op < oMatchEnd) *op++ = *match++;
+    return sequenceLength;
+}
+
+
+HINT_INLINE
+size_t ZSTD_execSequence(BYTE* op,
+                         BYTE* const oend, seq_t sequence,
+                         const BYTE** litPtr, const BYTE* const litLimit,
+                         const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
+{
+    BYTE* const oLitEnd = op + sequence.litLength;
+    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
+    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
+    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+    const BYTE* match = oLitEnd - sequence.offset;
+
+    /* check */
+    RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
+    RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
+    if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
+
+    /* copy Literals */
+    if (sequence.litLength > 8)
+        ZSTD_wildcopy_16min(op, (*litPtr), sequence.litLength, ZSTD_no_overlap);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
+    else
+        ZSTD_copy8(op, *litPtr);
+    op = oLitEnd;
+    *litPtr = iLitEnd;   /* update for next sequence */
+
+    /* copy Match */
+    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+        /* offset beyond prefix -> go into extDict */
+        RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected);
+        match = dictEnd + (match - prefixStart);
+        if (match + sequence.matchLength <= dictEnd) {
+            memmove(oLitEnd, match, sequence.matchLength);
+            return sequenceLength;
+        }
+        /* span extDict & currentPrefixSegment */
+        {   size_t const length1 = dictEnd - match;
+            memmove(oLitEnd, match, length1);
+            op = oLitEnd + length1;
+            sequence.matchLength -= length1;
+            match = prefixStart;
+            if (op > oend_w || sequence.matchLength < MINMATCH) {
+              U32 i;
+              for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
+              return sequenceLength;
+            }
+    }   }
+    /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
+
+    /* match within prefix */
+    if (sequence.offset < 8) {
+        /* close range match, overlap */
+        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
+        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
+        int const sub2 = dec64table[sequence.offset];
+        op[0] = match[0];
+        op[1] = match[1];
+        op[2] = match[2];
+        op[3] = match[3];
+        match += dec32table[sequence.offset];
+        ZSTD_copy4(op+4, match);
+        match -= sub2;
+    } else {
+        ZSTD_copy8(op, match);
+    }
+    op += 8; match += 8;
+
+    if (oMatchEnd > oend-(16-MINMATCH)) {
+        if (op < oend_w) {
+            ZSTD_wildcopy(op, match, oend_w - op, ZSTD_overlap_src_before_dst);
+            match += oend_w - op;
+            op = oend_w;
+        }
+        while (op < oMatchEnd) *op++ = *match++;
+    } else {
+        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);   /* works even if matchLength < 8 */
+    }
+    return sequenceLength;
+}
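+
+/* Worked example (illustrative) of the close-range overlap trick above, for
+ * offset==1 : match starts at op-1, so the four byte-copies replicate the
+ * single byte at op-1 into op[0..3] ; dec32table[1]==1 then advances match
+ * to op, so ZSTD_copy4() writes four more copies at op[4..7] ; finally
+ * match -= dec64table[1]==8 leaves match exactly 8 bytes behind op after
+ * both pointers advance, so the remaining wildcopy runs with a safe
+ * effective offset of 8. */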
+
+
+HINT_INLINE
+size_t ZSTD_execSequenceLong(BYTE* op,
+                             BYTE* const oend, seq_t sequence,
+                             const BYTE** litPtr, const BYTE* const litLimit,
+                             const BYTE* const prefixStart, const BYTE* const dictStart, const BYTE* const dictEnd)
+{
+    BYTE* const oLitEnd = op + sequence.litLength;
+    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
+    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
+    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+    const BYTE* match = sequence.match;
+
+    /* check */
+    RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
+    RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
+    if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd);
+
+    /* copy Literals */
+    if (sequence.litLength > 8)
+        ZSTD_wildcopy_16min(op, *litPtr, sequence.litLength, ZSTD_no_overlap);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
+    else
+        ZSTD_copy8(op, *litPtr);  /* note : op <= oLitEnd <= oend_w == oend - 8 */
+
+    op = oLitEnd;
+    *litPtr = iLitEnd;   /* update for next sequence */
+
+    /* copy Match */
+    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+        /* offset beyond prefix */
+        RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - dictStart), corruption_detected);
+        if (match + sequence.matchLength <= dictEnd) {
+            memmove(oLitEnd, match, sequence.matchLength);
+            return sequenceLength;
+        }
+        /* span extDict & currentPrefixSegment */
+        {   size_t const length1 = dictEnd - match;
+            memmove(oLitEnd, match, length1);
+            op = oLitEnd + length1;
+            sequence.matchLength -= length1;
+            match = prefixStart;
+            if (op > oend_w || sequence.matchLength < MINMATCH) {
+              U32 i;
+              for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
+              return sequenceLength;
+            }
+    }   }
+    assert(op <= oend_w);
+    assert(sequence.matchLength >= MINMATCH);
+
+    /* match within prefix */
+    if (sequence.offset < 8) {
+        /* close range match, overlap */
+        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
+        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
+        int const sub2 = dec64table[sequence.offset];
+        op[0] = match[0];
+        op[1] = match[1];
+        op[2] = match[2];
+        op[3] = match[3];
+        match += dec32table[sequence.offset];
+        ZSTD_copy4(op+4, match);
+        match -= sub2;
+    } else {
+        ZSTD_copy8(op, match);
+    }
+    op += 8; match += 8;
+
+    if (oMatchEnd > oend-(16-MINMATCH)) {
+        if (op < oend_w) {
+            ZSTD_wildcopy(op, match, oend_w - op, ZSTD_overlap_src_before_dst);
+            match += oend_w - op;
+            op = oend_w;
+        }
+        while (op < oMatchEnd) *op++ = *match++;
+    } else {
+        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);   /* works even if matchLength < 8 */
+    }
+    return sequenceLength;
+}
+
+static void
+ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
+{
+    const void* ptr = dt;
+    const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;
+    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
+    DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
+                (U32)DStatePtr->state, DTableH->tableLog);
+    BIT_reloadDStream(bitD);
+    DStatePtr->table = dt + 1;
+}
+
+FORCE_INLINE_TEMPLATE void
+ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
+{
+    ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state];
+    U32 const nbBits = DInfo.nbBits;
+    size_t const lowBits = BIT_readBits(bitD, nbBits);
+    DStatePtr->state = DInfo.nextState + lowBits;
+}
+
+/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
+ * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
+ * bits before reloading. This value is the maximum number of bits we read
+ * after reloading when we are decoding long offsets.
+ */
+#define LONG_OFFSETS_MAX_EXTRA_BITS_32                       \
+    (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32       \
+        ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32  \
+        : 0)
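+
+/* Illustrative arithmetic : with the limits used by this version
+ * (ZSTD_WINDOWLOG_MAX_32 == 30, STREAM_ACCUMULATOR_MIN_32 == 25), this
+ * expands to 30 - 25 == 5 extra bits, matching the static assert on
+ * LONG_OFFSETS_MAX_EXTRA_BITS_32 in ZSTD_decodeSequence() below. */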
+
+typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+FORCE_INLINE_TEMPLATE seq_t
+ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
+{
+    seq_t seq;
+    U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
+    U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
+    U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
+    U32 const totalBits = llBits+mlBits+ofBits;
+    U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
+    U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
+    U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;
+
+    /* sequence */
+    {   size_t offset;
+        if (!ofBits)
+            offset = 0;
+        else {
+            ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
+            ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
+            assert(ofBits <= MaxOff);
+            if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
+                U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
+                offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
+                BIT_reloadDStream(&seqState->DStream);
+                if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
+                assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32);   /* to avoid another reload */
+            } else {
+                offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/);   /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
+                if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
+            }
+        }
+
+        if (ofBits <= 1) {
+            offset += (llBase==0);
+            if (offset) {
+                size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
+                temp += !temp;   /* 0 is not valid; input is corrupted; force offset to 1 */
+                if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
+                seqState->prevOffset[1] = seqState->prevOffset[0];
+                seqState->prevOffset[0] = offset = temp;
+            } else {  /* offset == 0 */
+                offset = seqState->prevOffset[0];
+            }
+        } else {
+            seqState->prevOffset[2] = seqState->prevOffset[1];
+            seqState->prevOffset[1] = seqState->prevOffset[0];
+            seqState->prevOffset[0] = offset;
+        }
+        seq.offset = offset;
+    }
+
+    seq.matchLength = mlBase
+                    + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/) : 0);  /* <=  16 bits */
+    if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
+        BIT_reloadDStream(&seqState->DStream);
+    if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
+        BIT_reloadDStream(&seqState->DStream);
+    /* Ensure there are enough bits to read the rest of the data in 64-bit mode. */
+    ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
+
+    seq.litLength = llBase
+                  + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits/*>0*/) : 0);    /* <=  16 bits */
+    if (MEM_32bits())
+        BIT_reloadDStream(&seqState->DStream);
+
+    DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
+                (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
+
+    /* ANS state update */
+    ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream);    /* <=  9 bits */
+    ZSTD_updateFseState(&seqState->stateML, &seqState->DStream);    /* <=  9 bits */
+    if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
+    ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream);  /* <=  8 bits */
+
+    return seq;
+}
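+
+/* Worked example (illustrative) of the repcode update above : suppose
+ * prevOffset == {8, 4, 2}. With llBase != 0 and a decoded offset of 2,
+ * temp = prevOffset[2] == 2 and the history rotates to {2, 8, 4} ;
+ * a decoded offset of 3 instead yields prevOffset[0]-1 == 7. When
+ * ofBits==0 and litLength != 0, offset stays 0 and the sequence simply
+ * reuses prevOffset[0] == 8, leaving the history unchanged. */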
+
+FORCE_INLINE_TEMPLATE size_t
+DONT_VECTORIZE
+ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
+                               void* dst, size_t maxDstSize,
+                         const void* seqStart, size_t seqSize, int nbSeq,
+                         const ZSTD_longOffset_e isLongOffset)
+{
+    const BYTE* ip = (const BYTE*)seqStart;
+    const BYTE* const iend = ip + seqSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* const oend = ostart + maxDstSize;
+    BYTE* op = ostart;
+    const BYTE* litPtr = dctx->litPtr;
+    const BYTE* const litEnd = litPtr + dctx->litSize;
+    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
+    const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
+    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+    DEBUGLOG(5, "ZSTD_decompressSequences_body");
+
+    /* Regen sequences */
+    if (nbSeq) {
+        seqState_t seqState;
+        dctx->fseEntropy = 1;
+        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
+        RETURN_ERROR_IF(
+            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
+            corruption_detected);
+        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+
+        ZSTD_STATIC_ASSERT(
+                BIT_DStream_unfinished < BIT_DStream_completed &&
+                BIT_DStream_endOfBuffer < BIT_DStream_completed &&
+                BIT_DStream_completed < BIT_DStream_overflow);
+
+        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) {
+            nbSeq--;
+            {   seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+                size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
+                DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+                if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+                op += oneSeqSize;
+        }   }
+
+        /* check if reached exact end */
+        DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
+        RETURN_ERROR_IF(nbSeq, corruption_detected);
+        RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected);
+        /* save reps for next block */
+        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
+    }
+
+    /* last literal segment */
+    {   size_t const lastLLSize = litEnd - litPtr;
+        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);
+        memcpy(op, litPtr, lastLLSize);
+        op += lastLLSize;
+    }
+
+    return op-ostart;
+}
+
+static size_t
+ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
+                                 void* dst, size_t maxDstSize,
+                           const void* seqStart, size_t seqSize, int nbSeq,
+                           const ZSTD_longOffset_e isLongOffset)
+{
+    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
+
+
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+FORCE_INLINE_TEMPLATE seq_t
+ZSTD_decodeSequenceLong(seqState_t* seqState, ZSTD_longOffset_e const longOffsets)
+{
+    seq_t seq;
+    U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
+    U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
+    U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
+    U32 const totalBits = llBits+mlBits+ofBits;
+    U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
+    U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
+    U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;
+
+    /* sequence */
+    {   size_t offset;
+        if (!ofBits)
+            offset = 0;
+        else {
+            ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
+            ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
+            assert(ofBits <= MaxOff);
+            if (MEM_32bits() && longOffsets) {
+                U32 const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN_32-1);
+                offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
+                if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream);
+                if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
+            } else {
+                offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits);   /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
+                if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
+            }
+        }
+
+        if (ofBits <= 1) {
+            offset += (llBase==0);
+            if (offset) {
+                size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
+                temp += !temp;   /* 0 is not valid; input is corrupted; force offset to 1 */
+                if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
+                seqState->prevOffset[1] = seqState->prevOffset[0];
+                seqState->prevOffset[0] = offset = temp;
+            } else {
+                offset = seqState->prevOffset[0];
+            }
+        } else {
+            seqState->prevOffset[2] = seqState->prevOffset[1];
+            seqState->prevOffset[1] = seqState->prevOffset[0];
+            seqState->prevOffset[0] = offset;
+        }
+        seq.offset = offset;
+    }
+
+    seq.matchLength = mlBase + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0);  /* <=  16 bits */
+    if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
+        BIT_reloadDStream(&seqState->DStream);
+    if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
+        BIT_reloadDStream(&seqState->DStream);
+    /* Ensure there are enough bits to read the rest of the data in 64-bit mode. */
+    ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
+
+    seq.litLength = llBase + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0);    /* <=  16 bits */
+    if (MEM_32bits())
+        BIT_reloadDStream(&seqState->DStream);
+
+    {   size_t const pos = seqState->pos + seq.litLength;
+        const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
+        seq.match = matchBase + pos - seq.offset;  /* note : this operation can overflow when seq.offset is too large, which can only happen when the input is corrupted.
+                                                    * No consequence though : no memory access will occur ; an overly large offset will be detected in ZSTD_execSequenceLong() */
+        seqState->pos = pos + seq.matchLength;
+    }
+
+    /* ANS state update */
+    ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream);    /* <=  9 bits */
+    ZSTD_updateFseState(&seqState->stateML, &seqState->DStream);    /* <=  9 bits */
+    if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
+    ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream);  /* <=  8 bits */
+
+    return seq;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_decompressSequencesLong_body(
+                               ZSTD_DCtx* dctx,
+                               void* dst, size_t maxDstSize,
+                         const void* seqStart, size_t seqSize, int nbSeq,
+                         const ZSTD_longOffset_e isLongOffset)
+{
+    const BYTE* ip = (const BYTE*)seqStart;
+    const BYTE* const iend = ip + seqSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* const oend = ostart + maxDstSize;
+    BYTE* op = ostart;
+    const BYTE* litPtr = dctx->litPtr;
+    const BYTE* const litEnd = litPtr + dctx->litSize;
+    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
+    const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
+    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+
+    /* Regen sequences */
+    if (nbSeq) {
+#define STORED_SEQS 4
+#define STORED_SEQS_MASK (STORED_SEQS-1)
+#define ADVANCED_SEQS 4
+        seq_t sequences[STORED_SEQS];
+        int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
+        seqState_t seqState;
+        int seqNb;
+        dctx->fseEntropy = 1;
+        { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
+        seqState.prefixStart = prefixStart;
+        seqState.pos = (size_t)(op-prefixStart);
+        seqState.dictEnd = dictEnd;
+        assert(iend >= ip);
+        RETURN_ERROR_IF(
+            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
+            corruption_detected);
+        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+
+        /* prepare in advance */
+        for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
+            sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
+            PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
+        }
+        RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected);
+
+        /* decode and decompress */
+        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
+            seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
+            size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
+            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+            PREFETCH_L1(sequence.match); PREFETCH_L1(sequence.match + sequence.matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
+            sequences[seqNb & STORED_SEQS_MASK] = sequence;
+            op += oneSeqSize;
+        }
+        RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected);
+
+        /* finish queue */
+        seqNb -= seqAdvance;
+        for ( ; seqNb<nbSeq ; seqNb++) {
+            size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
+            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+            op += oneSeqSize;
+        }
+
+        /* save reps for next block */
+        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
+    }
+
+    /* last literal segment */
+    {   size_t const lastLLSize = litEnd - litPtr;
+        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);
+        memcpy(op, litPtr, lastLLSize);
+        op += lastLLSize;
+    }
+
+    return op-ostart;
+}
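+
+/* Illustrative note on the software pipeline above : with STORED_SEQS==4,
+ * the ring index (seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK executes the
+ * sequence decoded 4 iterations earlier ; e.g. at seqNb==5 the loop decodes
+ * sequence 5 while executing sequence (5-4)&3 == 1, giving the PREFETCH_L1()
+ * calls several iterations to pull the match bytes into cache. */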
+
+static size_t
+ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
+                                 void* dst, size_t maxDstSize,
+                           const void* seqStart, size_t seqSize, int nbSeq,
+                           const ZSTD_longOffset_e isLongOffset)
+{
+    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
+
+
+
+#if DYNAMIC_BMI2
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+static TARGET_ATTRIBUTE("bmi2") size_t
+DONT_VECTORIZE
+ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
+                                 void* dst, size_t maxDstSize,
+                           const void* seqStart, size_t seqSize, int nbSeq,
+                           const ZSTD_longOffset_e isLongOffset)
+{
+    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+static TARGET_ATTRIBUTE("bmi2") size_t
+ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
+                                 void* dst, size_t maxDstSize,
+                           const void* seqStart, size_t seqSize, int nbSeq,
+                           const ZSTD_longOffset_e isLongOffset)
+{
+    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
+
+#endif /* DYNAMIC_BMI2 */
+
+typedef size_t (*ZSTD_decompressSequences_t)(
+                            ZSTD_DCtx* dctx,
+                            void* dst, size_t maxDstSize,
+                            const void* seqStart, size_t seqSize, int nbSeq,
+                            const ZSTD_longOffset_e isLongOffset);
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+static size_t
+ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
+                   const void* seqStart, size_t seqSize, int nbSeq,
+                   const ZSTD_longOffset_e isLongOffset)
+{
+    DEBUGLOG(5, "ZSTD_decompressSequences");
+#if DYNAMIC_BMI2
+    if (dctx->bmi2) {
+        return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+    }
+#endif
+    return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
+
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+/* ZSTD_decompressSequencesLong() :
+ * decompression function triggered when a minimum share of offsets is considered "long",
+ * aka out of cache.
+ * note : the definition of "long" is somewhat overloaded here, sometimes meaning "wider than the bitstream register", and sometimes meaning "farther than the memory cache distance".
+ * This function will try to mitigate main memory latency through the use of prefetching. */
+static size_t
+ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
+                             void* dst, size_t maxDstSize,
+                             const void* seqStart, size_t seqSize, int nbSeq,
+                             const ZSTD_longOffset_e isLongOffset)
+{
+    DEBUGLOG(5, "ZSTD_decompressSequencesLong");
+#if DYNAMIC_BMI2
+    if (dctx->bmi2) {
+        return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+    }
+#endif
+    return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
+
+
+
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+/* ZSTD_getLongOffsetsShare() :
+ * condition : offTable must be valid
+ * @return : "share" of long offsets (arbitrarily defined as > (1<<23))
+ *           compared to maximum possible of (1<<OffFSELog) */
+static unsigned
+ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
+{
+    const void* ptr = offTable;
+    U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
+    const ZSTD_seqSymbol* table = offTable + 1;
+    U32 const max = 1 << tableLog;
+    U32 u, total = 0;
+    DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
+
+    assert(max <= (1 << OffFSELog));  /* max not too large */
+    for (u=0; u<max; u++) {
+        if (table[u].nbAdditionalBits > 22) total += 1;
+    }
+
+    assert(tableLog <= OffFSELog);
+    total <<= (OffFSELog - tableLog);  /* scale to OffFSELog */
+
+    return total;
+}
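+
+/* Illustrative arithmetic : with OffFSELog == 8 the returned share is on a
+ * 256-cell scale, so the minShare thresholds used by the caller (7 and 20)
+ * correspond to 7/256 ~= 2.73% and 20/256 ~= 7.81% of decoded offsets
+ * requiring more than 22 extra bits. */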
+#endif
+
+
+size_t
+ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
+                              void* dst, size_t dstCapacity,
+                        const void* src, size_t srcSize, const int frame)
+{   /* blockType == blockCompressed */
+    const BYTE* ip = (const BYTE*)src;
+    /* isLongOffset must be true if there are long offsets.
+     * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
+     * We don't expect that to be the case in 64-bit mode.
+     * In block mode, the window size is not known, so we have to be conservative.
+     * (note: but it could be evaluated from current-lowLimit)
+     */
+    ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
+    DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
+
+    RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong);
+
+    /* Decode literals section */
+    {   size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
+        DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
+        if (ZSTD_isError(litCSize)) return litCSize;
+        ip += litCSize;
+        srcSize -= litCSize;
+    }
+
+    /* Build Decoding Tables */
+    {
+        /* These macros control at build-time which decompressor implementation
+         * we use. If neither is defined, we do some inspection and dispatch at
+         * runtime.
+         */
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+        int usePrefetchDecoder = dctx->ddictIsCold;
+#endif
+        int nbSeq;
+        size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
+        if (ZSTD_isError(seqHSize)) return seqHSize;
+        ip += seqHSize;
+        srcSize -= seqHSize;
+
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+        if ( !usePrefetchDecoder
+          && (!frame || (dctx->fParams.windowSize > (1<<24)))
+          && (nbSeq>ADVANCED_SEQS) ) {  /* could probably use a larger nbSeq limit */
+            U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
+            U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
+            usePrefetchDecoder = (shareLongOffsets >= minShare);
+        }
+#endif
+
+        dctx->ddictIsCold = 0;
+
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+        if (usePrefetchDecoder)
+#endif
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+            return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
+#endif
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+        /* else */
+        return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
+#endif
+    }
+}
+
+
+size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
+                            void* dst, size_t dstCapacity,
+                      const void* src, size_t srcSize)
+{
+    size_t dSize;
+    ZSTD_checkContinuity(dctx, dst);
+    dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
+    dctx->previousDstEnd = (char*)dst + dSize;
+    return dSize;
+}
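+
+/* Usage sketch (illustrative, not part of the original file) : raw blocks
+ * carry no size header, so the caller must transport block boundaries
+ * itself, and the DCtx must be initialized with ZSTD_decompressBegin()
+ * before the first block. The helper name below is hypothetical. */
+#if 0
+static size_t decompressOneBlock(void* dst, size_t dstCapacity,
+                                 const void* blk, size_t blkSize)
+{
+    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+    size_t dSize;
+    if (dctx == NULL) return ERROR(memory_allocation);
+    {   size_t const initResult = ZSTD_decompressBegin(dctx);  /* mandatory before block decoding */
+        if (ZSTD_isError(initResult)) { ZSTD_freeDCtx(dctx); return initResult; }
+    }
+    dSize = ZSTD_decompressBlock(dctx, dst, dstCapacity, blk, blkSize);
+    ZSTD_freeDCtx(dctx);
+    return dSize;   /* decompressed size, or an error code (test with ZSTD_isError()) */
+}
+#endif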
diff --git a/vendor/github.com/DataDog/zstd/zstd_decompress_block.h b/vendor/github.com/DataDog/zstd/zstd_decompress_block.h
new file mode 100644
index 0000000..7e92960
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_decompress_block.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+#ifndef ZSTD_DEC_BLOCK_H
+#define ZSTD_DEC_BLOCK_H
+
+/*-*******************************************************
+ *  Dependencies
+ *********************************************************/
+#include <stddef.h>   /* size_t */
+#include "zstd.h"    /* DCtx, and some public functions */
+#include "zstd_internal.h"  /* blockProperties_t, and some public functions */
+#include "zstd_decompress_internal.h"  /* ZSTD_seqSymbol */
+
+
+/* ===   Prototypes   === */
+
+/* note: prototypes already published within `zstd.h` :
+ * ZSTD_decompressBlock()
+ */
+
+/* note: prototypes already published within `zstd_internal.h` :
+ * ZSTD_getcBlockSize()
+ * ZSTD_decodeSeqHeaders()
+ */
+
+
+/* ZSTD_decompressBlock_internal() :
+ * decompress block, starting at `src`,
+ * into destination buffer `dst`.
+ * @return : decompressed block size,
+ *           or an error code (which can be tested using ZSTD_isError())
+ */
+size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
+                               void* dst, size_t dstCapacity,
+                         const void* src, size_t srcSize, const int frame);
+
+/* ZSTD_buildFSETable() :
+ * generate FSE decoding table for one symbol (ll, ml or off)
+ * this function must be called with valid parameters only
+ * (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.)
+ * in which case it cannot fail.
+ * Internal use only.
+ */
+void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
+             const short* normalizedCounter, unsigned maxSymbolValue,
+             const U32* baseValue, const U32* nbAdditionalBits,
+                   unsigned tableLog);
+
+
+#endif /* ZSTD_DEC_BLOCK_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_decompress_internal.h b/vendor/github.com/DataDog/zstd/zstd_decompress_internal.h
new file mode 100644
index 0000000..ccbdfa0
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_decompress_internal.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/* zstd_decompress_internal:
+ * objects and definitions shared within lib/decompress modules */
+
+#ifndef ZSTD_DECOMPRESS_INTERNAL_H
+#define ZSTD_DECOMPRESS_INTERNAL_H
+
+
+/*-*******************************************************
+ *  Dependencies
+ *********************************************************/
+#include "mem.h"             /* BYTE, U16, U32 */
+#include "zstd_internal.h"   /* ZSTD_seqSymbol */
+
+
+
+/*-*******************************************************
+ *  Constants
+ *********************************************************/
+static const U32 LL_base[MaxLL+1] = {
+                 0,    1,    2,     3,     4,     5,     6,      7,
+                 8,    9,   10,    11,    12,    13,    14,     15,
+                16,   18,   20,    22,    24,    28,    32,     40,
+                48,   64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
+                0x2000, 0x4000, 0x8000, 0x10000 };
+
+static const U32 OF_base[MaxOff+1] = {
+                 0,        1,       1,       5,     0xD,     0x1D,     0x3D,     0x7D,
+                 0xFD,   0x1FD,   0x3FD,   0x7FD,   0xFFD,   0x1FFD,   0x3FFD,   0x7FFD,
+                 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
+                 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
+
+static const U32 OF_bits[MaxOff+1] = {
+                     0,  1,  2,  3,  4,  5,  6,  7,
+                     8,  9, 10, 11, 12, 13, 14, 15,
+                    16, 17, 18, 19, 20, 21, 22, 23,
+                    24, 25, 26, 27, 28, 29, 30, 31 };
+
+static const U32 ML_base[MaxML+1] = {
+                     3,  4,  5,    6,     7,     8,     9,    10,
+                    11, 12, 13,   14,    15,    16,    17,    18,
+                    19, 20, 21,   22,    23,    24,    25,    26,
+                    27, 28, 29,   30,    31,    32,    33,    34,
+                    35, 37, 39,   41,    43,    47,    51,    59,
+                    67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
+                    0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
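+
+/* Illustrative note : these tables implement "baseline + extra bits"
+ * decoding, value = base[code] + BIT_readBits(bits[code]).
+ * For example, match-length code 44 has ML_base[44] == 0x103 == 259 and,
+ * per ML_bits in zstd_internal.h, 8 extra bits, covering match lengths
+ * 259..514 ; and for N >= 2, OF_base[N] == (1<<N) - 3, so consecutive
+ * offset codes cover contiguous, non-overlapping ranges. */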
+
+
+/*-*******************************************************
+ *  Decompression types
+ *********************************************************/
+typedef struct {
+    U32 fastMode;
+    U32 tableLog;
+} ZSTD_seqSymbol_header;
+
+typedef struct {
+    U16  nextState;
+    BYTE nbAdditionalBits;
+    BYTE nbBits;
+    U32  baseValue;
+} ZSTD_seqSymbol;
+
+#define SEQSYMBOL_TABLE_SIZE(log)   (1 + (1 << (log)))
+
+typedef struct {
+    ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)];    /* Note : Space reserved for FSE Tables */
+    ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)];   /* is also used as temporary workspace while building hufTable during DDict creation */
+    ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)];    /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
+    HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)];  /* can accommodate HUF_decompress4X */
+    U32 rep[ZSTD_REP_NUM];
+} ZSTD_entropyDTables_t;
+
+typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
+               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
+               ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
+               ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;
+
+typedef enum { zdss_init=0, zdss_loadHeader,
+               zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
+
+typedef enum {
+    ZSTD_use_indefinitely = -1,  /* Use the dictionary indefinitely */
+    ZSTD_dont_use = 0,           /* Do not use the dictionary (if one exists, free it) */
+    ZSTD_use_once = 1            /* Use the dictionary once and set to ZSTD_dont_use */
+} ZSTD_dictUses_e;
+
+struct ZSTD_DCtx_s
+{
+    const ZSTD_seqSymbol* LLTptr;
+    const ZSTD_seqSymbol* MLTptr;
+    const ZSTD_seqSymbol* OFTptr;
+    const HUF_DTable* HUFptr;
+    ZSTD_entropyDTables_t entropy;
+    U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];   /* space needed when building Huffman tables */
+    const void* previousDstEnd;   /* detect continuity */
+    const void* prefixStart;      /* start of current segment */
+    const void* virtualStart;     /* virtual start of previous segment if it was just before current one */
+    const void* dictEnd;          /* end of previous segment */
+    size_t expected;
+    ZSTD_frameHeader fParams;
+    U64 decodedSize;
+    blockType_e bType;            /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
+    ZSTD_dStage stage;
+    U32 litEntropy;
+    U32 fseEntropy;
+    XXH64_state_t xxhState;
+    size_t headerSize;
+    ZSTD_format_e format;
+    const BYTE* litPtr;
+    ZSTD_customMem customMem;
+    size_t litSize;
+    size_t rleSize;
+    size_t staticSize;
+    int bmi2;                     /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
+
+    /* dictionary */
+    ZSTD_DDict* ddictLocal;
+    const ZSTD_DDict* ddict;     /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
+    U32 dictID;
+    int ddictIsCold;             /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in CPU cache) */
+    ZSTD_dictUses_e dictUses;
+
+    /* streaming */
+    ZSTD_dStreamStage streamStage;
+    char*  inBuff;
+    size_t inBuffSize;
+    size_t inPos;
+    size_t maxWindowSize;
+    char*  outBuff;
+    size_t outBuffSize;
+    size_t outStart;
+    size_t outEnd;
+    size_t lhSize;
+    void* legacyContext;
+    U32 previousLegacyVersion;
+    U32 legacyVersion;
+    U32 hostageByte;
+    int noForwardProgress;
+
+    /* workspace */
+    BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
+    BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
+};  /* typedef'd to ZSTD_DCtx within "zstd.h" */
+
+
+/*-*******************************************************
+ *  Shared internal functions
+ *********************************************************/
+
+/*! ZSTD_loadDEntropy() :
+ *  dict : must point at beginning of a valid zstd dictionary.
+ * @return : size of entropy tables read */
+size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
+                   const void* const dict, size_t const dictSize);
+
+/*! ZSTD_checkContinuity() :
+ *  check if next `dst` follows previous position, where decompression ended.
+ *  If yes, do nothing (continue on current segment).
+ *  If not, classify previous segment as "external dictionary", and start a new segment.
+ *  This function cannot fail. */
+void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst);
+
+
+#endif /* ZSTD_DECOMPRESS_INTERNAL_H */
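
For context on the table declarations above: SEQSYMBOL_TABLE_SIZE reserves one ZSTD_seqSymbol_header cell plus 2^log decoding states per FSE table. A minimal standalone sketch of that sizing arithmetic, assuming the log values defined in zstd_internal.h (LLFSELog = 9, OffFSELog = 8, MLFSELog = 9) rather than anything introduced by this diff:

#include <stdio.h>

#define SEQSYMBOL_TABLE_SIZE(log)   (1 + (1 << (log)))

int main(void)
{
    /* One header cell, then 1 << log state entries per table. */
    printf("LLTable: %d entries\n", SEQSYMBOL_TABLE_SIZE(9));   /* 513 */
    printf("OFTable: %d entries\n", SEQSYMBOL_TABLE_SIZE(8));   /* 257 */
    printf("MLTable: %d entries\n", SEQSYMBOL_TABLE_SIZE(9));   /* 513 */
    return 0;
}
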
diff --git a/vendor/github.com/DataDog/zstd/zstd_double_fast.c b/vendor/github.com/DataDog/zstd/zstd_double_fast.c
index 86e6b39..5957255 100644
--- a/vendor/github.com/DataDog/zstd/zstd_double_fast.c
+++ b/vendor/github.com/DataDog/zstd/zstd_double_fast.c
@@ -13,12 +13,12 @@
 
 
 void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
-                              ZSTD_compressionParameters const* cParams,
-                              void const* end)
+                              void const* end, ZSTD_dictTableLoadMethod_e dtlm)
 {
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const hashLarge = ms->hashTable;
     U32  const hBitsL = cParams->hashLog;
-    U32  const mls = cParams->searchLength;
+    U32  const mls = cParams->minMatch;
     U32* const hashSmall = ms->chainTable;
     U32  const hBitsS = cParams->chainLog;
     const BYTE* const base = ms->window.base;
@@ -40,17 +40,20 @@
                 hashSmall[smHash] = current + i;
             if (i == 0 || hashLarge[lgHash] == 0)
                 hashLarge[lgHash] = current + i;
-        }
-    }
+            /* Only load extra positions for ZSTD_dtlm_full */
+            if (dtlm == ZSTD_dtlm_fast)
+                break;
+    }   }
 }
 
 
 FORCE_INLINE_TEMPLATE
 size_t ZSTD_compressBlock_doubleFast_generic(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize,
-        U32 const mls /* template */)
+        void const* src, size_t srcSize,
+        U32 const mls /* template */, ZSTD_dictMode_e const dictMode)
 {
+    ZSTD_compressionParameters const* cParams = &ms->cParams;
     U32* const hashLong = ms->hashTable;
     const U32 hBitsL = cParams->hashLog;
     U32* const hashSmall = ms->chainTable;
@@ -59,235 +62,424 @@
     const BYTE* const istart = (const BYTE*)src;
     const BYTE* ip = istart;
     const BYTE* anchor = istart;
-    const U32 lowestIndex = ms->window.dictLimit;
-    const BYTE* const lowest = base + lowestIndex;
+    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
+    const U32 lowestValid = ms->window.dictLimit;
+    const U32 maxDistance = 1U << cParams->windowLog;
+    const U32 prefixLowestIndex = (endIndex - lowestValid > maxDistance) ? endIndex - maxDistance : lowestValid;
+    const BYTE* const prefixLowest = base + prefixLowestIndex;
     const BYTE* const iend = istart + srcSize;
     const BYTE* const ilimit = iend - HASH_READ_SIZE;
     U32 offset_1=rep[0], offset_2=rep[1];
     U32 offsetSaved = 0;
 
+    const ZSTD_matchState_t* const dms = ms->dictMatchState;
+    const ZSTD_compressionParameters* const dictCParams =
+                                     dictMode == ZSTD_dictMatchState ?
+                                     &dms->cParams : NULL;
+    const U32* const dictHashLong  = dictMode == ZSTD_dictMatchState ?
+                                     dms->hashTable : NULL;
+    const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ?
+                                     dms->chainTable : NULL;
+    const U32 dictStartIndex       = dictMode == ZSTD_dictMatchState ?
+                                     dms->window.dictLimit : 0;
+    const BYTE* const dictBase     = dictMode == ZSTD_dictMatchState ?
+                                     dms->window.base : NULL;
+    const BYTE* const dictStart    = dictMode == ZSTD_dictMatchState ?
+                                     dictBase + dictStartIndex : NULL;
+    const BYTE* const dictEnd      = dictMode == ZSTD_dictMatchState ?
+                                     dms->window.nextSrc : NULL;
+    const U32 dictIndexDelta       = dictMode == ZSTD_dictMatchState ?
+                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :
+                                     0;
+    const U32 dictHBitsL           = dictMode == ZSTD_dictMatchState ?
+                                     dictCParams->hashLog : hBitsL;
+    const U32 dictHBitsS           = dictMode == ZSTD_dictMatchState ?
+                                     dictCParams->chainLog : hBitsS;
+    const U32 dictAndPrefixLength  = (U32)(ip - prefixLowest + dictEnd - dictStart);
+
+    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic");
+
+    assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);
+
+    /* if a dictionary is attached, it must be within window range */
+    if (dictMode == ZSTD_dictMatchState) {
+        assert(lowestValid + maxDistance >= endIndex);
+    }
+
     /* init */
-    ip += (ip==lowest);
-    {   U32 const maxRep = (U32)(ip-lowest);
+    ip += (dictAndPrefixLength == 0);
+    if (dictMode == ZSTD_noDict) {
+        U32 const maxRep = (U32)(ip - prefixLowest);
         if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
         if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
     }
+    if (dictMode == ZSTD_dictMatchState) {
+        /* dictMatchState repCode checks don't currently support
+         * disabling a repCode by setting it to 0. */
+        assert(offset_1 <= dictAndPrefixLength);
+        assert(offset_2 <= dictAndPrefixLength);
+    }
 
     /* Main Search Loop */
     while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
         size_t mLength;
+        U32 offset;
         size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
         size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
+        size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
+        size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
         U32 const current = (U32)(ip-base);
         U32 const matchIndexL = hashLong[h2];
-        U32 const matchIndexS = hashSmall[h];
+        U32 matchIndexS = hashSmall[h];
         const BYTE* matchLong = base + matchIndexL;
         const BYTE* match = base + matchIndexS;
+        const U32 repIndex = current + 1 - offset_1;
+        const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
+                            && repIndex < prefixLowestIndex) ?
+                               dictBase + (repIndex - dictIndexDelta) :
+                               base + repIndex;
         hashLong[h2] = hashSmall[h] = current;   /* update hash tables */
 
-        assert(offset_1 <= current);   /* supposed guaranteed by construction */
-        if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
-            /* favor repcode */
+        /* check dictMatchState repcode */
+        if (dictMode == ZSTD_dictMatchState
+            && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+            const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+            ip++;
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
+            goto _match_stored;
+        }
+
+        /* check noDict repcode */
+        if ( dictMode == ZSTD_noDict
+          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
             mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
             ip++;
-            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
-        } else {
-            U32 offset;
-            if ( (matchIndexL > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip)) ) {
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
+            goto _match_stored;
+        }
+
+        if (matchIndexL > prefixLowestIndex) {
+            /* check prefix long match */
+            if (MEM_read64(matchLong) == MEM_read64(ip)) {
                 mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
                 offset = (U32)(ip-matchLong);
-                while (((ip>anchor) & (matchLong>lowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
-            } else if ( (matchIndexS > lowestIndex) && (MEM_read32(match) == MEM_read32(ip)) ) {
-                size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
-                U32 const matchIndexL3 = hashLong[hl3];
-                const BYTE* matchL3 = base + matchIndexL3;
-                hashLong[hl3] = current + 1;
-                if ( (matchIndexL3 > lowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1)) ) {
+                while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+                goto _match_found;
+            }
+        } else if (dictMode == ZSTD_dictMatchState) {
+            /* check dictMatchState long match */
+            U32 const dictMatchIndexL = dictHashLong[dictHL];
+            const BYTE* dictMatchL = dictBase + dictMatchIndexL;
+            assert(dictMatchL < dictEnd);
+
+            if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
+                mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
+                offset = (U32)(current - dictMatchIndexL - dictIndexDelta);
+                while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
+                goto _match_found;
+        }   }
+
+        if (matchIndexS > prefixLowestIndex) {
+            /* check prefix short match */
+            if (MEM_read32(match) == MEM_read32(ip)) {
+                goto _search_next_long;
+            }
+        } else if (dictMode == ZSTD_dictMatchState) {
+            /* check dictMatchState short match */
+            U32 const dictMatchIndexS = dictHashSmall[dictHS];
+            match = dictBase + dictMatchIndexS;
+            matchIndexS = dictMatchIndexS + dictIndexDelta;
+
+            if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
+                goto _search_next_long;
+        }   }
+
+        ip += ((ip-anchor) >> kSearchStrength) + 1;
+        continue;
+
+_search_next_long:
+
+        {   size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+            size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
+            U32 const matchIndexL3 = hashLong[hl3];
+            const BYTE* matchL3 = base + matchIndexL3;
+            hashLong[hl3] = current + 1;
+
+            /* check prefix long +1 match */
+            if (matchIndexL3 > prefixLowestIndex) {
+                if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
                     mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
                     ip++;
                     offset = (U32)(ip-matchL3);
-                    while (((ip>anchor) & (matchL3>lowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
-                } else {
-                    mLength = ZSTD_count(ip+4, match+4, iend) + 4;
-                    offset = (U32)(ip-match);
-                    while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+                    while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
+                    goto _match_found;
                 }
-            } else {
-                ip += ((ip-anchor) >> kSearchStrength) + 1;
-                continue;
-            }
+            } else if (dictMode == ZSTD_dictMatchState) {
+                /* check dict long +1 match */
+                U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];
+                const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
+                assert(dictMatchL3 < dictEnd);
+                if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
+                    mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
+                    ip++;
+                    offset = (U32)(current + 1 - dictMatchIndexL3 - dictIndexDelta);
+                    while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
+                    goto _match_found;
+        }   }   }
 
-            offset_2 = offset_1;
-            offset_1 = offset;
-
-            ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+        /* if no long +1 match, explore the short match we found */
+        if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) {
+            mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
+            offset = (U32)(current - matchIndexS);
+            while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+        } else {
+            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
+            offset = (U32)(ip - match);
+            while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
         }
 
+        /* fall-through */
+
+_match_found:
+        offset_2 = offset_1;
+        offset_1 = offset;
+
+        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+
+_match_stored:
         /* match found */
         ip += mLength;
         anchor = ip;
 
         if (ip <= ilimit) {
-            /* Fill Table */
-            hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] =
-                hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2;  /* here because current+2 could be > iend-8 */
-            hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] =
-                hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
+            /* Complementary insertion */
+            /* done after ilimit test, as candidates could be > iend-8 */
+            {   U32 const indexToInsert = current+2;
+                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
+                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
+                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
+            }
 
             /* check immediate repcode */
-            while ( (ip <= ilimit)
-                 && ( (offset_2>0)
-                 & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
-                /* store sequence */
-                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
-                { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
-                hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
-                hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
-                ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
-                ip += rLength;
-                anchor = ip;
-                continue;   /* faster when present ... (?) */
-    }   }   }
+            if (dictMode == ZSTD_dictMatchState) {
+                while (ip <= ilimit) {
+                    U32 const current2 = (U32)(ip-base);
+                    U32 const repIndex2 = current2 - offset_2;
+                    const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState
+                        && repIndex2 < prefixLowestIndex ?
+                            dictBase - dictIndexDelta + repIndex2 :
+                            base + repIndex2;
+                    if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+                       && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+                        const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
+                        size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
+                        U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
+                        ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+                        hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
+                        hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
+                        ip += repLength2;
+                        anchor = ip;
+                        continue;
+                    }
+                    break;
+            }   }
+
+            if (dictMode == ZSTD_noDict) {
+                while ( (ip <= ilimit)
+                     && ( (offset_2>0)
+                        & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+                    /* store sequence */
+                    size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+                    U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  /* swap offset_2 <=> offset_1 */
+                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
+                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
+                    ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
+                    ip += rLength;
+                    anchor = ip;
+                    continue;   /* faster when present ... (?) */
+        }   }   }
+    }   /* while (ip < ilimit) */
 
     /* save reps for next block */
     rep[0] = offset_1 ? offset_1 : offsetSaved;
     rep[1] = offset_2 ? offset_2 : offsetSaved;
 
     /* Return the last literals size */
-    return iend - anchor;
+    return (size_t)(iend - anchor);
 }
 
 
 size_t ZSTD_compressBlock_doubleFast(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 {
-    const U32 mls = cParams->searchLength;
+    const U32 mls = ms->cParams.minMatch;
     switch(mls)
     {
     default: /* includes case 3 */
     case 4 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 4);
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
     case 5 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 5);
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
     case 6 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 6);
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
     case 7 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 7);
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
+    }
+}
+
+
+size_t ZSTD_compressBlock_doubleFast_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    const U32 mls = ms->cParams.minMatch;
+    switch(mls)
+    {
+    default: /* includes case 3 */
+    case 4 :
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);
+    case 5 :
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);
+    case 6 :
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);
+    case 7 :
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);
     }
 }
 
 
 static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize,
+        void const* src, size_t srcSize,
         U32 const mls /* template */)
 {
+    ZSTD_compressionParameters const* cParams = &ms->cParams;
     U32* const hashLong = ms->hashTable;
     U32  const hBitsL = cParams->hashLog;
     U32* const hashSmall = ms->chainTable;
     U32  const hBitsS = cParams->chainLog;
-    const BYTE* const base = ms->window.base;
-    const BYTE* const dictBase = ms->window.dictBase;
     const BYTE* const istart = (const BYTE*)src;
     const BYTE* ip = istart;
     const BYTE* anchor = istart;
-    const U32   lowestIndex = ms->window.lowLimit;
-    const BYTE* const dictStart = dictBase + lowestIndex;
-    const U32   dictLimit = ms->window.dictLimit;
-    const BYTE* const lowPrefixPtr = base + dictLimit;
-    const BYTE* const dictEnd = dictBase + dictLimit;
     const BYTE* const iend = istart + srcSize;
     const BYTE* const ilimit = iend - 8;
+    const BYTE* const base = ms->window.base;
+    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
+    const U32   maxDistance = 1U << cParams->windowLog;
+    const U32   lowestValid = ms->window.lowLimit;
+    const U32   lowLimit = (endIndex - lowestValid > maxDistance) ? endIndex - maxDistance : lowestValid;
+    const U32   dictStartIndex = lowLimit;
+    const U32   dictLimit = ms->window.dictLimit;
+    const U32   prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
+    const BYTE* const prefixStart = base + prefixStartIndex;
+    const BYTE* const dictBase = ms->window.dictBase;
+    const BYTE* const dictStart = dictBase + dictStartIndex;
+    const BYTE* const dictEnd = dictBase + prefixStartIndex;
     U32 offset_1=rep[0], offset_2=rep[1];
 
+    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);
+
+    /* if extDict is invalidated due to maxDistance, switch to "regular" variant */
+    if (prefixStartIndex == dictStartIndex)
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, mls, ZSTD_noDict);
+
     /* Search Loop */
     while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
         const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
         const U32 matchIndex = hashSmall[hSmall];
-        const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
+        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
         const BYTE* match = matchBase + matchIndex;
 
         const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
         const U32 matchLongIndex = hashLong[hLong];
-        const BYTE* matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
+        const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
         const BYTE* matchLong = matchLongBase + matchLongIndex;
 
         const U32 current = (U32)(ip-base);
         const U32 repIndex = current + 1 - offset_1;   /* offset_1 expected <= current +1 */
-        const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
-        const BYTE* repMatch = repBase + repIndex;
+        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
+        const BYTE* const repMatch = repBase + repIndex;
         size_t mLength;
         hashSmall[hSmall] = hashLong[hLong] = current;   /* update hash table */
 
-        if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
-           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
-            const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
-            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
+        if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
+            & (repIndex > dictStartIndex))
+          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+            const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
             ip++;
-            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
         } else {
-            if ((matchLongIndex > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
-                const BYTE* matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
-                const BYTE* lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
+            if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
+                const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
+                const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
                 U32 offset;
-                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, lowPrefixPtr) + 8;
+                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
                 offset = current - matchLongIndex;
                 while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
                 offset_2 = offset_1;
                 offset_1 = offset;
-                ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
 
-            } else if ((matchIndex > lowestIndex) && (MEM_read32(match) == MEM_read32(ip))) {
+            } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
                 size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
                 U32 const matchIndex3 = hashLong[h3];
-                const BYTE* const match3Base = matchIndex3 < dictLimit ? dictBase : base;
+                const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
                 const BYTE* match3 = match3Base + matchIndex3;
                 U32 offset;
                 hashLong[h3] = current + 1;
-                if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
-                    const BYTE* matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
-                    const BYTE* lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
-                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, lowPrefixPtr) + 8;
+                if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
+                    const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
+                    const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
+                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
                     ip++;
                     offset = current+1 - matchIndex3;
                     while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
                 } else {
-                    const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
-                    const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
-                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
+                    const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
+                    const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
+                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
                     offset = current - matchIndex;
                     while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                 }
                 offset_2 = offset_1;
                 offset_1 = offset;
-                ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
 
             } else {
                 ip += ((ip-anchor) >> kSearchStrength) + 1;
                 continue;
         }   }
 
-        /* found a match : store it */
+        /* move to next sequence start */
         ip += mLength;
         anchor = ip;
 
         if (ip <= ilimit) {
-            /* Fill Table */
-            hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2;
-            hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2;
-            hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
-            hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+            /* Complementary insertion */
+            /* done after ilimit test, as candidates could be > iend-8 */
+            {   U32 const indexToInsert = current+2;
+                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
+                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
+                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
+            }
+
             /* check immediate repcode */
             while (ip <= ilimit) {
                 U32 const current2 = (U32)(ip-base);
                 U32 const repIndex2 = current2 - offset_2;
-                const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
-                if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex))  /* intentional overflow */
-                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
-                    const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
-                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
-                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
+                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
+                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3)   /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
+                    & (repIndex2 > dictStartIndex))
+                  && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+                    U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                     ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
                     hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
                     hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
@@ -303,25 +495,25 @@
     rep[1] = offset_2;
 
     /* Return the last literals size */
-    return iend - anchor;
+    return (size_t)(iend - anchor);
 }
 
 
 size_t ZSTD_compressBlock_doubleFast_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 {
-    U32 const mls = cParams->searchLength;
+    U32 const mls = ms->cParams.minMatch;
     switch(mls)
     {
     default: /* includes case 3 */
     case 4 :
-        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 4);
+        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
     case 5 :
-        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 5);
+        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
     case 6 :
-        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 6);
+        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
     case 7 :
-        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 7);
+        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
     }
 }
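
The wrappers above all follow the same pattern: ZSTD_compressBlock_doubleFast_generic is marked FORCE_INLINE_TEMPLATE, and each switch case passes mls (and now the dict mode) as compile-time constants, so the compiler emits one specialized copy per case instead of branching on them in the hot loop. A reduced sketch of that dispatch idiom, with hypothetical names that are not part of zstd:

#include <stddef.h>

/* Force-inlined worker: `mls` is a constant in each instantiation, so any
 * logic depending on it folds at compile time. */
static inline size_t worker_generic(const char* src, size_t srcSize, unsigned mls)
{
    return srcSize >= mls ? srcSize - mls : 0;
}

/* Thin wrapper: switch on the runtime value once, then call a
 * constant-specialized instantiation. */
size_t worker(const char* src, size_t srcSize, unsigned mls)
{
    switch (mls) {
    default: /* includes case 3 */
    case 4: return worker_generic(src, srcSize, 4);
    case 5: return worker_generic(src, srcSize, 5);
    case 6: return worker_generic(src, srcSize, 6);
    case 7: return worker_generic(src, srcSize, 7);
    }
}
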
diff --git a/vendor/github.com/DataDog/zstd/zstd_double_fast.h b/vendor/github.com/DataDog/zstd/zstd_double_fast.h
index 6d80b27..4fa31ac 100644
--- a/vendor/github.com/DataDog/zstd/zstd_double_fast.h
+++ b/vendor/github.com/DataDog/zstd/zstd_double_fast.h
@@ -19,14 +19,16 @@
 #include "zstd_compress_internal.h"     /* ZSTD_CCtx, size_t */
 
 void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
-                              ZSTD_compressionParameters const* cParams,
-                              void const* end);
+                              void const* end, ZSTD_dictTableLoadMethod_e dtlm);
 size_t ZSTD_compressBlock_doubleFast(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_doubleFast_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_doubleFast_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 
 
 #if defined (__cplusplus)
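
The prototype changes above drop the explicit cParams argument: block compressors now read parameters from ms->cParams, and the hash-table fill additionally takes a ZSTD_dictTableLoadMethod_e. A sketch of the corresponding call-site update; the caller function itself is hypothetical and only illustrates the new shapes:

#include "zstd_compress_internal.h"
#include "zstd_double_fast.h"

/* Hypothetical caller, for illustration only; not part of this diff. */
static size_t exampleBlock(ZSTD_matchState_t* ms, seqStore_t* seqStore,
                           U32 rep[ZSTD_REP_NUM],
                           const void* src, size_t srcSize, const void* iend)
{
    /* Old call shape: ZSTD_fillDoubleHashTable(ms, &cParams, iend);      */
    /* New call shape: cParams travels inside the match state, and the
     * caller chooses how thoroughly the dictionary is indexed. */
    ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_full);
    return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize);
}
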
diff --git a/vendor/github.com/DataDog/zstd/zstd_errors.h b/vendor/github.com/DataDog/zstd/zstd_errors.h
index 57533f2..92a3433 100644
--- a/vendor/github.com/DataDog/zstd/zstd_errors.h
+++ b/vendor/github.com/DataDog/zstd/zstd_errors.h
@@ -72,6 +72,7 @@
   ZSTD_error_workSpace_tooSmall= 66,
   ZSTD_error_dstSize_tooSmall = 70,
   ZSTD_error_srcSize_wrong    = 72,
+  ZSTD_error_dstBuffer_null   = 74,
   /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
   ZSTD_error_frameIndex_tooLarge = 100,
   ZSTD_error_seekableIO          = 102,
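
The new ZSTD_error_dstBuffer_null code is queryable through the stable helpers of the public API (ZSTD_isError, ZSTD_getErrorCode, ZSTD_getErrorName). A small usage sketch, checking a size_t return value from any zstd call:

#include <stdio.h>
#include <zstd.h>
#include <zstd_errors.h>

/* Report a zstd return value; illustrative only. */
static void reportResult(size_t ret)
{
    if (ZSTD_isError(ret)) {
        if (ZSTD_getErrorCode(ret) == ZSTD_error_dstBuffer_null)
            fprintf(stderr, "destination buffer was NULL\n");
        else
            fprintf(stderr, "zstd: %s\n", ZSTD_getErrorName(ret));
    }
}
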
diff --git a/vendor/github.com/DataDog/zstd/zstd_fast.c b/vendor/github.com/DataDog/zstd/zstd_fast.c
index df4d28b..a05b8a4 100644
--- a/vendor/github.com/DataDog/zstd/zstd_fast.c
+++ b/vendor/github.com/DataDog/zstd/zstd_fast.c
@@ -13,12 +13,13 @@
 
 
 void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
-                        ZSTD_compressionParameters const* cParams,
-                        void const* end)
+                        const void* const end,
+                        ZSTD_dictTableLoadMethod_e dtlm)
 {
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const hashTable = ms->hashTable;
     U32  const hBits = cParams->hashLog;
-    U32  const mls = cParams->searchLength;
+    U32  const mls = cParams->minMatch;
     const BYTE* const base = ms->window.base;
     const BYTE* ip = base + ms->nextToUpdate;
     const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
@@ -27,69 +28,278 @@
     /* Always insert every fastHashFillStep position into the hash table.
      * Insert the other positions if their hash entry is empty.
      */
-    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
+    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
         U32 const current = (U32)(ip - base);
-        U32 i;
-        for (i = 0; i < fastHashFillStep; ++i) {
-            size_t const hash = ZSTD_hashPtr(ip + i, hBits, mls);
-            if (i == 0 || hashTable[hash] == 0)
-                hashTable[hash] = current + i;
-        }
-    }
+        size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
+        hashTable[hash0] = current;
+        if (dtlm == ZSTD_dtlm_fast) continue;
+        /* Only load extra positions for ZSTD_dtlm_full */
+        {   U32 p;
+            for (p = 1; p < fastHashFillStep; ++p) {
+                size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
+                if (hashTable[hash] == 0) {  /* not yet filled */
+                    hashTable[hash] = current + p;
+    }   }   }   }
 }
 
+
 FORCE_INLINE_TEMPLATE
 size_t ZSTD_compressBlock_fast_generic(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize,
-        U32 const hlog, U32 const stepSize, U32 const mls)
+        U32 const mls)
 {
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const hashTable = ms->hashTable;
+    U32 const hlog = cParams->hashLog;
+    /* support stepSize of 0 */
+    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
     const BYTE* const base = ms->window.base;
     const BYTE* const istart = (const BYTE*)src;
-    const BYTE* ip = istart;
+    /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
+    const BYTE* ip0 = istart;
+    const BYTE* ip1;
     const BYTE* anchor = istart;
-    const U32   lowestIndex = ms->window.dictLimit;
-    const BYTE* const lowest = base + lowestIndex;
+    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
+    const U32   maxDistance = 1U << cParams->windowLog;
+    const U32   validStartIndex = ms->window.dictLimit;
+    const U32   prefixStartIndex = (endIndex - validStartIndex > maxDistance) ? endIndex - maxDistance : validStartIndex;
+    const BYTE* const prefixStart = base + prefixStartIndex;
     const BYTE* const iend = istart + srcSize;
     const BYTE* const ilimit = iend - HASH_READ_SIZE;
     U32 offset_1=rep[0], offset_2=rep[1];
     U32 offsetSaved = 0;
 
     /* init */
-    ip += (ip==lowest);
-    {   U32 const maxRep = (U32)(ip-lowest);
+    ip0 += (ip0 == prefixStart);
+    ip1 = ip0 + 1;
+    {
+        U32 const maxRep = (U32)(ip0 - prefixStart);
         if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
         if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
     }
 
     /* Main Search Loop */
+    while (ip1 < ilimit) {   /* < instead of <=, because check at ip0+2 */
+        size_t mLength;
+        BYTE const* ip2 = ip0 + 2;
+        size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
+        U32 const val0 = MEM_read32(ip0);
+        size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
+        U32 const val1 = MEM_read32(ip1);
+        U32 const current0 = (U32)(ip0-base);
+        U32 const current1 = (U32)(ip1-base);
+        U32 const matchIndex0 = hashTable[h0];
+        U32 const matchIndex1 = hashTable[h1];
+        BYTE const* repMatch = ip2-offset_1;
+        const BYTE* match0 = base + matchIndex0;
+        const BYTE* match1 = base + matchIndex1;
+        U32 offcode;
+        hashTable[h0] = current0;   /* update hash table */
+        hashTable[h1] = current1;   /* update hash table */
+
+        assert(ip0 + 1 == ip1);
+
+        if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
+            mLength = ip2[-1] == repMatch[-1] ? 1 : 0;
+            ip0 = ip2 - mLength;
+            match0 = repMatch - mLength;
+            offcode = 0;
+            goto _match;
+        }
+        if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
+            /* found a regular match */
+            goto _offset;
+        }
+        if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
+            /* found a regular match after one literal */
+            ip0 = ip1;
+            match0 = match1;
+            goto _offset;
+        }
+        {
+            size_t const step = ((ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
+            assert(step >= 2);
+            ip0 += step;
+            ip1 += step;
+            continue;
+        }
+_offset: /* Requires: ip0, match0 */
+        /* Compute the offset code */
+        offset_2 = offset_1;
+        offset_1 = (U32)(ip0-match0);
+        offcode = offset_1 + ZSTD_REP_MOVE;
+        mLength = 0;
+        /* Count the backwards match length */
+        while (((ip0>anchor) & (match0>prefixStart))
+             && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */
+
+_match: /* Requires: ip0, match0, offcode */
+        /* Count the forward length */
+        mLength += ZSTD_count(ip0+mLength+4, match0+mLength+4, iend) + 4;
+        ZSTD_storeSeq(seqStore, ip0-anchor, anchor, offcode, mLength-MINMATCH);
+        /* match found */
+        ip0 += mLength;
+        anchor = ip0;
+        ip1 = ip0 + 1;
+
+        if (ip0 <= ilimit) {
+            /* Fill Table */
+            assert(base+current0+2 > istart);  /* check base overflow */
+            hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
+            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
+
+            while ( (ip0 <= ilimit)
+                 && ( (offset_2>0)
+                    & (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) )) {
+                /* store sequence */
+                size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
+                U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  /* swap offset_2 <=> offset_1 */
+                hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
+                ip0 += rLength;
+                ip1 = ip0 + 1;
+                ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
+                anchor = ip0;
+                continue;   /* faster when present (confirmed on gcc-8) ... (?) */
+            }
+        }
+    }
+
+    /* save reps for next block */
+    rep[0] = offset_1 ? offset_1 : offsetSaved;
+    rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+    /* Return the last literals size */
+    return (size_t)(iend - anchor);
+}
+
+
+size_t ZSTD_compressBlock_fast(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    ZSTD_compressionParameters const* cParams = &ms->cParams;
+    U32 const mls = cParams->minMatch;
+    assert(ms->dictMatchState == NULL);
+    switch(mls)
+    {
+    default: /* includes case 3 */
+    case 4 :
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
+    case 5 :
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
+    case 6 :
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
+    case 7 :
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
+    }
+}
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_fast_dictMatchState_generic(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize, U32 const mls)
+{
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
+    U32* const hashTable = ms->hashTable;
+    U32 const hlog = cParams->hashLog;
+    /* support stepSize of 0 */
+    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
+    const BYTE* const base = ms->window.base;
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* ip = istart;
+    const BYTE* anchor = istart;
+    const U32   prefixStartIndex = ms->window.dictLimit;
+    const BYTE* const prefixStart = base + prefixStartIndex;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - HASH_READ_SIZE;
+    U32 offset_1=rep[0], offset_2=rep[1];
+    U32 offsetSaved = 0;
+
+    const ZSTD_matchState_t* const dms = ms->dictMatchState;
+    const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
+    const U32* const dictHashTable = dms->hashTable;
+    const U32 dictStartIndex       = dms->window.dictLimit;
+    const BYTE* const dictBase     = dms->window.base;
+    const BYTE* const dictStart    = dictBase + dictStartIndex;
+    const BYTE* const dictEnd      = dms->window.nextSrc;
+    const U32 dictIndexDelta       = prefixStartIndex - (U32)(dictEnd - dictBase);
+    const U32 dictAndPrefixLength  = (U32)(ip - prefixStart + dictEnd - dictStart);
+    const U32 dictHLog             = dictCParams->hashLog;
+
+    /* if a dictionary is still attached, it necessarily means that
+     * it is within window size. So we just check it. */
+    const U32 maxDistance = 1U << cParams->windowLog;
+    const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
+    assert(endIndex - prefixStartIndex <= maxDistance);
+    (void)maxDistance; (void)endIndex;   /* these variables are not used when assert() is disabled */
+
+    /* ensure there will be no underflow
+     * when translating a dict index into a local index */
+    assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
+
+    /* init */
+    ip += (dictAndPrefixLength == 0);
+    /* dictMatchState repCode checks don't currently support
+     * disabling a repCode by setting it to 0. */
+    assert(offset_1 <= dictAndPrefixLength);
+    assert(offset_2 <= dictAndPrefixLength);
+
+    /* Main Search Loop */
     while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
         size_t mLength;
         size_t const h = ZSTD_hashPtr(ip, hlog, mls);
         U32 const current = (U32)(ip-base);
         U32 const matchIndex = hashTable[h];
         const BYTE* match = base + matchIndex;
+        const U32 repIndex = current + 1 - offset_1;
+        const BYTE* repMatch = (repIndex < prefixStartIndex) ?
+                               dictBase + (repIndex - dictIndexDelta) :
+                               base + repIndex;
         hashTable[h] = current;   /* update hash table */
 
-        if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
-            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+        if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
+          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
             ip++;
-            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
-        } else {
-            if ( (matchIndex <= lowestIndex)
-              || (MEM_read32(match) != MEM_read32(ip)) ) {
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
+        } else if ( (matchIndex <= prefixStartIndex) ) {
+            size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
+            U32 const dictMatchIndex = dictHashTable[dictHash];
+            const BYTE* dictMatch = dictBase + dictMatchIndex;
+            if (dictMatchIndex <= dictStartIndex ||
+                MEM_read32(dictMatch) != MEM_read32(ip)) {
                 assert(stepSize >= 1);
                 ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                 continue;
-            }
-            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
-            {   U32 const offset = (U32)(ip-match);
-                while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+            } else {
+                /* found a dict match */
+                U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);
+                mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
+                while (((ip>anchor) & (dictMatch>dictStart))
+                     && (ip[-1] == dictMatch[-1])) {
+                    ip--; dictMatch--; mLength++;
+                } /* catch up */
                 offset_2 = offset_1;
                 offset_1 = offset;
-                ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
-        }   }
+                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+            }
+        } else if (MEM_read32(match) != MEM_read32(ip)) {
+            /* it's not a match, and we're not going to check the dictionary */
+            assert(stepSize >= 1);
+            ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+            continue;
+        } else {
+            /* found a regular match */
+            U32 const offset = (U32)(ip-match);
+            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
+            while (((ip>anchor) & (match>prefixStart))
+                 && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+            offset_2 = offset_1;
+            offset_1 = offset;
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+        }
 
         /* match found */
         ip += mLength;
@@ -97,108 +307,131 @@
 
         if (ip <= ilimit) {
             /* Fill Table */
+            assert(base+current+2 > istart);  /* check base overflow */
             hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;  /* here because current+2 could be > iend-8 */
             hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+
             /* check immediate repcode */
-            while ( (ip <= ilimit)
-                 && ( (offset_2>0)
-                 & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
-                /* store sequence */
-                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
-                { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; }  /* swap offset_2 <=> offset_1 */
-                hashTable[ZSTD_hashPtr(ip, hlog, mls)] = (U32)(ip-base);
-                ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
-                ip += rLength;
-                anchor = ip;
-                continue;   /* faster when present ... (?) */
-    }   }   }
+            while (ip <= ilimit) {
+                U32 const current2 = (U32)(ip-base);
+                U32 const repIndex2 = current2 - offset_2;
+                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
+                        dictBase - dictIndexDelta + repIndex2 :
+                        base + repIndex2;
+                if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
+                    ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
+                    ip += repLength2;
+                    anchor = ip;
+                    continue;
+                }
+                break;
+            }
+        }
+    }
 
     /* save reps for next block */
     rep[0] = offset_1 ? offset_1 : offsetSaved;
     rep[1] = offset_2 ? offset_2 : offsetSaved;
 
     /* Return the last literals size */
-    return iend - anchor;
+    return (size_t)(iend - anchor);
 }
 
-
-size_t ZSTD_compressBlock_fast(
+size_t ZSTD_compressBlock_fast_dictMatchState(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 {
-    U32 const hlog = cParams->hashLog;
-    U32 const mls = cParams->searchLength;
-    U32 const stepSize = cParams->targetLength;
+    ZSTD_compressionParameters const* cParams = &ms->cParams;
+    U32 const mls = cParams->minMatch;
+    assert(ms->dictMatchState != NULL);
     switch(mls)
     {
     default: /* includes case 3 */
     case 4 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 4);
+        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
     case 5 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 5);
+        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
     case 6 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 6);
+        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
     case 7 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 7);
+        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
     }
 }
 
 
 static size_t ZSTD_compressBlock_fast_extDict_generic(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize,
-        U32 const hlog, U32 const stepSize, U32 const mls)
+        void const* src, size_t srcSize, U32 const mls)
 {
-    U32* hashTable = ms->hashTable;
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
+    U32* const hashTable = ms->hashTable;
+    U32 const hlog = cParams->hashLog;
+    /* support stepSize of 0 */
+    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
     const BYTE* const base = ms->window.base;
     const BYTE* const dictBase = ms->window.dictBase;
     const BYTE* const istart = (const BYTE*)src;
     const BYTE* ip = istart;
     const BYTE* anchor = istart;
-    const U32   lowestIndex = ms->window.lowLimit;
-    const BYTE* const dictStart = dictBase + lowestIndex;
+    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
+    const U32   maxDistance = 1U << cParams->windowLog;
+    const U32   validLow = ms->window.lowLimit;
+    const U32   lowLimit = (endIndex - validLow > maxDistance) ? endIndex - maxDistance : validLow;
+    const U32   dictStartIndex = lowLimit;
+    const BYTE* const dictStart = dictBase + dictStartIndex;
     const U32   dictLimit = ms->window.dictLimit;
-    const BYTE* const lowPrefixPtr = base + dictLimit;
-    const BYTE* const dictEnd = dictBase + dictLimit;
+    const U32   prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
+    const BYTE* const prefixStart = base + prefixStartIndex;
+    const BYTE* const dictEnd = dictBase + prefixStartIndex;
     const BYTE* const iend = istart + srcSize;
     const BYTE* const ilimit = iend - 8;
     U32 offset_1=rep[0], offset_2=rep[1];
 
+    /* switch to "regular" variant if extDict is invalidated due to maxDistance */
+    if (prefixStartIndex == dictStartIndex)
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);
+
     /* Search Loop */
     while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
         const size_t h = ZSTD_hashPtr(ip, hlog, mls);
-        const U32 matchIndex = hashTable[h];
-        const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
-        const BYTE* match = matchBase + matchIndex;
-        const U32 current = (U32)(ip-base);
-        const U32 repIndex = current + 1 - offset_1;   /* offset_1 expected <= current +1 */
-        const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
-        const BYTE* repMatch = repBase + repIndex;
+        const U32    matchIndex = hashTable[h];
+        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
+        const BYTE*  match = matchBase + matchIndex;
+        const U32    current = (U32)(ip-base);
+        const U32    repIndex = current + 1 - offset_1;
+        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
+        const BYTE* const repMatch = repBase + repIndex;
         size_t mLength;
         hashTable[h] = current;   /* update hash table */
+        assert(offset_1 <= current +1);   /* check repIndex */
 
-        if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
+        if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
-            const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
-            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
+            const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
             ip++;
-            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, 0, mLength-MINMATCH);
         } else {
-            if ( (matchIndex < lowestIndex) ||
+            if ( (matchIndex < dictStartIndex) ||
                  (MEM_read32(match) != MEM_read32(ip)) ) {
                 assert(stepSize >= 1);
                 ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                 continue;
             }
-            {   const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
-                const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
+            {   const BYTE* matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
+                const BYTE* lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
                 U32 offset;
-                mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
+                mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
                 while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                 offset = current - matchIndex;
                 offset_2 = offset_1;
                 offset_1 = offset;
-                ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
         }   }
 
         /* found a match : store it */
@@ -213,11 +446,11 @@
             while (ip <= ilimit) {
                 U32 const current2 = (U32)(ip-base);
                 U32 const repIndex2 = current2 - offset_2;
-                const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
-                if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex))  /* intentional overflow */
+                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
+                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex))  /* intentional overflow */
                    && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
-                    const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
-                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
+                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                     U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                     ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
                     hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
@@ -233,27 +466,26 @@
     rep[1] = offset_2;
 
     /* Return the last literals size */
-    return iend - anchor;
+    return (size_t)(iend - anchor);
 }
 
 
 size_t ZSTD_compressBlock_fast_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 {
-    U32 const hlog = cParams->hashLog;
-    U32 const mls = cParams->searchLength;
-    U32 const stepSize = cParams->targetLength;
+    ZSTD_compressionParameters const* cParams = &ms->cParams;
+    U32 const mls = cParams->minMatch;
     switch(mls)
     {
     default: /* includes case 3 */
     case 4 :
-        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 4);
+        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
     case 5 :
-        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 5);
+        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
     case 6 :
-        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 6);
+        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
     case 7 :
-        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 7);
+        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
     }
 }
diff --git a/vendor/github.com/DataDog/zstd/zstd_fast.h b/vendor/github.com/DataDog/zstd/zstd_fast.h
index f0438ad..b74a88c 100644
--- a/vendor/github.com/DataDog/zstd/zstd_fast.h
+++ b/vendor/github.com/DataDog/zstd/zstd_fast.h
@@ -19,14 +19,16 @@
 #include "zstd_compress_internal.h"
 
 void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
-                        ZSTD_compressionParameters const* cParams,
-                        void const* end);
+                        void const* end, ZSTD_dictTableLoadMethod_e dtlm);
 size_t ZSTD_compressBlock_fast(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_fast_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_fast_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 
 #if defined (__cplusplus)
 }
diff --git a/vendor/github.com/DataDog/zstd/zstd_internal.h b/vendor/github.com/DataDog/zstd/zstd_internal.h
index 65c08a8..81b16ea 100644
--- a/vendor/github.com/DataDog/zstd/zstd_internal.h
+++ b/vendor/github.com/DataDog/zstd/zstd_internal.h
@@ -21,6 +21,7 @@
 ***************************************/
 #include "compiler.h"
 #include "mem.h"
+#include "debug.h"                 /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
 #include "error_private.h"
 #define ZSTD_STATIC_LINKING_ONLY
 #include "zstd.h"
@@ -33,48 +34,15 @@
 #endif
 #include "xxhash.h"                /* XXH_reset, update, digest */
 
-
 #if defined (__cplusplus)
 extern "C" {
 #endif
 
-
-/*-*************************************
-*  Debug
-***************************************/
-#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1)
-#  include <assert.h>
-#else
-#  ifndef assert
-#    define assert(condition) ((void)0)
-#  endif
-#endif
-
-#define ZSTD_STATIC_ASSERT(c) { enum { ZSTD_static_assert = 1/(int)(!!(c)) }; }
-
-#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2)
-#  include <stdio.h>
-extern int g_debuglog_enable;
-/* recommended values for ZSTD_DEBUG display levels :
- * 1 : no display, enables assert() only
- * 2 : reserved for currently active debug path
- * 3 : events once per object lifetime (CCtx, CDict, etc.)
- * 4 : events once per frame
- * 5 : events once per block
- * 6 : events once per sequence (*very* verbose) */
-#  define RAWLOG(l, ...) {                                      \
-                if ((g_debuglog_enable) & (l<=ZSTD_DEBUG)) {    \
-                    fprintf(stderr, __VA_ARGS__);               \
-            }   }
-#  define DEBUGLOG(l, ...) {                                    \
-                if ((g_debuglog_enable) & (l<=ZSTD_DEBUG)) {    \
-                    fprintf(stderr, __FILE__ ": " __VA_ARGS__); \
-                    fprintf(stderr, " \n");                     \
-            }   }
-#else
-#  define RAWLOG(l, ...)      {}    /* disabled */
-#  define DEBUGLOG(l, ...)    {}    /* disabled */
-#endif
+/* ---- static assert (debug) --- */
+#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
+#define ZSTD_isError ERR_isError   /* for inlining */
+#define FSE_isError  ERR_isError
+#define HUF_isError  ERR_isError
 
 
 /*-*************************************
@@ -84,8 +52,50 @@
 #undef MAX
 #define MIN(a,b) ((a)<(b) ? (a) : (b))
 #define MAX(a,b) ((a)>(b) ? (a) : (b))
-#define CHECK_F(f) { size_t const errcod = f; if (ERR_isError(errcod)) return errcod; }  /* check and Forward error code */
-#define CHECK_E(f, e) { size_t const errcod = f; if (ERR_isError(errcod)) return ERROR(e); }  /* check and send Error code */
+
+/**
+ * Return the specified error if the condition evaluates to true.
+ *
+ * In debug modes, prints additional information. In order to do that
+ * (particularly, printing the conditional that failed), this can't just wrap
+ * RETURN_ERROR().
+ */
+#define RETURN_ERROR_IF(cond, err, ...) \
+  if (cond) { \
+    RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
+    RAWLOG(3, ": " __VA_ARGS__); \
+    RAWLOG(3, "\n"); \
+    return ERROR(err); \
+  }
+
+/**
+ * Unconditionally return the specified error.
+ *
+ * In debug modes, prints additional information.
+ */
+#define RETURN_ERROR(err, ...) \
+  do { \
+    RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
+    RAWLOG(3, ": " __VA_ARGS__); \
+    RAWLOG(3, "\n"); \
+    return ERROR(err); \
+  } while(0);
+
+/**
+ * If the provided expression evaluates to an error code, returns that error code.
+ *
+ * In debug modes, prints additional information.
+ */
+#define FORWARD_IF_ERROR(err, ...) \
+  do { \
+    size_t const err_code = (err); \
+    if (ERR_isError(err_code)) { \
+      RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
+      RAWLOG(3, ": " __VA_ARGS__); \
+      RAWLOG(3, "\n"); \
+      return err_code; \
+    } \
+  } while(0);
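+
+/* Usage sketch (illustrative): how the three macros above compose inside a
+ * size_t-returning function. `doWork` and the size bound are hypothetical;
+ * the error codes are standard ZSTD error names.
+ *
+ *   static size_t example(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+ *   {
+ *       RETURN_ERROR_IF(dstCapacity < 8, dstSize_tooSmall, "need %u bytes", 8U);
+ *       if (srcSize == 0) RETURN_ERROR(srcSize_wrong, "empty input");
+ *       FORWARD_IF_ERROR(doWork(dst, dstCapacity, src, srcSize));
+ *       return srcSize;
+ *   }
+ */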
 
 
 /*-*************************************
@@ -109,12 +119,10 @@
 #define BIT0   1
 
 #define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
-#define ZSTD_WINDOWLOG_DEFAULTMAX 27 /* Default maximum allowed window log */
 static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
 static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
 
-#define ZSTD_FRAMEIDSIZE 4
-static const size_t ZSTD_frameIdSize = ZSTD_FRAMEIDSIZE;  /* magic number size */
+#define ZSTD_FRAMEIDSIZE 4   /* magic number size */
 
 #define ZSTD_BLOCKHEADERSIZE 3   /* C standard doesn't allow a `static const` variable to be initialized from another `static const` variable */
 static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
@@ -184,19 +192,72 @@
 *  Shared functions to include for inlining
 *********************************************/
 static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+
 #define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
+static void ZSTD_copy16(void* dst, const void* src) { memcpy(dst, src, 16); }
+#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
+
+#define WILDCOPY_OVERLENGTH 8
+#define VECLEN 16
+
+typedef enum {
+    ZSTD_no_overlap,
+    ZSTD_overlap_src_before_dst,
+    /*  ZSTD_overlap_dst_before_src, */
+} ZSTD_overlap_e;
 
 /*! ZSTD_wildcopy() :
  *  custom version of memcpy(), can overwrite up to WILDCOPY_OVERLENGTH bytes (if length==0) */
-#define WILDCOPY_OVERLENGTH 8
-MEM_STATIC void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
+MEM_STATIC FORCE_INLINE_ATTR DONT_VECTORIZE
+void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e ovtype)
 {
+    ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
     const BYTE* ip = (const BYTE*)src;
     BYTE* op = (BYTE*)dst;
     BYTE* const oend = op + length;
-    do
-        COPY8(op, ip)
-    while (op < oend);
+
+    assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff < -8));
+    if (length < VECLEN || (ovtype == ZSTD_overlap_src_before_dst && diff < VECLEN)) {
+      do
+          COPY8(op, ip)
+      while (op < oend);
+    }
+    else {
+      if ((length & 8) == 0)
+        COPY8(op, ip);
+      do {
+        COPY16(op, ip);
+      }
+      while (op < oend);
+    }
+}
+
+/*! ZSTD_wildcopy_16min() :
+ *  same semantics as ZSTD_wildcopy() except guaranteed to be able to copy 16 bytes at the start */
+MEM_STATIC FORCE_INLINE_ATTR DONT_VECTORIZE
+void ZSTD_wildcopy_16min(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e ovtype)
+{
+    ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
+    const BYTE* ip = (const BYTE*)src;
+    BYTE* op = (BYTE*)dst;
+    BYTE* const oend = op + length;
+
+    assert(length >= 8);
+    assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff < -8));
+
+    if (ovtype == ZSTD_overlap_src_before_dst && diff < VECLEN) {
+      do
+          COPY8(op, ip)
+      while (op < oend);
+    }
+    else {
+      if ((length & 8) == 0)
+        COPY8(op, ip);
+      do {
+        COPY16(op, ip);
+      }
+      while (op < oend);
+    }
 }
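+
+/* Caller-contract sketch (informative): both wildcopy variants above may
+ * write up to WILDCOPY_OVERLENGTH bytes past dst+length, so destination
+ * buffers need that much slack. E.g. (hypothetical buffer):
+ *
+ *   BYTE out[64 + WILDCOPY_OVERLENGTH];
+ *   ZSTD_wildcopy(out, in, 64, ZSTD_no_overlap);   (may touch out[64..71])
+ */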
 
 MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd)   /* should be faster for decoding, but strangely, not verified on all platforms */
@@ -227,10 +288,23 @@
     BYTE* llCode;
     BYTE* mlCode;
     BYTE* ofCode;
+    size_t maxNbSeq;
+    size_t maxNbLit;
     U32   longLengthID;   /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
     U32   longLengthPos;
 } seqStore_t;
 
+/**
+ * Contains the compressed frame size and an upper-bound for the decompressed frame size.
+ * Note: before using `compressedSize`, check for errors using ZSTD_isError().
+ *       similarly, before using `decompressedBound`, check for errors using:
+ *          `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
+ */
+typedef struct {
+    size_t compressedSize;
+    unsigned long long decompressedBound;
+} ZSTD_frameSizeInfo;   /* decompress & legacy */
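+
+/* Validation sketch (illustrative; `getFrameSizeInfo` stands in for whichever
+ * internal routine fills this struct):
+ *
+ *   ZSTD_frameSizeInfo const info = getFrameSizeInfo(src, srcSize);
+ *   if (ZSTD_isError(info.compressedSize)) return info.compressedSize;
+ *   if (info.decompressedBound == ZSTD_CONTENTSIZE_ERROR)
+ *       return ERROR(corruption_detected);
+ */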
+
 const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);   /* compress & dictBuilder */
 void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);   /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
 
@@ -275,7 +349,7 @@
     blockType_e blockType;
     U32 lastBlock;
     U32 origSize;
-} blockProperties_t;
+} blockProperties_t;   /* declared here for decompress and fullbench */
 
 /*! ZSTD_getcBlockSize() :
  *  Provides the size of compressed block from block header `src` */
@@ -283,6 +357,13 @@
 size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
                           blockProperties_t* bpPtr);
 
+/*! ZSTD_decodeSeqHeaders() :
+ *  decode sequence header from src */
+/* Used by: decompress, fullbench (does not get its definition from here) */
+size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
+                       const void* src, size_t srcSize);
+
+
 #if defined (__cplusplus)
 }
 #endif
diff --git a/vendor/github.com/DataDog/zstd/zstd_lazy.c b/vendor/github.com/DataDog/zstd/zstd_lazy.c
index 9f15812..94d906c 100644
--- a/vendor/github.com/DataDog/zstd/zstd_lazy.c
+++ b/vendor/github.com/DataDog/zstd/zstd_lazy.c
@@ -16,11 +16,12 @@
 *  Binary Tree search
 ***************************************/
 
-void ZSTD_updateDUBT(
-                ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+static void
+ZSTD_updateDUBT(ZSTD_matchState_t* ms,
                 const BYTE* ip, const BYTE* iend,
                 U32 mls)
 {
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const hashTable = ms->hashTable;
     U32  const hashLog = cParams->hashLog;
 
@@ -59,14 +60,16 @@
  *  sort one already inserted but unsorted position
  *  assumption : current >= btlow == (current - btmask)
  *  doesn't fail */
-static void ZSTD_insertDUBT1(
-                 ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+static void
+ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
                  U32 current, const BYTE* inputEnd,
-                 U32 nbCompares, U32 btLow, int extDict)
+                 U32 nbCompares, U32 btLow,
+                 const ZSTD_dictMode_e dictMode)
 {
-    U32*   const bt = ms->chainTable;
-    U32    const btLog  = cParams->chainLog - 1;
-    U32    const btMask = (1 << btLog) - 1;
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
+    U32* const bt = ms->chainTable;
+    U32  const btLog  = cParams->chainLog - 1;
+    U32  const btMask = (1 << btLog) - 1;
     size_t commonLengthSmaller=0, commonLengthLarger=0;
     const BYTE* const base = ms->window.base;
     const BYTE* const dictBase = ms->window.dictBase;
@@ -78,9 +81,12 @@
     const BYTE* match;
     U32* smallerPtr = bt + 2*(current&btMask);
     U32* largerPtr  = smallerPtr + 1;
-    U32 matchIndex = *smallerPtr;
+    U32 matchIndex = *smallerPtr;   /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
     U32 dummy32;   /* to be nullified at the end */
-    U32 const windowLow = ms->window.lowLimit;
+    U32 const windowValid = ms->window.lowLimit;
+    U32 const maxDistance = 1U << cParams->windowLog;
+    U32 const windowLow = (current - windowValid > maxDistance) ? current - maxDistance : windowValid;
+
 
     DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
                 current, dictLimit, windowLow);
@@ -91,11 +97,16 @@
         U32* const nextPtr = bt + 2*(matchIndex & btMask);
         size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
         assert(matchIndex < current);
+        /* note : all candidates are now expected to be sorted,
+         * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK
+         * when a real index happens to equal ZSTD_DUBT_UNSORTED_MARK */
 
-        if ( (!extDict)
+        if ( (dictMode != ZSTD_extDict)
           || (matchIndex+matchLength >= dictLimit)  /* both in current segment*/
           || (current < dictLimit) /* both in extDict */) {
-            const BYTE* const mBase = !extDict || ((matchIndex+matchLength) >= dictLimit) ? base : dictBase;
+            const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
+                                     || (matchIndex+matchLength >= dictLimit)) ?
+                                        base : dictBase;
             assert( (matchIndex+matchLength >= dictLimit)   /* might be wrong if extDict is incorrectly set to 0 */
                  || (current < dictLimit) );
             match = mBase + matchIndex;
@@ -104,7 +115,7 @@
             match = dictBase + matchIndex;
             matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
             if (matchIndex+matchLength >= dictLimit)
-                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
+                match = base + matchIndex;   /* preparation for next read of match[matchLength] */
         }
 
         DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
@@ -138,13 +149,92 @@
 }
 
 
-static size_t ZSTD_DUBT_findBestMatch (
-                            ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-                            const BYTE* const ip, const BYTE* const iend,
-                            size_t* offsetPtr,
-                            U32 const mls,
-                            U32 const extDict)
+static size_t
+ZSTD_DUBT_findBetterDictMatch (
+        ZSTD_matchState_t* ms,
+        const BYTE* const ip, const BYTE* const iend,
+        size_t* offsetPtr,
+        size_t bestLength,
+        U32 nbCompares,
+        U32 const mls,
+        const ZSTD_dictMode_e dictMode)
 {
+    const ZSTD_matchState_t * const dms = ms->dictMatchState;
+    const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
+    const U32 * const dictHashTable = dms->hashTable;
+    U32         const hashLog = dmsCParams->hashLog;
+    size_t      const h  = ZSTD_hashPtr(ip, hashLog, mls);
+    U32               dictMatchIndex = dictHashTable[h];
+
+    const BYTE* const base = ms->window.base;
+    const BYTE* const prefixStart = base + ms->window.dictLimit;
+    U32         const current = (U32)(ip-base);
+    const BYTE* const dictBase = dms->window.base;
+    const BYTE* const dictEnd = dms->window.nextSrc;
+    U32         const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
+    U32         const dictLowLimit = dms->window.lowLimit;
+    U32         const dictIndexDelta = ms->window.lowLimit - dictHighLimit;
+
+    U32*        const dictBt = dms->chainTable;
+    U32         const btLog  = dmsCParams->chainLog - 1;
+    U32         const btMask = (1 << btLog) - 1;
+    U32         const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask;
+
+    size_t commonLengthSmaller=0, commonLengthLarger=0;
+
+    (void)dictMode;
+    assert(dictMode == ZSTD_dictMatchState);
+
+    while (nbCompares-- && (dictMatchIndex > dictLowLimit)) {
+        U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
+        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
+        const BYTE* match = dictBase + dictMatchIndex;
+        matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+        if (dictMatchIndex+matchLength >= dictHighLimit)
+            match = base + dictMatchIndex + dictIndexDelta;   /* to prepare for next usage of match[matchLength] */
+
+        if (matchLength > bestLength) {
+            U32 matchIndex = dictMatchIndex + dictIndexDelta;
+            if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
+                DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
+                    current, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + current - matchIndex, dictMatchIndex, matchIndex);
+                bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
+            }
+            if (ip+matchLength == iend) {   /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
+                break;   /* drop, to guarantee consistency (miss a little bit of compression) */
+            }
+        }
+
+        if (match[matchLength] < ip[matchLength]) {
+            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
+            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
+            dictMatchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
+        } else {
+            /* match is larger than current */
+            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
+            commonLengthLarger = matchLength;
+            dictMatchIndex = nextPtr[0];
+        }
+    }
+
+    if (bestLength >= MINMATCH) {
+        U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
+        DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
+                    current, (U32)bestLength, (U32)*offsetPtr, mIndex);
+    }
+    return bestLength;
+
+}
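+
+/* Worked example for the length-vs-offset trade-off used above (numbers are
+ * illustrative, offset encodings simplified; the stored code adds
+ * ZSTD_REP_MOVE): a candidate replaces the current best only when 4x the
+ * length gain exceeds the extra offset cost in bits. With bestLength=8 at
+ * offset 1000 and a candidate of length 10 at offset 100000:
+ * ZSTD_highbit32(100001) - ZSTD_highbit32(1001) = 16 - 9 = 7, and
+ * 4*(10-8) = 8 > 7, so the longer-but-farther match is preferred. */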
+
+
+static size_t
+ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
+                        const BYTE* const ip, const BYTE* const iend,
+                        size_t* offsetPtr,
+                        U32 const mls,
+                        const ZSTD_dictMode_e dictMode)
+{
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32*   const hashTable = ms->hashTable;
     U32    const hashLog = cParams->hashLog;
     size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
@@ -152,7 +242,9 @@
 
     const BYTE* const base = ms->window.base;
     U32    const current = (U32)(ip-base);
-    U32    const windowLow = ms->window.lowLimit;
+    U32    const maxDistance = 1U << cParams->windowLog;
+    U32    const windowValid = ms->window.lowLimit;
+    U32    const windowLow = (current - windowValid > maxDistance) ? current - maxDistance : windowValid;
 
     U32*   const bt = ms->chainTable;
     U32    const btLog  = cParams->chainLog - 1;
@@ -175,7 +267,7 @@
          && (nbCandidates > 1) ) {
         DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
                     matchIndex);
-        *unsortedMark = previousCandidate;
+        *unsortedMark = previousCandidate;  /* unsortedMark entries form a reversed chain, used to walk back up to the original position */
         previousCandidate = matchIndex;
         matchIndex = *nextCandidate;
         nextCandidate = bt + 2*(matchIndex&btMask);
@@ -183,11 +275,13 @@
         nbCandidates --;
     }
 
+    /* nullify last candidate if it's still unsorted
+     * simplification, detrimental to compression ratio, beneficial for speed */
     if ( (matchIndex > unsortLimit)
       && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
         DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
                     matchIndex);
-        *nextCandidate = *unsortedMark = 0;   /* nullify next candidate if it's still unsorted (note : simplification, detrimental to compression ratio, beneficial for speed) */
+        *nextCandidate = *unsortedMark = 0;
     }
 
     /* batch sort stacked candidates */
@@ -195,21 +289,21 @@
     while (matchIndex) {  /* will end on matchIndex == 0 */
         U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
         U32 const nextCandidateIdx = *nextCandidateIdxPtr;
-        ZSTD_insertDUBT1(ms, cParams, matchIndex, iend,
-                         nbCandidates, unsortLimit, extDict);
+        ZSTD_insertDUBT1(ms, matchIndex, iend,
+                         nbCandidates, unsortLimit, dictMode);
         matchIndex = nextCandidateIdx;
         nbCandidates++;
     }
 
     /* find longest match */
-    {   size_t commonLengthSmaller=0, commonLengthLarger=0;
+    {   size_t commonLengthSmaller = 0, commonLengthLarger = 0;
         const BYTE* const dictBase = ms->window.dictBase;
         const U32 dictLimit = ms->window.dictLimit;
         const BYTE* const dictEnd = dictBase + dictLimit;
         const BYTE* const prefixStart = base + dictLimit;
         U32* smallerPtr = bt + 2*(current&btMask);
         U32* largerPtr  = bt + 2*(current&btMask) + 1;
-        U32 matchEndIdx = current+8+1;
+        U32 matchEndIdx = current + 8 + 1;
         U32 dummy32;   /* to be nullified at the end */
         size_t bestLength = 0;
 
@@ -221,7 +315,7 @@
             size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
             const BYTE* match;
 
-            if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
+            if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {
                 match = base + matchIndex;
                 matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
             } else {
@@ -237,6 +331,11 @@
                 if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
                     bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
                 if (ip+matchLength == iend) {   /* equal : no way to know if smaller or larger */
+                    if (dictMode == ZSTD_dictMatchState) {
+                        nbCompares = 0; /* in addition to avoiding checking any
+                                         * further in this loop, make sure we
+                                         * skip checking in the dictionary. */
+                    }
                     break;   /* drop, to guarantee consistency (miss a little bit of compression) */
                 }
             }
@@ -259,6 +358,13 @@
 
         *smallerPtr = *largerPtr = 0;
 
+        if (dictMode == ZSTD_dictMatchState && nbCompares) {
+            bestLength = ZSTD_DUBT_findBetterDictMatch(
+                    ms, ip, iend,
+                    offsetPtr, bestLength, nbCompares,
+                    mls, dictMode);
+        }
+
         assert(matchEndIdx > current+8); /* ensure nextToUpdate is increased */
         ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
         if (bestLength >= MINMATCH) {
@@ -272,61 +378,64 @@
 
 
 /** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
-static size_t ZSTD_BtFindBestMatch (
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-                        const BYTE* const ip, const BYTE* const iLimit,
-                        size_t* offsetPtr,
-                        const U32 mls /* template */)
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
+                const BYTE* const ip, const BYTE* const iLimit,
+                      size_t* offsetPtr,
+                const U32 mls /* template */,
+                const ZSTD_dictMode_e dictMode)
 {
     DEBUGLOG(7, "ZSTD_BtFindBestMatch");
     if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
-    ZSTD_updateDUBT(ms, cParams, ip, iLimit, mls);
-    return ZSTD_DUBT_findBestMatch(ms, cParams, ip, iLimit, offsetPtr, mls, 0);
+    ZSTD_updateDUBT(ms, ip, iLimit, mls);
+    return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
 }
 
 
-static size_t ZSTD_BtFindBestMatch_selectMLS (
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-                        const BYTE* ip, const BYTE* const iLimit,
-                        size_t* offsetPtr)
+static size_t
+ZSTD_BtFindBestMatch_selectMLS (  ZSTD_matchState_t* ms,
+                            const BYTE* ip, const BYTE* const iLimit,
+                                  size_t* offsetPtr)
 {
-    switch(cParams->searchLength)
+    switch(ms->cParams.minMatch)
     {
     default : /* includes case 3 */
-    case 4 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 4);
-    case 5 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 5);
+    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
+    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
     case 7 :
-    case 6 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 6);
+    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
     }
 }
 
 
-/** Tree updater, providing best match */
-static size_t ZSTD_BtFindBestMatch_extDict (
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-                        const BYTE* const ip, const BYTE* const iLimit,
-                        size_t* offsetPtr,
-                        const U32 mls)
-{
-    DEBUGLOG(7, "ZSTD_BtFindBestMatch_extDict");
-    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
-    ZSTD_updateDUBT(ms, cParams, ip, iLimit, mls);
-    return ZSTD_DUBT_findBestMatch(ms, cParams, ip, iLimit, offsetPtr, mls, 1);
-}
-
-
-static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS (
+                        ZSTD_matchState_t* ms,
                         const BYTE* ip, const BYTE* const iLimit,
                         size_t* offsetPtr)
 {
-    switch(cParams->searchLength)
+    switch(ms->cParams.minMatch)
     {
     default : /* includes case 3 */
-    case 4 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 4);
-    case 5 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 5);
+    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
+    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
     case 7 :
-    case 6 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 6);
+    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
+    }
+}
+
+
+static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
+                        ZSTD_matchState_t* ms,
+                        const BYTE* ip, const BYTE* const iLimit,
+                        size_t* offsetPtr)
+{
+    switch(ms->cParams.minMatch)
+    {
+    default : /* includes case 3 */
+    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
+    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
+    case 7 :
+    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
     }
 }
 
@@ -335,12 +444,13 @@
 /* *********************************
 *  Hash Chain
 ***********************************/
-#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & mask]
+#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & (mask)]
 
 /* Update chains up to ip (excluded)
    Assumption : always within prefix (i.e. not within extDict) */
 static U32 ZSTD_insertAndFindFirstIndex_internal(
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        ZSTD_matchState_t* ms,
+                        const ZSTD_compressionParameters* const cParams,
                         const BYTE* ip, U32 const mls)
 {
     U32* const hashTable  = ms->hashTable;
@@ -362,22 +472,21 @@
     return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
 }
 
-U32 ZSTD_insertAndFindFirstIndex(
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-                        const BYTE* ip)
-{
-    return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, cParams->searchLength);
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
+    return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
 }
 
 
 /* inlining is important to hardwire a hot branch (template emulation) */
 FORCE_INLINE_TEMPLATE
 size_t ZSTD_HcFindBestMatch_generic (
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        ZSTD_matchState_t* ms,
                         const BYTE* const ip, const BYTE* const iLimit,
                         size_t* offsetPtr,
-                        const U32 mls, const U32 extDict)
+                        const U32 mls, const ZSTD_dictMode_e dictMode)
 {
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const chainTable = ms->chainTable;
     const U32 chainSize = (1 << cParams->chainLog);
     const U32 chainMask = chainSize-1;
@@ -386,8 +495,10 @@
     const U32 dictLimit = ms->window.dictLimit;
     const BYTE* const prefixStart = base + dictLimit;
     const BYTE* const dictEnd = dictBase + dictLimit;
-    const U32 lowLimit = ms->window.lowLimit;
     const U32 current = (U32)(ip-base);
+    const U32 maxDistance = 1U << cParams->windowLog;
+    const U32 lowValid = ms->window.lowLimit;
+    const U32 lowLimit = (current - lowValid > maxDistance) ? current - maxDistance : lowValid;
     const U32 minChain = current > chainSize ? current - chainSize : 0;
     U32 nbAttempts = 1U << cParams->searchLog;
     size_t ml=4-1;
@@ -397,8 +508,9 @@
 
     for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
         size_t currentMl=0;
-        if ((!extDict) || matchIndex >= dictLimit) {
+        if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
             const BYTE* const match = base + matchIndex;
+            assert(matchIndex >= dictLimit);   /* ensures this is true if dictMode != ZSTD_extDict */
             if (match[ml] == ip[ml])   /* potentially better */
                 currentMl = ZSTD_count(ip, match, iLimit);
         } else {
@@ -419,38 +531,87 @@
         matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
     }
 
+    if (dictMode == ZSTD_dictMatchState) {
+        const ZSTD_matchState_t* const dms = ms->dictMatchState;
+        const U32* const dmsChainTable = dms->chainTable;
+        const U32 dmsChainSize         = (1 << dms->cParams.chainLog);
+        const U32 dmsChainMask         = dmsChainSize - 1;
+        const U32 dmsLowestIndex       = dms->window.dictLimit;
+        const BYTE* const dmsBase      = dms->window.base;
+        const BYTE* const dmsEnd       = dms->window.nextSrc;
+        const U32 dmsSize              = (U32)(dmsEnd - dmsBase);
+        const U32 dmsIndexDelta        = dictLimit - dmsSize;
+        const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0;
+
+        matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];
+
+        for ( ; (matchIndex>dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
+            size_t currentMl=0;
+            const BYTE* const match = dmsBase + matchIndex;
+            assert(match+4 <= dmsEnd);
+            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
+
+            /* save best solution */
+            if (currentMl > ml) {
+                ml = currentMl;
+                *offsetPtr = current - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
+                if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+            }
+
+            if (matchIndex <= dmsMinChain) break;
+            matchIndex = dmsChainTable[matchIndex & dmsChainMask];
+        }
+    }
+
     return ml;
 }
 
 
 FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        ZSTD_matchState_t* ms,
                         const BYTE* ip, const BYTE* const iLimit,
                         size_t* offsetPtr)
 {
-    switch(cParams->searchLength)
+    switch(ms->cParams.minMatch)
     {
     default : /* includes case 3 */
-    case 4 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 4, 0);
-    case 5 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 5, 0);
+    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
+    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
     case 7 :
-    case 6 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 6, 0);
+    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
+    }
+}
+
+
+static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS (
+                        ZSTD_matchState_t* ms,
+                        const BYTE* ip, const BYTE* const iLimit,
+                        size_t* offsetPtr)
+{
+    switch(ms->cParams.minMatch)
+    {
+    default : /* includes case 3 */
+    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
+    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
+    case 7 :
+    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
     }
 }
 
 
 FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        ZSTD_matchState_t* ms,
                         const BYTE* ip, const BYTE* const iLimit,
-                        size_t* const offsetPtr)
+                        size_t* offsetPtr)
 {
-    switch(cParams->searchLength)
+    switch(ms->cParams.minMatch)
     {
     default : /* includes case 3 */
-    case 4 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 4, 1);
-    case 5 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 5, 1);
+    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
+    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
     case 7 :
-    case 6 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 6, 1);
+    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
     }
 }
 
@@ -462,30 +623,54 @@
 size_t ZSTD_compressBlock_lazy_generic(
                         ZSTD_matchState_t* ms, seqStore_t* seqStore,
                         U32 rep[ZSTD_REP_NUM],
-                        ZSTD_compressionParameters const* cParams,
                         const void* src, size_t srcSize,
-                        const U32 searchMethod, const U32 depth)
+                        const U32 searchMethod, const U32 depth,
+                        ZSTD_dictMode_e const dictMode)
 {
     const BYTE* const istart = (const BYTE*)src;
     const BYTE* ip = istart;
     const BYTE* anchor = istart;
     const BYTE* const iend = istart + srcSize;
     const BYTE* const ilimit = iend - 8;
-    const BYTE* const base = ms->window.base + ms->window.dictLimit;
+    const BYTE* const base = ms->window.base;
+    const U32 prefixLowestIndex = ms->window.dictLimit;
+    const BYTE* const prefixLowest = base + prefixLowestIndex;
 
     typedef size_t (*searchMax_f)(
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        ZSTD_matchState_t* ms,
                         const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
-    searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
+    searchMax_f const searchMax = dictMode == ZSTD_dictMatchState ?
+        (searchMethod ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) :
+        (searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS);
     U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
 
+    const ZSTD_matchState_t* const dms = ms->dictMatchState;
+    const U32 dictLowestIndex      = dictMode == ZSTD_dictMatchState ?
+                                     dms->window.dictLimit : 0;
+    const BYTE* const dictBase     = dictMode == ZSTD_dictMatchState ?
+                                     dms->window.base : NULL;
+    const BYTE* const dictLowest   = dictMode == ZSTD_dictMatchState ?
+                                     dictBase + dictLowestIndex : NULL;
+    const BYTE* const dictEnd      = dictMode == ZSTD_dictMatchState ?
+                                     dms->window.nextSrc : NULL;
+    const U32 dictIndexDelta       = dictMode == ZSTD_dictMatchState ?
+                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :
+                                     0;
+    const U32 dictAndPrefixLength = (U32)(ip - prefixLowest + dictEnd - dictLowest);
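+    /* Index mapping (informative): dictionary indices live in a single
+     * virtual index space ending at prefixLowestIndex, i.e. globalIndex =
+     * dictIndex + dictIndexDelta; hence repIndex < prefixLowestIndex selects
+     * the dict segment, and dictBase + (repIndex - dictIndexDelta) recovers
+     * its pointer. */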
+
     /* init */
-    ip += (ip==base);
-    ms->nextToUpdate3 = ms->nextToUpdate;
-    {   U32 const maxRep = (U32)(ip-base);
+    ip += (dictAndPrefixLength == 0);
+    if (dictMode == ZSTD_noDict) {
+        U32 const maxRep = (U32)(ip - prefixLowest);
         if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
         if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
     }
+    if (dictMode == ZSTD_dictMatchState) {
+        /* dictMatchState repCode checks don't currently handle repCode == 0
+         * disabling. */
+        assert(offset_1 <= dictAndPrefixLength);
+        assert(offset_2 <= dictAndPrefixLength);
+    }
 
     /* Match Loop */
     while (ip < ilimit) {
@@ -494,15 +679,28 @@
         const BYTE* start=ip+1;
 
         /* check repCode */
-        if ((offset_1>0) & (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))) {
-            /* repcode : we take it */
+        if (dictMode == ZSTD_dictMatchState) {
+            const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
+            const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
+                                && repIndex < prefixLowestIndex) ?
+                                   dictBase + (repIndex - dictIndexDelta) :
+                                   base + repIndex;
+            if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+                && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+                const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+                if (depth==0) goto _storeSequence;
+            }
+        }
+        if ( dictMode == ZSTD_noDict
+          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
             matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
             if (depth==0) goto _storeSequence;
         }
 
         /* first search (depth 0) */
-        {   size_t offsetFound = 99999999;
-            size_t const ml2 = searchMax(ms, cParams, ip, iend, &offsetFound);
+        {   size_t offsetFound = 999999999;
+            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
             if (ml2 > matchLength)
                 matchLength = ml2, start = ip, offset=offsetFound;
         }
@@ -516,15 +714,31 @@
         if (depth>=1)
         while (ip<ilimit) {
             ip ++;
-            if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+            if ( (dictMode == ZSTD_noDict)
+              && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
                 size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                 int const gain2 = (int)(mlRep * 3);
                 int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
                 if ((mlRep >= 4) && (gain2 > gain1))
                     matchLength = mlRep, offset = 0, start = ip;
             }
-            {   size_t offset2=99999999;
-                size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
+            if (dictMode == ZSTD_dictMatchState) {
+                const U32 repIndex = (U32)(ip - base) - offset_1;
+                const BYTE* repMatch = repIndex < prefixLowestIndex ?
+                               dictBase + (repIndex - dictIndexDelta) :
+                               base + repIndex;
+                if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+                    && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+                    const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+                    size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+                    int const gain2 = (int)(mlRep * 3);
+                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+                    if ((mlRep >= 4) && (gain2 > gain1))
+                        matchLength = mlRep, offset = 0, start = ip;
+                }
+            }
+            {   size_t offset2=999999999;
+                size_t const ml2 = searchMax(ms, ip, iend, &offset2);
                 int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                 int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
                 if ((ml2 >= 4) && (gain2 > gain1)) {
@@ -535,15 +749,31 @@
             /* let's find an even better one */
             if ((depth==2) && (ip<ilimit)) {
                 ip ++;
-                if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
-                    size_t const ml2 = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
-                    int const gain2 = (int)(ml2 * 4);
+                if ( (dictMode == ZSTD_noDict)
+                  && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+                    size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
+                    int const gain2 = (int)(mlRep * 4);
                     int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
-                    if ((ml2 >= 4) && (gain2 > gain1))
-                        matchLength = ml2, offset = 0, start = ip;
+                    if ((mlRep >= 4) && (gain2 > gain1))
+                        matchLength = mlRep, offset = 0, start = ip;
                 }
-                {   size_t offset2=99999999;
-                    size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
+                if (dictMode == ZSTD_dictMatchState) {
+                    const U32 repIndex = (U32)(ip - base) - offset_1;
+                    const BYTE* repMatch = repIndex < prefixLowestIndex ?
+                                   dictBase + (repIndex - dictIndexDelta) :
+                                   base + repIndex;
+                    if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+                        && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+                        const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+                        size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+                        int const gain2 = (int)(mlRep * 4);
+                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+                        if ((mlRep >= 4) && (gain2 > gain1))
+                            matchLength = mlRep, offset = 0, start = ip;
+                    }
+                }
+                {   size_t offset2=999999999;
+                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);
                     int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                     int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
                     if ((ml2 >= 4) && (gain2 > gain1)) {
@@ -560,9 +790,17 @@
          */
         /* catch up */
         if (offset) {
-            while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > base))
-                 && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) )  /* only search for offset within prefix */
-                { start--; matchLength++; }
+            if (dictMode == ZSTD_noDict) {
+                while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest))
+                     && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) )  /* only search for offset within prefix */
+                    { start--; matchLength++; }
+            }
+            if (dictMode == ZSTD_dictMatchState) {
+                U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
+                const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
+                const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
+                while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
+            }
             offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
         }
         /* store sequence */
@@ -573,16 +811,39 @@
         }
 
         /* check immediate repcode */
-        while ( ((ip <= ilimit) & (offset_2>0))
-             && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
-            /* store sequence */
-            matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
-            offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
-            ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
-            ip += matchLength;
-            anchor = ip;
-            continue;   /* faster when present ... (?) */
-    }   }
+        if (dictMode == ZSTD_dictMatchState) {
+            while (ip <= ilimit) {
+                U32 const current2 = (U32)(ip-base);
+                U32 const repIndex = current2 - offset_2;
+                const BYTE* repMatch = dictMode == ZSTD_dictMatchState
+                    && repIndex < prefixLowestIndex ?
+                        dictBase - dictIndexDelta + repIndex :
+                        base + repIndex;
+                if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
+                   && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+                    const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
+                    matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
+                    offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset_2 <=> offset_1 */
+                    ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
+                    ip += matchLength;
+                    anchor = ip;
+                    continue;
+                }
+                break;
+            }
+        }
+
+        if (dictMode == ZSTD_noDict) {
+            while ( ((ip <= ilimit) & (offset_2>0))
+                 && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
+                /* store sequence */
+                matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
+                ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
+                ip += matchLength;
+                anchor = ip;
+                continue;   /* believed faster when a repcode is present (unverified) */
+    }   }   }
 
     /* Save reps for next block */
     rep[0] = offset_1 ? offset_1 : savedOffset;
@@ -595,30 +856,58 @@
 
 size_t ZSTD_compressBlock_btlazy2(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 1, 2);
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_noDict);
 }
 
 size_t ZSTD_compressBlock_lazy2(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 2);
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_noDict);
 }
 
 size_t ZSTD_compressBlock_lazy(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 1);
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_noDict);
 }
 
 size_t ZSTD_compressBlock_greedy(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 0);
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy2_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_greedy_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_dictMatchState);
 }
 
 
@@ -626,7 +915,6 @@
 size_t ZSTD_compressBlock_lazy_extDict_generic(
                         ZSTD_matchState_t* ms, seqStore_t* seqStore,
                         U32 rep[ZSTD_REP_NUM],
-                        ZSTD_compressionParameters const* cParams,
                         const void* src, size_t srcSize,
                         const U32 searchMethod, const U32 depth)
 {
@@ -644,14 +932,13 @@
     const BYTE* const dictStart  = dictBase + lowestIndex;
 
     typedef size_t (*searchMax_f)(
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        ZSTD_matchState_t* ms,
                         const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
-    searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
+    searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
 
     U32 offset_1 = rep[0], offset_2 = rep[1];
 
     /* init */
-    ms->nextToUpdate3 = ms->nextToUpdate;
     ip += (ip == prefixStart);
 
     /* Match Loop */
@@ -674,8 +961,8 @@
         }   }
 
         /* first search (depth 0) */
-        {   size_t offsetFound = 99999999;
-            size_t const ml2 = searchMax(ms, cParams, ip, iend, &offsetFound);
+        {   size_t offsetFound = 999999999;
+            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
             if (ml2 > matchLength)
                 matchLength = ml2, start = ip, offset=offsetFound;
         }
@@ -707,8 +994,8 @@
             }   }
 
             /* search match, depth 1 */
-            {   size_t offset2=99999999;
-                size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
+            {   size_t offset2=999999999;
+                size_t const ml2 = searchMax(ms, ip, iend, &offset2);
                 int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                 int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
                 if ((ml2 >= 4) && (gain2 > gain1)) {
@@ -737,8 +1024,8 @@
                 }   }
 
                 /* search match, depth 2 */
-                {   size_t offset2=99999999;
-                    size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
+                {   size_t offset2=999999999;
+                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);
                     int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                     int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
                     if ((ml2 >= 4) && (gain2 > gain1)) {
@@ -794,31 +1081,31 @@
 
 size_t ZSTD_compressBlock_greedy_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 0);
+    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 0);
 }
 
 size_t ZSTD_compressBlock_lazy_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 
 {
-    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 1);
+    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 1);
 }
 
 size_t ZSTD_compressBlock_lazy2_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 
 {
-    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 2);
+    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 2);
 }
 
 size_t ZSTD_compressBlock_btlazy2_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 
 {
-    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 1, 2);
+    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 1, 2);
 }
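
Throughout the dictMatchState additions in this file, the dictionary and the current window share one linear index space: an index below prefixLowestIndex resolves into the dictionary buffer via dictIndexDelta, anything else into the window. A toy restatement of that mapping, assuming variables shaped like the ones above:

    typedef unsigned char BYTE;   /* as in mem.h */
    typedef unsigned int  U32;

    /* hypothetical helper mirroring the (matchIndex < prefixLowestIndex)
     * selections in the catch-up and repcode paths above */
    static const BYTE* resolve(U32 idx, U32 prefixLowestIndex,
                               const BYTE* base,
                               const BYTE* dictBase, U32 dictIndexDelta)
    {
        return (idx < prefixLowestIndex)
             ? dictBase + idx - dictIndexDelta   /* falls in the dictionary */
             : base + idx;                       /* falls in the prefix */
    }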
diff --git a/vendor/github.com/DataDog/zstd/zstd_lazy.h b/vendor/github.com/DataDog/zstd/zstd_lazy.h
index bda064f..bb17630 100644
--- a/vendor/github.com/DataDog/zstd/zstd_lazy.h
+++ b/vendor/github.com/DataDog/zstd/zstd_lazy.h
@@ -17,37 +17,48 @@
 
 #include "zstd_compress_internal.h"
 
-U32 ZSTD_insertAndFindFirstIndex(
-        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-        const BYTE* ip);
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
 
-void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue);  /*! used in ZSTD_reduceIndex(). pre-emptively increase value of ZSTD_DUBT_UNSORTED_MARK */
+void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue);  /*! used in ZSTD_reduceIndex(); preemptively increases the value of ZSTD_DUBT_UNSORTED_MARK */
 
 size_t ZSTD_compressBlock_btlazy2(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_lazy2(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_lazy(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_greedy(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
 
 size_t ZSTD_compressBlock_greedy_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_lazy_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_lazy2_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_btlazy2_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 
 #if defined (__cplusplus)
 }
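
The declarations now form a (strategy x dictMode) grid; ZSTD_selectBlockCompressor in zstd_compress.c (outside this hunk) picks from a table of that shape. An abridged sketch under that assumption, using the shared signature declared above (row/column layout is illustrative):

    typedef size_t (*blockCompressor_f)(
            ZSTD_matchState_t*, seqStore_t*, U32 rep[ZSTD_REP_NUM],
            void const*, size_t);

    /* rows = dictMode (noDict, extDict, dictMatchState), columns = strategy;
     * abridged to two of the strategies declared in this header */
    static const blockCompressor_f compressors[3][2] = {
        { ZSTD_compressBlock_greedy,                ZSTD_compressBlock_lazy                },
        { ZSTD_compressBlock_greedy_extDict,        ZSTD_compressBlock_lazy_extDict        },
        { ZSTD_compressBlock_greedy_dictMatchState, ZSTD_compressBlock_lazy_dictMatchState },
    };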
diff --git a/vendor/github.com/DataDog/zstd/zstd_ldm.c b/vendor/github.com/DataDog/zstd/zstd_ldm.c
index bffd8a3..3dcf86e 100644
--- a/vendor/github.com/DataDog/zstd/zstd_ldm.c
+++ b/vendor/github.com/DataDog/zstd/zstd_ldm.c
@@ -9,6 +9,7 @@
 
 #include "zstd_ldm.h"
 
+#include "debug.h"
 #include "zstd_fast.h"          /* ZSTD_fillHashTable() */
 #include "zstd_double_fast.h"   /* ZSTD_fillDoubleHashTable() */
 
@@ -20,7 +21,7 @@
 void ZSTD_ldm_adjustParameters(ldmParams_t* params,
                                ZSTD_compressionParameters const* cParams)
 {
-    U32 const windowLog = cParams->windowLog;
+    params->windowLog = cParams->windowLog;
     ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
     DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
     if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
@@ -33,12 +34,13 @@
       params->minMatchLength = minMatch;
     }
     if (params->hashLog == 0) {
-        params->hashLog = MAX(ZSTD_HASHLOG_MIN, windowLog - LDM_HASH_RLOG);
+        params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
         assert(params->hashLog <= ZSTD_HASHLOG_MAX);
     }
-    if (params->hashEveryLog == 0) {
-        params->hashEveryLog =
-                windowLog < params->hashLog ? 0 : windowLog - params->hashLog;
+    if (params->hashRateLog == 0) {
+        params->hashRateLog = params->windowLog < params->hashLog
+                                   ? 0
+                                   : params->windowLog - params->hashLog;
     }
     params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
 }
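
With these defaults, hashRateLog becomes windowLog - hashLog whenever the window outgrows the hash table, and it gates insertion: a position enters the LDM table only when its hash tag equals the all-ones mask, i.e. roughly one position in 2^hashRateLog. A worked example (values illustrative, not library defaults):

    U32 windowLog = 27, hashLog = 20;
    U32 hashRateLog = (windowLog < hashLog) ? 0 : windowLog - hashLog;  /* = 7 */
    U32 tagMask = (1U << hashRateLog) - 1;                              /* = 0x7F */
    /* only positions whose low 7 tag bits are all ones get inserted:
     * an expected sampling rate of 1/128 */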
@@ -117,20 +119,20 @@
  *
  *  Gets the small hash, checksum, and tag from the rollingHash.
  *
- *  If the tag matches (1 << ldmParams.hashEveryLog)-1, then
+ *  If the tag matches (1 << ldmParams.hashRateLog)-1, then
  *  creates an ldmEntry from the offset, and inserts it into the hash table.
  *
  *  hBits is the length of the small hash, which is the most significant hBits
  *  of rollingHash. The checksum is the next 32 most significant bits, followed
- *  by ldmParams.hashEveryLog bits that make up the tag. */
+ *  by ldmParams.hashRateLog bits that make up the tag. */
 static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,
                                              U64 const rollingHash,
                                              U32 const hBits,
                                              U32 const offset,
                                              ldmParams_t const ldmParams)
 {
-    U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog);
-    U32 const tagMask = ((U32)1 << ldmParams.hashEveryLog) - 1;
+    U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashRateLog);
+    U32 const tagMask = ((U32)1 << ldmParams.hashRateLog) - 1;
     if (tag == tagMask) {
         U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits);
         U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
@@ -141,56 +143,6 @@
     }
 }
 
-/** ZSTD_ldm_getRollingHash() :
- *  Get a 64-bit hash using the first len bytes from buf.
- *
- *  Giving bytes s = s_1, s_2, ... s_k, the hash is defined to be
- *  H(s) = s_1*(a^(k-1)) + s_2*(a^(k-2)) + ... + s_k*(a^0)
- *
- *  where the constant a is defined to be prime8bytes.
- *
- *  The implementation adds an offset to each byte, so
- *  H(s) = (s_1 + HASH_CHAR_OFFSET)*(a^(k-1)) + ... */
-static U64 ZSTD_ldm_getRollingHash(const BYTE* buf, U32 len)
-{
-    U64 ret = 0;
-    U32 i;
-    for (i = 0; i < len; i++) {
-        ret *= prime8bytes;
-        ret += buf[i] + LDM_HASH_CHAR_OFFSET;
-    }
-    return ret;
-}
-
-/** ZSTD_ldm_ipow() :
- *  Return base^exp. */
-static U64 ZSTD_ldm_ipow(U64 base, U64 exp)
-{
-    U64 ret = 1;
-    while (exp) {
-        if (exp & 1) { ret *= base; }
-        exp >>= 1;
-        base *= base;
-    }
-    return ret;
-}
-
-U64 ZSTD_ldm_getHashPower(U32 minMatchLength) {
-    DEBUGLOG(4, "ZSTD_ldm_getHashPower: mml=%u", minMatchLength);
-    assert(minMatchLength >= ZSTD_LDM_MINMATCH_MIN);
-    return ZSTD_ldm_ipow(prime8bytes, minMatchLength - 1);
-}
-
-/** ZSTD_ldm_updateHash() :
- *  Updates hash by removing toRemove and adding toAdd. */
-static U64 ZSTD_ldm_updateHash(U64 hash, BYTE toRemove, BYTE toAdd, U64 hashPower)
-{
-    hash -= ((toRemove + LDM_HASH_CHAR_OFFSET) * hashPower);
-    hash *= prime8bytes;
-    hash += toAdd + LDM_HASH_CHAR_OFFSET;
-    return hash;
-}
-
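
These three helpers were consolidated rather than deleted: the call sites below now use ZSTD_rollingHash_compute() and ZSTD_rollingHash_rotate(), which implement the same polynomial hash. A standalone restatement of the recurrence (the offset and multiplier are arbitrary here, not the library's prime8bytes):

    #include <stddef.h>
    #include <stdint.h>

    #define OFF 10                                    /* per-byte offset, arbitrary here */
    static const uint64_t A = 0x9E3779B97F4A7C15ULL;  /* multiplier, arbitrary here */

    /* H(s_1..s_k) = sum_i (s_i + OFF) * A^(k-i), built left to right */
    static uint64_t rh_compute(const uint8_t* buf, size_t len)
    {
        uint64_t h = 0;
        size_t i;
        for (i = 0; i < len; i++) h = h * A + (uint64_t)(buf[i] + OFF);
        return h;
    }

    /* slide the window by one byte: drop `out` (weighted A^(k-1)), append `in` */
    static uint64_t rh_rotate(uint64_t h, uint8_t out, uint8_t in, uint64_t aPowKm1)
    {
        h -= (uint64_t)(out + OFF) * aPowKm1;
        return h * A + (uint64_t)(in + OFF);
    }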
 /** ZSTD_ldm_countBackwardsMatch() :
  *  Returns the number of bytes that match backwards before pIn and pMatch.
  *
@@ -216,21 +168,18 @@
  *  The tables for the other strategies are filled within their
  *  block compressors. */
 static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
-                                      ZSTD_compressionParameters const* cParams,
                                       void const* end)
 {
     const BYTE* const iend = (const BYTE*)end;
 
-    switch(cParams->strategy)
+    switch(ms->cParams.strategy)
     {
     case ZSTD_fast:
-        ZSTD_fillHashTable(ms, cParams, iend);
-        ms->nextToUpdate = (U32)(iend - ms->window.base);
+        ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
         break;
 
     case ZSTD_dfast:
-        ZSTD_fillDoubleHashTable(ms, cParams, iend);
-        ms->nextToUpdate = (U32)(iend - ms->window.base);
+        ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
         break;
 
     case ZSTD_greedy:
@@ -239,6 +188,7 @@
     case ZSTD_btlazy2:
     case ZSTD_btopt:
     case ZSTD_btultra:
+    case ZSTD_btultra2:
         break;
     default:
         assert(0);  /* not possible : not a valid strategy id */
@@ -262,9 +212,9 @@
     const BYTE* cur = lastHashed + 1;
 
     while (cur < iend) {
-        rollingHash = ZSTD_ldm_updateHash(rollingHash, cur[-1],
-                                          cur[ldmParams.minMatchLength-1],
-                                          state->hashPower);
+        rollingHash = ZSTD_rollingHash_rotate(rollingHash, cur[-1],
+                                              cur[ldmParams.minMatchLength-1],
+                                              state->hashPower);
         ZSTD_ldm_makeEntryAndInsertByTag(state,
                                          rollingHash, hBits,
                                          (U32)(cur - base), ldmParams);
@@ -298,8 +248,8 @@
     U64 const hashPower = ldmState->hashPower;
     U32 const hBits = params->hashLog - params->bucketSizeLog;
     U32 const ldmBucketSize = 1U << params->bucketSizeLog;
-    U32 const hashEveryLog = params->hashEveryLog;
-    U32 const ldmTagMask = (1U << params->hashEveryLog) - 1;
+    U32 const hashRateLog = params->hashRateLog;
+    U32 const ldmTagMask = (1U << params->hashRateLog) - 1;
     /* Prefix and extDict parameters */
     U32 const dictLimit = ldmState->window.dictLimit;
     U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
@@ -325,16 +275,16 @@
         size_t forwardMatchLength = 0, backwardMatchLength = 0;
         ldmEntry_t* bestEntry = NULL;
         if (ip != istart) {
-            rollingHash = ZSTD_ldm_updateHash(rollingHash, lastHashed[0],
-                                              lastHashed[minMatchLength],
-                                              hashPower);
+            rollingHash = ZSTD_rollingHash_rotate(rollingHash, lastHashed[0],
+                                                  lastHashed[minMatchLength],
+                                                  hashPower);
         } else {
-            rollingHash = ZSTD_ldm_getRollingHash(ip, minMatchLength);
+            rollingHash = ZSTD_rollingHash_compute(ip, minMatchLength);
         }
         lastHashed = ip;
 
         /* Do not insert and do not look for a match */
-        if (ZSTD_ldm_getTag(rollingHash, hBits, hashEveryLog) != ldmTagMask) {
+        if (ZSTD_ldm_getTag(rollingHash, hBits, hashRateLog) != ldmTagMask) {
            ip++;
            continue;
         }
@@ -479,7 +429,7 @@
      */
     assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
     /* The input could be very large (in zstdmt), so it must be broken up into
-     * chunks to enforce the maximmum distance and handle overflow correction.
+     * chunks to enforce the maximum distance and handle overflow correction.
      */
     assert(sequences->pos <= sequences->size);
     assert(sequences->size <= sequences->capacity);
@@ -497,7 +447,7 @@
         if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) {
             U32 const ldmHSize = 1U << params->hashLog;
             U32 const correction = ZSTD_window_correctOverflow(
-                &ldmState->window, /* cycleLog */ 0, maxDist, src);
+                &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
             ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
         }
         /* 2. We enforce the maximum offset allowed.
@@ -508,7 +458,7 @@
          *       * Try invalidation after the sequence generation and test
          *         the offset against maxDist directly.
          */
-        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, NULL);
+        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, NULL, NULL);
         /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
         newLeftoverSize = ZSTD_ldm_generateSequences_internal(
             ldmState, sequences, params, chunkStart, chunkSize);
@@ -591,19 +541,19 @@
 
 size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
     ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-    ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize,
-    int const extDict)
+    void const* src, size_t srcSize)
 {
-    unsigned const minMatch = cParams->searchLength;
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
+    unsigned const minMatch = cParams->minMatch;
     ZSTD_blockCompressor const blockCompressor =
-        ZSTD_selectBlockCompressor(cParams->strategy, extDict);
-    BYTE const* const base = ms->window.base;
+        ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms));
     /* Input bounds */
     BYTE const* const istart = (BYTE const*)src;
     BYTE const* const iend = istart + srcSize;
     /* Input positions */
     BYTE const* ip = istart;
 
+    DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
     assert(rawSeqStore->pos <= rawSeqStore->size);
     assert(rawSeqStore->size <= rawSeqStore->capacity);
     /* Loop through each sequence and apply the block compressor to the lits */
@@ -621,14 +571,13 @@
 
         /* Fill tables for block compressor */
         ZSTD_ldm_limitTableUpdate(ms, ip);
-        ZSTD_ldm_fillFastTables(ms, cParams, ip);
+        ZSTD_ldm_fillFastTables(ms, ip);
         /* Run the block compressor */
+        DEBUGLOG(5, "calling block compressor on segment of size %u", sequence.litLength);
         {
             size_t const newLitLength =
-                blockCompressor(ms, seqStore, rep, cParams, ip,
-                                sequence.litLength);
+                blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
             ip += sequence.litLength;
-            ms->nextToUpdate = (U32)(ip - base);
             /* Update the repcodes */
             for (i = ZSTD_REP_NUM - 1; i > 0; i--)
                 rep[i] = rep[i-1];
@@ -642,12 +591,7 @@
     }
     /* Fill the tables for the block compressor */
     ZSTD_ldm_limitTableUpdate(ms, ip);
-    ZSTD_ldm_fillFastTables(ms, cParams, ip);
+    ZSTD_ldm_fillFastTables(ms, ip);
     /* Compress the last literals */
-    {
-        size_t const lastLiterals = blockCompressor(ms, seqStore, rep, cParams,
-                                                    ip, iend - ip);
-        ms->nextToUpdate = (U32)(iend - base);
-        return lastLiterals;
-    }
+    return blockCompressor(ms, seqStore, rep, ip, iend - ip);
 }
diff --git a/vendor/github.com/DataDog/zstd/zstd_ldm.h b/vendor/github.com/DataDog/zstd/zstd_ldm.h
index 0c3789f..a478461 100644
--- a/vendor/github.com/DataDog/zstd/zstd_ldm.h
+++ b/vendor/github.com/DataDog/zstd/zstd_ldm.h
@@ -21,7 +21,7 @@
 *  Long distance matching
 ***************************************/
 
-#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_DEFAULTMAX
+#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT
 
 /**
  * ZSTD_ldm_generateSequences():
@@ -61,9 +61,7 @@
  */
 size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
             ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-            ZSTD_compressionParameters const* cParams,
-            void const* src, size_t srcSize,
-            int const extDict);
+            void const* src, size_t srcSize);
 
 /**
  * ZSTD_ldm_skipSequences():
@@ -88,12 +86,8 @@
  */
 size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);
 
-/** ZSTD_ldm_getTableSize() :
- *  Return prime8bytes^(minMatchLength-1) */
-U64 ZSTD_ldm_getHashPower(U32 minMatchLength);
-
 /** ZSTD_ldm_adjustParameters() :
- *  If the params->hashEveryLog is not set, set it to its default value based on
+ *  If the params->hashRateLog is not set, set it to its default value based on
  *  windowLog and params->hashLog.
  *
  *  Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
diff --git a/vendor/github.com/DataDog/zstd/zstd_legacy.h b/vendor/github.com/DataDog/zstd/zstd_legacy.h
index 5893cb9..0dbd3c7 100644
--- a/vendor/github.com/DataDog/zstd/zstd_legacy.h
+++ b/vendor/github.com/DataDog/zstd/zstd_legacy.h
@@ -20,7 +20,7 @@
 ***************************************/
 #include "mem.h"            /* MEM_STATIC */
 #include "error_private.h"  /* ERROR */
-#include "zstd.h"           /* ZSTD_inBuffer, ZSTD_outBuffer */
+#include "zstd_internal.h"  /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTD_frameSizeInfo */
 
 #if !defined (ZSTD_LEGACY_SUPPORT) || (ZSTD_LEGACY_SUPPORT == 0)
 #  undef ZSTD_LEGACY_SUPPORT
@@ -178,43 +178,77 @@
     }
 }
 
-MEM_STATIC size_t ZSTD_findFrameCompressedSizeLegacy(const void *src,
-                                             size_t compressedSize)
+MEM_STATIC ZSTD_frameSizeInfo ZSTD_findFrameSizeInfoLegacy(const void *src, size_t srcSize)
 {
-    U32 const version = ZSTD_isLegacy(src, compressedSize);
+    ZSTD_frameSizeInfo frameSizeInfo;
+    U32 const version = ZSTD_isLegacy(src, srcSize);
     switch(version)
     {
 #if (ZSTD_LEGACY_SUPPORT <= 1)
         case 1 :
-            return ZSTDv01_findFrameCompressedSize(src, compressedSize);
+            ZSTDv01_findFrameSizeInfoLegacy(src, srcSize,
+                &frameSizeInfo.compressedSize,
+                &frameSizeInfo.decompressedBound);
+            break;
 #endif
 #if (ZSTD_LEGACY_SUPPORT <= 2)
         case 2 :
-            return ZSTDv02_findFrameCompressedSize(src, compressedSize);
+            ZSTDv02_findFrameSizeInfoLegacy(src, srcSize,
+                &frameSizeInfo.compressedSize,
+                &frameSizeInfo.decompressedBound);
+            break;
 #endif
 #if (ZSTD_LEGACY_SUPPORT <= 3)
         case 3 :
-            return ZSTDv03_findFrameCompressedSize(src, compressedSize);
+            ZSTDv03_findFrameSizeInfoLegacy(src, srcSize,
+                &frameSizeInfo.compressedSize,
+                &frameSizeInfo.decompressedBound);
+            break;
 #endif
 #if (ZSTD_LEGACY_SUPPORT <= 4)
         case 4 :
-            return ZSTDv04_findFrameCompressedSize(src, compressedSize);
+            ZSTDv04_findFrameSizeInfoLegacy(src, srcSize,
+                &frameSizeInfo.compressedSize,
+                &frameSizeInfo.decompressedBound);
+            break;
 #endif
 #if (ZSTD_LEGACY_SUPPORT <= 5)
         case 5 :
-            return ZSTDv05_findFrameCompressedSize(src, compressedSize);
+            ZSTDv05_findFrameSizeInfoLegacy(src, srcSize,
+                &frameSizeInfo.compressedSize,
+                &frameSizeInfo.decompressedBound);
+            break;
 #endif
 #if (ZSTD_LEGACY_SUPPORT <= 6)
         case 6 :
-            return ZSTDv06_findFrameCompressedSize(src, compressedSize);
+            ZSTDv06_findFrameSizeInfoLegacy(src, srcSize,
+                &frameSizeInfo.compressedSize,
+                &frameSizeInfo.decompressedBound);
+            break;
 #endif
 #if (ZSTD_LEGACY_SUPPORT <= 7)
         case 7 :
-            return ZSTDv07_findFrameCompressedSize(src, compressedSize);
+            ZSTDv07_findFrameSizeInfoLegacy(src, srcSize,
+                &frameSizeInfo.compressedSize,
+                &frameSizeInfo.decompressedBound);
+            break;
 #endif
         default :
-            return ERROR(prefix_unknown);
+            frameSizeInfo.compressedSize = ERROR(prefix_unknown);
+            frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
+            break;
     }
+    if (!ZSTD_isError(frameSizeInfo.compressedSize) && frameSizeInfo.compressedSize > srcSize) {
+        frameSizeInfo.compressedSize = ERROR(srcSize_wrong);
+        frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
+    }
+    return frameSizeInfo;
+}
+
+MEM_STATIC size_t ZSTD_findFrameCompressedSizeLegacy(const void *src, size_t srcSize)
+{
+    ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfoLegacy(src, srcSize);
+    return frameSizeInfo.compressedSize;
 }
 
 MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version)
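
The struct-returning form lets a caller walk a buffer of concatenated legacy frames while accumulating a decompression bound, with the old single-value API kept as the thin wrapper above. A hedged caller-side sketch (the accumulator and error choice are hypothetical):

    ZSTD_frameSizeInfo const info = ZSTD_findFrameSizeInfoLegacy(src, srcSize);
    if (ZSTD_isError(info.compressedSize)
        || info.decompressedBound == ZSTD_CONTENTSIZE_ERROR)
        return ERROR(corruption_detected);   /* hypothetical error choice */
    bound += info.decompressedBound;         /* hypothetical accumulator */
    src = (const char*)src + info.compressedSize;
    srcSize -= info.compressedSize;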
diff --git a/vendor/github.com/DataDog/zstd/zstd_opt.c b/vendor/github.com/DataDog/zstd/zstd_opt.c
index f63f0c5..e32e542 100644
--- a/vendor/github.com/DataDog/zstd/zstd_opt.c
+++ b/vendor/github.com/DataDog/zstd/zstd_opt.c
@@ -9,139 +9,259 @@
  */
 
 #include "zstd_compress_internal.h"
+#include "hist.h"
 #include "zstd_opt.h"
 
 
-#define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats. Also used for matchSum (?) */
+#define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
 #define ZSTD_FREQ_DIV       4   /* log factor when using previous stats to init next stats */
 #define ZSTD_MAX_PRICE     (1<<30)
 
+#define ZSTD_PREDEF_THRESHOLD 1024   /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
+
 
 /*-*************************************
 *  Price functions for optimal parser
 ***************************************/
-static void ZSTD_setLog2Prices(optState_t* optPtr)
+
+#if 0    /* approximation at bit level */
+#  define BITCOST_ACCURACY 0
+#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+#  define WEIGHT(stat,opt)  ((void)opt, ZSTD_bitWeight(stat))
+#elif 0  /* fractional bit accuracy */
+#  define BITCOST_ACCURACY 8
+#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+#  define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
+#else    /* opt==approx, ultra==accurate */
+#  define BITCOST_ACCURACY 8
+#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+#  define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
+#endif
+
+MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
 {
-    optPtr->log2litSum = ZSTD_highbit32(optPtr->litSum+1);
-    optPtr->log2litLengthSum = ZSTD_highbit32(optPtr->litLengthSum+1);
-    optPtr->log2matchLengthSum = ZSTD_highbit32(optPtr->matchLengthSum+1);
-    optPtr->log2offCodeSum = ZSTD_highbit32(optPtr->offCodeSum+1);
+    return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
+}
+
+MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
+{
+    U32 const stat = rawStat + 1;
+    U32 const hb = ZSTD_highbit32(stat);
+    U32 const BWeight = hb * BITCOST_MULTIPLIER;
+    U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
+    U32 const weight = BWeight + FWeight;
+    assert(hb + BITCOST_ACCURACY < 31);
+    return weight;
+}
+
+#if (DEBUGLEVEL>=2)
+/* debugging function,
+ * @return price in bytes as fractional value
+ * for debug messages only */
+MEM_STATIC double ZSTD_fCost(U32 price)
+{
+    return (double)price / (BITCOST_MULTIPLIER*8);
+}
+#endif
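
ZSTD_fracWeight augments the integer bit count with a linear fractional term, giving prices 1/256-bit resolution once BITCOST_ACCURACY is 8 as selected above. Worked numbers for one input:

    /* rawStat = 1000:  stat = 1001, hb = ZSTD_highbit32(1001) = 9
     *   BWeight = 9 << 8            = 2304
     *   FWeight = (1001 << 8) >> 9  =  500
     *   weight  = 2804, i.e. 2804/256 = 10.95 "bits"
     * an affine approximation of log2(stat); the constant excess cancels
     * because prices are consumed as differences against the base prices */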
+
+static int ZSTD_compressedLiterals(optState_t const* const optPtr)
+{
+    return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed;
+}
+
+static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
+{
+    if (ZSTD_compressedLiterals(optPtr))
+        optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
+    optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
+    optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
+    optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
 }
 
 
-static void ZSTD_rescaleFreqs(optState_t* const optPtr,
-                              const BYTE* const src, size_t const srcSize)
+/* ZSTD_downscaleStat() :
+ * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)
+ * return the resulting sum of elements */
+static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)
 {
-    optPtr->staticPrices = 0;
+    U32 s, sum=0;
+    DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1);
+    assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);
+    for (s=0; s<lastEltIndex+1; s++) {
+        table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus));
+        sum += table[s];
+    }
+    return sum;
+}
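
So with ZSTD_FREQ_DIV at 4, a malus of 1 (used for litFreq below) divides prior counts by 32 and a malus of 0 by 16, flooring at 1 so no symbol ever becomes impossible. For instance:

    /* malus = 1:  table[s] = 1 + (table[s] >> 5)
     *   100 -> 4,   31 -> 1,   0 -> 1 */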
 
-    if (optPtr->litLengthSum == 0) {  /* first init */
-        unsigned u;
-        if (srcSize <= 1024) optPtr->staticPrices = 1;
+/* ZSTD_rescaleFreqs() :
+ * if first block (detected by optPtr->litLengthSum == 0) : init statistics
+ *    take hints from dictionary if there is one
+ *    or init from zero, using src for literals stats, or flat 1 for match symbols
+ * otherwise downscale existing stats, to be used as seed for next block.
+ */
+static void
+ZSTD_rescaleFreqs(optState_t* const optPtr,
+            const BYTE* const src, size_t const srcSize,
+                  int const optLevel)
+{
+    int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
+    DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
+    optPtr->priceType = zop_dynamic;
 
-        assert(optPtr->litFreq!=NULL);
-        for (u=0; u<=MaxLit; u++)
-            optPtr->litFreq[u] = 0;
-        for (u=0; u<srcSize; u++)
-            optPtr->litFreq[src[u]]++;
-        optPtr->litSum = 0;
-        for (u=0; u<=MaxLit; u++) {
-            optPtr->litFreq[u] = 1 + (optPtr->litFreq[u] >> ZSTD_FREQ_DIV);
-            optPtr->litSum += optPtr->litFreq[u];
+    if (optPtr->litLengthSum == 0) {  /* first block : init */
+        if (srcSize <= ZSTD_PREDEF_THRESHOLD) {  /* heuristic */
+            DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
+            optPtr->priceType = zop_predef;
         }
 
-        for (u=0; u<=MaxLL; u++)
-            optPtr->litLengthFreq[u] = 1;
-        optPtr->litLengthSum = MaxLL+1;
-        for (u=0; u<=MaxML; u++)
-            optPtr->matchLengthFreq[u] = 1;
-        optPtr->matchLengthSum = MaxML+1;
-        for (u=0; u<=MaxOff; u++)
-            optPtr->offCodeFreq[u] = 1;
-        optPtr->offCodeSum = (MaxOff+1);
+        assert(optPtr->symbolCosts != NULL);
+        if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
+            /* huffman table presumed generated by dictionary */
+            optPtr->priceType = zop_dynamic;
 
-    } else {
-        unsigned u;
+            if (compressedLiterals) {
+                unsigned lit;
+                assert(optPtr->litFreq != NULL);
+                optPtr->litSum = 0;
+                for (lit=0; lit<=MaxLit; lit++) {
+                    U32 const scaleLog = 11;   /* scale to 2K */
+                    U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
+                    assert(bitCost <= scaleLog);
+                    optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+                    optPtr->litSum += optPtr->litFreq[lit];
+            }   }
 
-        optPtr->litSum = 0;
-        for (u=0; u<=MaxLit; u++) {
-            optPtr->litFreq[u] = 1 + (optPtr->litFreq[u] >> (ZSTD_FREQ_DIV+1));
-            optPtr->litSum += optPtr->litFreq[u];
+            {   unsigned ll;
+                FSE_CState_t llstate;
+                FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
+                optPtr->litLengthSum = 0;
+                for (ll=0; ll<=MaxLL; ll++) {
+                    U32 const scaleLog = 10;   /* scale to 1K */
+                    U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);
+                    assert(bitCost < scaleLog);
+                    optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+                    optPtr->litLengthSum += optPtr->litLengthFreq[ll];
+            }   }
+
+            {   unsigned ml;
+                FSE_CState_t mlstate;
+                FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);
+                optPtr->matchLengthSum = 0;
+                for (ml=0; ml<=MaxML; ml++) {
+                    U32 const scaleLog = 10;
+                    U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);
+                    assert(bitCost < scaleLog);
+                    optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+                    optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];
+            }   }
+
+            {   unsigned of;
+                FSE_CState_t ofstate;
+                FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);
+                optPtr->offCodeSum = 0;
+                for (of=0; of<=MaxOff; of++) {
+                    U32 const scaleLog = 10;
+                    U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);
+                    assert(bitCost < scaleLog);
+                    optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+                    optPtr->offCodeSum += optPtr->offCodeFreq[of];
+            }   }
+
+        } else {  /* not a dictionary */
+
+            assert(optPtr->litFreq != NULL);
+            if (compressedLiterals) {
+                unsigned lit = MaxLit;
+                HIST_count_simple(optPtr->litFreq, &lit, src, srcSize);   /* use raw first block to init statistics */
+                optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
+            }
+
+            {   unsigned ll;
+                for (ll=0; ll<=MaxLL; ll++)
+                    optPtr->litLengthFreq[ll] = 1;
+            }
+            optPtr->litLengthSum = MaxLL+1;
+
+            {   unsigned ml;
+                for (ml=0; ml<=MaxML; ml++)
+                    optPtr->matchLengthFreq[ml] = 1;
+            }
+            optPtr->matchLengthSum = MaxML+1;
+
+            {   unsigned of;
+                for (of=0; of<=MaxOff; of++)
+                    optPtr->offCodeFreq[of] = 1;
+            }
+            optPtr->offCodeSum = MaxOff+1;
+
         }
-        optPtr->litLengthSum = 0;
-        for (u=0; u<=MaxLL; u++) {
-            optPtr->litLengthFreq[u] = 1 + (optPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1));
-            optPtr->litLengthSum += optPtr->litLengthFreq[u];
-        }
-        optPtr->matchLengthSum = 0;
-        for (u=0; u<=MaxML; u++) {
-            optPtr->matchLengthFreq[u] = 1 + (optPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV);
-            optPtr->matchLengthSum += optPtr->matchLengthFreq[u];
-        }
-        optPtr->offCodeSum = 0;
-        for (u=0; u<=MaxOff; u++) {
-            optPtr->offCodeFreq[u] = 1 + (optPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV);
-            optPtr->offCodeSum += optPtr->offCodeFreq[u];
-        }
+
+    } else {   /* new block : re-use previous statistics, scaled down */
+
+        if (compressedLiterals)
+            optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
+        optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
+        optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
+        optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
     }
 
-    ZSTD_setLog2Prices(optPtr);
+    ZSTD_setBasePrices(optPtr, optLevel);
 }
 
-
 /* ZSTD_rawLiteralsCost() :
- * cost of literals (only) in given segment (which length can be null)
- * does not include cost of literalLength symbol */
+ * price of literals (only) in the specified segment (whose length can be 0).
+ * does not include price of literalLength symbol */
 static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
-                                const optState_t* const optPtr)
+                                const optState_t* const optPtr,
+                                int optLevel)
 {
-    if (optPtr->staticPrices) return (litLength*6);  /* 6 bit per literal - no statistic used */
     if (litLength == 0) return 0;
 
-    /* literals */
-    {   U32 u;
-        U32 cost = litLength * optPtr->log2litSum;
-        for (u=0; u < litLength; u++)
-            cost -= ZSTD_highbit32(optPtr->litFreq[literals[u]]+1);
-        return cost;
-    }
-}
+    if (!ZSTD_compressedLiterals(optPtr))
+        return (litLength << 3) * BITCOST_MULTIPLIER;  /* Uncompressed - 8 bits per literal. */
 
-/* ZSTD_litLengthPrice() :
- * cost of literalLength symbol */
-static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr)
-{
-    if (optPtr->staticPrices) return ZSTD_highbit32((U32)litLength+1);
+    if (optPtr->priceType == zop_predef)
+        return (litLength*6) * BITCOST_MULTIPLIER;  /* 6 bits per literal - no statistics used */
 
-    /* literal Length */
-    {   U32 const llCode = ZSTD_LLcode(litLength);
-        U32 const price = LL_bits[llCode] + optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[llCode]+1);
+    /* dynamic statistics */
+    {   U32 price = litLength * optPtr->litSumBasePrice;
+        U32 u;
+        for (u=0; u < litLength; u++) {
+            assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice);   /* literal cost should never be negative */
+            price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
+        }
         return price;
     }
 }
 
 /* ZSTD_litLengthPrice() :
- * cost of the literal part of a sequence,
- * including literals themselves, and literalLength symbol */
-static U32 ZSTD_fullLiteralsCost(const BYTE* const literals, U32 const litLength,
-                                 const optState_t* const optPtr)
+ * cost of literalLength symbol */
+static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
 {
-    return ZSTD_rawLiteralsCost(literals, litLength, optPtr)
-         + ZSTD_litLengthPrice(litLength, optPtr);
+    if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);
+
+    /* dynamic statistics */
+    {   U32 const llCode = ZSTD_LLcode(litLength);
+        return (LL_bits[llCode] * BITCOST_MULTIPLIER)
+             + optPtr->litLengthSumBasePrice
+             - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
+    }
 }
 
 /* ZSTD_litLengthContribution() :
  * @return ( cost(litlength) - cost(0) )
  * this value can then be added to rawLiteralsCost()
  * to provide a cost which is directly comparable to a match ending at same position */
-static int ZSTD_litLengthContribution(U32 const litLength, const optState_t* const optPtr)
+static int ZSTD_litLengthContribution(U32 const litLength, const optState_t* const optPtr, int optLevel)
 {
-    if (optPtr->staticPrices) return ZSTD_highbit32(litLength+1);
+    if (optPtr->priceType >= zop_predef) return (int)WEIGHT(litLength, optLevel);
 
-    /* literal Length */
+    /* dynamic statistics */
     {   U32 const llCode = ZSTD_LLcode(litLength);
-        int const contribution = LL_bits[llCode]
-                        + ZSTD_highbit32(optPtr->litLengthFreq[0]+1)
-                        - ZSTD_highbit32(optPtr->litLengthFreq[llCode]+1);
+        int const contribution = (int)(LL_bits[llCode] * BITCOST_MULTIPLIER)
+                               + (int)WEIGHT(optPtr->litLengthFreq[0], optLevel)   /* note: log2litLengthSum cancel out */
+                               - (int)WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
 #if 1
         return contribution;
 #else
@@ -155,10 +275,11 @@
  * which can be compared to the ending cost of a match
  * should a new match start at this position */
 static int ZSTD_literalsContribution(const BYTE* const literals, U32 const litLength,
-                                     const optState_t* const optPtr)
+                                     const optState_t* const optPtr,
+                                     int optLevel)
 {
-    int const contribution = ZSTD_rawLiteralsCost(literals, litLength, optPtr)
-                           + ZSTD_litLengthContribution(litLength, optPtr);
+    int const contribution = (int)ZSTD_rawLiteralsCost(literals, litLength, optPtr, optLevel)
+                           + ZSTD_litLengthContribution(litLength, optPtr, optLevel);
     return contribution;
 }
 
@@ -166,37 +287,45 @@
  * Provides the cost of the match part (offset + matchLength) of a sequence
  * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
  * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */
-FORCE_INLINE_TEMPLATE U32 ZSTD_getMatchPrice(
-                                    U32 const offset, U32 const matchLength,
-                                    const optState_t* const optPtr,
-                                    int const optLevel)
+FORCE_INLINE_TEMPLATE U32
+ZSTD_getMatchPrice(U32 const offset,
+                   U32 const matchLength,
+             const optState_t* const optPtr,
+                   int const optLevel)
 {
     U32 price;
     U32 const offCode = ZSTD_highbit32(offset+1);
     U32 const mlBase = matchLength - MINMATCH;
     assert(matchLength >= MINMATCH);
 
-    if (optPtr->staticPrices)  /* fixed scheme, do not use statistics */
-        return ZSTD_highbit32((U32)mlBase+1) + 16 + offCode;
+    if (optPtr->priceType == zop_predef)  /* fixed scheme, do not use statistics */
+        return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);
 
-    price = offCode + optPtr->log2offCodeSum - ZSTD_highbit32(optPtr->offCodeFreq[offCode]+1);
-    if ((optLevel<2) /*static*/ && offCode >= 20) price += (offCode-19)*2; /* handicap for long distance offsets, favor decompression speed */
+    /* dynamic statistics */
+    price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
+    if ((optLevel<2) /*static*/ && offCode >= 20)
+        price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long-distance offsets, to favor decompression speed */
 
     /* match Length */
     {   U32 const mlCode = ZSTD_MLcode(mlBase);
-        price += ML_bits[mlCode] + optPtr->log2matchLengthSum - ZSTD_highbit32(optPtr->matchLengthFreq[mlCode]+1);
+        price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
     }
 
+    price += BITCOST_MULTIPLIER / 5;   /* heuristic : make matches a bit more costly, to favor fewer sequences -> faster decompression speed */
+
     DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
     return price;
 }
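
Tracing ZSTD_getMatchPrice for one concrete case (dynamic statistics, optLevel < 2; numbers illustrative):

    /* offset = 1<<20  =>  offCode = ZSTD_highbit32(offset+1) = 20
     *   offset term : 20*256 + (offCodeSumBasePrice - WEIGHT(offCodeFreq[20]))
     *   handicap    : (20-19)*2*256 = 512   (applies since offCode >= 20)
     *   length term : ML_bits[mlCode]*256 + (matchLengthSumBasePrice - WEIGHT(...))
     *   flat bias   : 256/5 = 51, nudging the parser toward fewer sequences */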
 
+/* ZSTD_updateStats() :
+ * assumption : literals + litLength <= iend */
 static void ZSTD_updateStats(optState_t* const optPtr,
                              U32 litLength, const BYTE* literals,
                              U32 offsetCode, U32 matchLength)
 {
     /* literals */
-    {   U32 u;
+    if (ZSTD_compressedLiterals(optPtr)) {
+        U32 u;
         for (u=0; u < litLength; u++)
             optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
         optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
@@ -243,13 +372,15 @@
 
 /* Update hashTable3 up to ip (excluded)
    Assumption : always within prefix (i.e. not within extDict) */
-static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms, const BYTE* const ip)
+static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms,
+                                              U32* nextToUpdate3,
+                                              const BYTE* const ip)
 {
     U32* const hashTable3 = ms->hashTable3;
     U32 const hashLog3 = ms->hashLog3;
     const BYTE* const base = ms->window.base;
-    U32 idx = ms->nextToUpdate3;
-    U32 const target = ms->nextToUpdate3 = (U32)(ip - base);
+    U32 idx = *nextToUpdate3;
+    U32 const target = (U32)(ip - base);
     size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
     assert(hashLog3 > 0);
 
@@ -258,6 +389,7 @@
         idx++;
     }
 
+    *nextToUpdate3 = target;
     return hashTable3[hash3];
 }
 
@@ -269,10 +401,11 @@
  *  ip : assumed <= iend-8 .
  * @return : nb of positions added */
 static U32 ZSTD_insertBt1(
-                ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                ZSTD_matchState_t* ms,
                 const BYTE* const ip, const BYTE* const iend,
-                U32 const mls, U32 const extDict)
+                U32 const mls, const int extDict)
 {
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32*   const hashTable = ms->hashTable;
     U32    const hashLog = cParams->hashLog;
     size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
@@ -308,7 +441,8 @@
     assert(ip <= iend-8);   /* required for h calculation */
     hashTable[h] = current;   /* Update Hash Table */
 
-    while (nbCompares-- && (matchIndex > windowLow)) {
+    assert(windowLow > 0);
+    while (nbCompares-- && (matchIndex >= windowLow)) {
         U32* const nextPtr = bt + 2*(matchIndex & btMask);
         size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
         assert(matchIndex < current);
@@ -334,8 +468,8 @@
         }
 #endif
 
-        if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
-            assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if extDict is incorrectly set to 0 */
+        if (!extDict || (matchIndex+matchLength >= dictLimit)) {
+            assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if actually extDict */
             match = base + matchIndex;
             matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
         } else {
@@ -372,43 +506,53 @@
     }   }
 
     *smallerPtr = *largerPtr = 0;
-    if (bestLength > 384) return MIN(192, (U32)(bestLength - 384));   /* speed optimization */
-    assert(matchEndIdx > current + 8);
-    return matchEndIdx - (current + 8);
+    {   U32 positions = 0;
+        if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384));   /* speed optimization */
+        assert(matchEndIdx > current + 8);
+        return MAX(positions, matchEndIdx - (current + 8));
+    }
 }
 
 FORCE_INLINE_TEMPLATE
 void ZSTD_updateTree_internal(
-                ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                ZSTD_matchState_t* ms,
                 const BYTE* const ip, const BYTE* const iend,
-                const U32 mls, const U32 extDict)
+                const U32 mls, const ZSTD_dictMode_e dictMode)
 {
     const BYTE* const base = ms->window.base;
     U32 const target = (U32)(ip - base);
     U32 idx = ms->nextToUpdate;
-    DEBUGLOG(7, "ZSTD_updateTree_internal, from %u to %u  (extDict:%u)",
-                idx, target, extDict);
+    DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u  (dictMode:%u)",
+                idx, target, dictMode);
 
-    while(idx < target)
-        idx += ZSTD_insertBt1(ms, cParams, base+idx, iend, mls, extDict);
+    while(idx < target) {
+        U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict);
+        assert(idx < (U32)(idx + forward));
+        idx += forward;
+    }
+    assert((size_t)(ip - base) <= (size_t)(U32)(-1));
+    assert((size_t)(iend - base) <= (size_t)(U32)(-1));
     ms->nextToUpdate = target;
 }
 
-void ZSTD_updateTree(
-                ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-                const BYTE* ip, const BYTE* iend)
-{
-    ZSTD_updateTree_internal(ms, cParams, ip, iend, cParams->searchLength, 0 /*extDict*/);
+void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
+    ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
 }
 
 FORCE_INLINE_TEMPLATE
 U32 ZSTD_insertBtAndGetAllMatches (
-                    ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-                    const BYTE* const ip, const BYTE* const iLimit, int const extDict,
-                    U32 rep[ZSTD_REP_NUM], U32 const ll0,
-                    ZSTD_match_t* matches, const U32 lengthToBeat, U32 const mls /* template */)
+                    ZSTD_match_t* matches,   /* store result (found matches) in this table (presumed large enough) */
+                    ZSTD_matchState_t* ms,
+                    U32* nextToUpdate3,
+                    const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
+                    const U32 rep[ZSTD_REP_NUM],
+                    U32 const ll0,   /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
+                    const U32 lengthToBeat,
+                    U32 const mls /* template */)
 {
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
+    U32 const maxDistance = 1U << cParams->windowLog;
     const BYTE* const base = ms->window.base;
     U32 const current = (U32)(ip-base);
     U32 const hashLog = cParams->hashLog;
@@ -424,8 +568,10 @@
     U32 const dictLimit = ms->window.dictLimit;
     const BYTE* const dictEnd = dictBase + dictLimit;
     const BYTE* const prefixStart = base + dictLimit;
-    U32 const btLow = btMask >= current ? 0 : current - btMask;
-    U32 const windowLow = ms->window.lowLimit;
+    U32 const btLow = (btMask >= current) ? 0 : current - btMask;
+    U32 const windowValid = ms->window.lowLimit;
+    U32 const windowLow = ((current - windowValid) > maxDistance) ? current - maxDistance : windowValid;
+    U32 const matchLow = windowLow ? windowLow : 1;
     U32* smallerPtr = bt + 2*(current&btMask);
     U32* largerPtr  = bt + 2*(current&btMask) + 1;
     U32 matchEndIdx = current+8+1;   /* farthest referenced position of any match => detects repetitive patterns */
@@ -433,10 +579,24 @@
     U32 mnum = 0;
     U32 nbCompares = 1U << cParams->searchLog;
 
+    const ZSTD_matchState_t* dms    = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
+    const ZSTD_compressionParameters* const dmsCParams =
+                                      dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
+    const BYTE* const dmsBase       = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
+    const BYTE* const dmsEnd        = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL;
+    U32         const dmsHighLimit  = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0;
+    U32         const dmsLowLimit   = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0;
+    U32         const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0;
+    U32         const dmsHashLog    = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog;
+    U32         const dmsBtLog      = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog;
+    U32         const dmsBtMask     = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;
+    U32         const dmsBtLow      = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;
+
     size_t bestLength = lengthToBeat-1;
-    DEBUGLOG(7, "ZSTD_insertBtAndGetAllMatches");
+    DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", current);
 
     /* check repCode */
+    assert(ll0 <= 1);   /* necessarily 1 or 0 */
     {   U32 const lastR = ZSTD_REP_NUM + ll0;
         U32 repCode;
         for (repCode = ll0; repCode < lastR; repCode++) {
@@ -449,18 +609,26 @@
                     repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
                 }
             } else {  /* repIndex < dictLimit || repIndex >= current */
-                const BYTE* const repMatch = dictBase + repIndex;
+                const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
+                                             dmsBase + repIndex - dmsIndexDelta :
+                                             dictBase + repIndex;
                 assert(current >= windowLow);
-                if ( extDict /* this case only valid in extDict mode */
+                if ( dictMode == ZSTD_extDict
                   && ( ((repOffset-1) /*intentional overflow*/ < current - windowLow)  /* equivalent to `current > repIndex >= windowLow` */
                      & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
                   && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
                     repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
+                }
+                if (dictMode == ZSTD_dictMatchState
+                  && ( ((repOffset-1) /*intentional overflow*/ < current - (dmsLowLimit + dmsIndexDelta))  /* equivalent to `current > repIndex >= dmsLowLimit` */
+                     & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
+                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
+                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
             }   }
             /* save longer solution */
             if (repLen > bestLength) {
-                DEBUGLOG(8, "found rep-match %u of length %u",
-                            repCode - ll0, (U32)repLen);
+                DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
+                            repCode, ll0, repOffset, repLen);
                 bestLength = repLen;
                 matches[mnum].off = repCode - ll0;
                 matches[mnum].len = (U32)repLen;
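The `(repOffset-1) < current - windowLow` test in the hunk above folds two range checks into one unsigned comparison. With `repIndex = current - repOffset`, a `repOffset` of 0 wraps to `UINT_MAX` and fails; otherwise the test reduces to `repIndex >= windowLow`, so together it is exactly `current > repIndex >= windowLow`. A standalone sketch (hypothetical helper and values, not from the patch):

    #include <assert.h>
    typedef unsigned U32;

    /* sketch : one unsigned compare <=> current > repIndex >= windowLow */
    static int repOffsetIsValid(U32 current, U32 repOffset, U32 windowLow)
    {
        return (repOffset - 1) < (current - windowLow);  /* intentional overflow */
    }

    int main(void)
    {
        assert(!repOffsetIsValid(100,  0, 10));  /* repIndex == current   : rejected */
        assert( repOffsetIsValid(100, 90, 10));  /* repIndex == windowLow : accepted */
        assert(!repOffsetIsValid(100, 91, 10));  /* repIndex <  windowLow : rejected */
        return 0;
    }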
@@ -472,11 +640,11 @@
 
     /* HC3 match finder */
     if ((mls == 3) /*static*/ && (bestLength < mls)) {
-        U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, ip);
-        if ((matchIndex3 > windowLow)
+        U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip);
+        if ((matchIndex3 >= matchLow)
           & (current - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
             size_t mlen;
-            if ((!extDict) /*static*/ || (matchIndex3 >= dictLimit)) {
+            if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
                 const BYTE* const match = base + matchIndex3;
                 mlen = ZSTD_count(ip, match, iLimit);
             } else {
@@ -498,17 +666,19 @@
                      (ip+mlen == iLimit) ) {  /* best possible length */
                     ms->nextToUpdate = current+1;  /* skip insertion */
                     return 1;
-    }   }   }   }
+        }   }   }
+        /* no dictMatchState lookup: dicts don't have a populated HC3 table */
+    }
 
     hashTable[h] = current;   /* Update Hash Table */
 
-    while (nbCompares-- && (matchIndex > windowLow)) {
+    while (nbCompares-- && (matchIndex >= matchLow)) {
         U32* const nextPtr = bt + 2*(matchIndex & btMask);
         size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
         const BYTE* match;
         assert(current > matchIndex);
 
-        if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
+        if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
             assert(matchIndex+matchLength >= dictLimit);  /* ensure the condition is correct when !extDict */
             match = base + matchIndex;
             matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
@@ -520,8 +690,8 @@
         }
 
         if (matchLength > bestLength) {
-            DEBUGLOG(8, "found match of length %u at distance %u",
-                    (U32)matchLength, current - matchIndex);
+            DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
+                    (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE);
             assert(matchEndIdx > matchIndex);
             if (matchLength > matchEndIdx - matchIndex)
                 matchEndIdx = matchIndex + (U32)matchLength;
@@ -529,9 +699,10 @@
             matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;
             matches[mnum].len = (U32)matchLength;
             mnum++;
-            if (matchLength > ZSTD_OPT_NUM) break;
-            if (ip+matchLength == iLimit) {  /* equal : no way to know if inf or sup */
-                break;   /* drop, to preserve bt consistency (miss a little bit of compression) */
+            if ( (matchLength > ZSTD_OPT_NUM)
+               | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
+                if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
+                break; /* drop, to preserve bt consistency (miss a little bit of compression) */
             }
         }
 
@@ -552,6 +723,47 @@
 
     *smallerPtr = *largerPtr = 0;
 
+    if (dictMode == ZSTD_dictMatchState && nbCompares) {
+        size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
+        U32 dictMatchIndex = dms->hashTable[dmsH];
+        const U32* const dmsBt = dms->chainTable;
+        commonLengthSmaller = commonLengthLarger = 0;
+        while (nbCompares-- && (dictMatchIndex > dmsLowLimit)) {
+            const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
+            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
+            const BYTE* match = dmsBase + dictMatchIndex;
+            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart);
+            if (dictMatchIndex+matchLength >= dmsHighLimit)
+                match = base + dictMatchIndex + dmsIndexDelta;   /* to prepare for next usage of match[matchLength] */
+
+            if (matchLength > bestLength) {
+                matchIndex = dictMatchIndex + dmsIndexDelta;
+                DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
+                        (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE);
+                if (matchLength > matchEndIdx - matchIndex)
+                    matchEndIdx = matchIndex + (U32)matchLength;
+                bestLength = matchLength;
+                matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;
+                matches[mnum].len = (U32)matchLength;
+                mnum++;
+                if ( (matchLength > ZSTD_OPT_NUM)
+                   | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
+                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
+                }
+            }
+
+            if (dictMatchIndex <= dmsBtLow) { break; }   /* beyond tree size, stop the search */
+            if (match[matchLength] < ip[matchLength]) {
+                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
+                dictMatchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
+            } else {
+                /* match is larger than current */
+                commonLengthLarger = matchLength;
+                dictMatchIndex = nextPtr[0];
+            }
+        }
+    }
+
     assert(matchEndIdx > current+8);
     ms->nextToUpdate = matchEndIdx - 8;  /* skip repetitive patterns */
     return mnum;
@@ -559,23 +771,27 @@
 
 
 FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-                        const BYTE* ip, const BYTE* const iHighLimit, int const extDict,
-                        U32 rep[ZSTD_REP_NUM], U32 const ll0,
-                        ZSTD_match_t* matches, U32 const lengthToBeat)
+                        ZSTD_match_t* matches,   /* store result (match found, increasing size) in this table */
+                        ZSTD_matchState_t* ms,
+                        U32* nextToUpdate3,
+                        const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode,
+                        const U32 rep[ZSTD_REP_NUM],
+                        U32 const ll0,
+                        U32 const lengthToBeat)
 {
-    U32 const matchLengthSearch = cParams->searchLength;
-    DEBUGLOG(7, "ZSTD_BtGetAllMatches");
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
+    U32 const matchLengthSearch = cParams->minMatch;
+    DEBUGLOG(8, "ZSTD_BtGetAllMatches");
     if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
-    ZSTD_updateTree_internal(ms, cParams, ip, iHighLimit, matchLengthSearch, extDict);
+    ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode);
     switch(matchLengthSearch)
     {
-    case 3 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 3);
+    case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3);
     default :
-    case 4 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 4);
-    case 5 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 5);
+    case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4);
+    case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5);
     case 7 :
-    case 6 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 6);
+    case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6);
     }
 }
 
@@ -587,7 +803,7 @@
     U32 rep[3];
 } repcodes_t;
 
-repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
+static repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
 {
     repcodes_t newReps;
     if (offset >= ZSTD_REP_NUM) {  /* full offset */
@@ -609,65 +825,35 @@
 }
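For reference, the offset convention this update implements: `offset >= ZSTD_REP_NUM` carries a full offset of `offset - ZSTD_REP_MOVE`, while anything smaller is an index into the 3-slot repcode history, shifted by one when the preceding literal length is zero (`ll0`). A sketch of the resulting transitions (hypothetical history; `ZSTD_REP_MOVE == ZSTD_REP_NUM - 1` per the library's internal definitions):

    /* starting history */
    U32 rep[3] = { 4, 8, 16 };
    /* full offset (offset >= ZSTD_REP_NUM) : shift everything down
     *   offset == 100 + ZSTD_REP_MOVE  =>  rep = { 100, 4, 8 }          */
    /* repCode 0 (offset 0, llen > 0)  : rep[0] re-used, history unchanged */
    /* repCode 1 (offset 1 with llen > 0, or offset 0 with ll0) :
     *   swap rep[0]/rep[1]            =>  rep = { 8, 4, 16 }            */
    /* repCode ZSTD_REP_NUM (offset 2 with ll0) : special case rep[0]-1
     *                                 =>  rep = { 3, 4, 8 }             */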
 
 
-typedef struct {
-    const BYTE* anchor;
-    U32 litlen;
-    U32 rawLitCost;
-} cachedLiteralPrice_t;
-
-static U32 ZSTD_rawLiteralsCost_cached(
-                            cachedLiteralPrice_t* const cachedLitPrice,
-                            const BYTE* const anchor, U32 const litlen,
-                            const optState_t* const optStatePtr)
+static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
 {
-    U32 startCost;
-    U32 remainingLength;
-    const BYTE* startPosition;
+    return sol.litlen + sol.mlen;
+}
 
-    if (anchor == cachedLitPrice->anchor) {
-        startCost = cachedLitPrice->rawLitCost;
-        startPosition = anchor + cachedLitPrice->litlen;
-        assert(litlen >= cachedLitPrice->litlen);
-        remainingLength = litlen - cachedLitPrice->litlen;
-    } else {
-        startCost = 0;
-        startPosition = anchor;
-        remainingLength = litlen;
+#if 0 /* debug */
+
+static void
+listStats(const U32* table, int lastEltID)
+{
+    int const nbElts = lastEltID + 1;
+    int enb;
+    for (enb=0; enb < nbElts; enb++) {
+        (void)table;
+        //RAWLOG(2, "%3i:%3i,  ", enb, table[enb]);
+        RAWLOG(2, "%4i,", table[enb]);
     }
-
-    {   U32 const rawLitCost = startCost + ZSTD_rawLiteralsCost(startPosition, remainingLength, optStatePtr);
-        cachedLitPrice->anchor = anchor;
-        cachedLitPrice->litlen = litlen;
-        cachedLitPrice->rawLitCost = rawLitCost;
-        return rawLitCost;
-    }
+    RAWLOG(2, " \n");
 }
 
-static U32 ZSTD_fullLiteralsCost_cached(
-                            cachedLiteralPrice_t* const cachedLitPrice,
-                            const BYTE* const anchor, U32 const litlen,
-                            const optState_t* const optStatePtr)
-{
-    return ZSTD_rawLiteralsCost_cached(cachedLitPrice, anchor, litlen, optStatePtr)
-         + ZSTD_litLengthPrice(litlen, optStatePtr);
-}
+#endif
 
-static int ZSTD_literalsContribution_cached(
-                            cachedLiteralPrice_t* const cachedLitPrice,
-                            const BYTE* const anchor, U32 const litlen,
-                            const optState_t* const optStatePtr)
-{
-    int const contribution = ZSTD_rawLiteralsCost_cached(cachedLitPrice, anchor, litlen, optStatePtr)
-                           + ZSTD_litLengthContribution(litlen, optStatePtr);
-    return contribution;
-}
-
-FORCE_INLINE_TEMPLATE
-size_t ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,seqStore_t* seqStore,
-                                      U32 rep[ZSTD_REP_NUM],
-                                      ZSTD_compressionParameters const* cParams,
-                                      const void* src, size_t srcSize,
-                                      const int optLevel, const int extDict)
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
+                               seqStore_t* seqStore,
+                               U32 rep[ZSTD_REP_NUM],
+                         const void* src, size_t srcSize,
+                         const int optLevel,
+                         const ZSTD_dictMode_e dictMode)
 {
     optState_t* const optStatePtr = &ms->opt;
     const BYTE* const istart = (const BYTE*)src;
@@ -677,72 +863,77 @@
     const BYTE* const ilimit = iend - 8;
     const BYTE* const base = ms->window.base;
     const BYTE* const prefixStart = base + ms->window.dictLimit;
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
 
     U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
-    U32 const minMatch = (cParams->searchLength == 3) ? 3 : 4;
+    U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
+    U32 nextToUpdate3 = ms->nextToUpdate;
 
     ZSTD_optimal_t* const opt = optStatePtr->priceTable;
     ZSTD_match_t* const matches = optStatePtr->matchTable;
-    cachedLiteralPrice_t cachedLitPrice;
+    ZSTD_optimal_t lastSequence;
 
     /* init */
-    DEBUGLOG(5, "ZSTD_compressBlock_opt_generic");
-    ms->nextToUpdate3 = ms->nextToUpdate;
-    ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize);
+    DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
+                (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);
+    assert(optLevel <= 2);
+    ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
     ip += (ip==prefixStart);
-    memset(&cachedLitPrice, 0, sizeof(cachedLitPrice));
 
     /* Match Loop */
     while (ip < ilimit) {
         U32 cur, last_pos = 0;
-        U32 best_mlen, best_off;
 
         /* find first match */
         {   U32 const litlen = (U32)(ip - anchor);
             U32 const ll0 = !litlen;
-            U32 const nbMatches = ZSTD_BtGetAllMatches(ms, cParams, ip, iend, extDict, rep, ll0, matches, minMatch);
+            U32 const nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch);
             if (!nbMatches) { ip++; continue; }
 
             /* initialize opt[0] */
             { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
-            opt[0].mlen = 1;
+            opt[0].mlen = 0;  /* means is_a_literal */
             opt[0].litlen = litlen;
+            opt[0].price = ZSTD_literalsContribution(anchor, litlen, optStatePtr, optLevel);
 
             /* large match -> immediate encoding */
             {   U32 const maxML = matches[nbMatches-1].len;
-                DEBUGLOG(7, "found %u matches of maxLength=%u and offset=%u at cPos=%u => start new serie",
-                            nbMatches, maxML, matches[nbMatches-1].off, (U32)(ip-prefixStart));
+                U32 const maxOffset = matches[nbMatches-1].off;
+                DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
+                            nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));
 
                 if (maxML > sufficient_len) {
-                    best_mlen = maxML;
-                    best_off = matches[nbMatches-1].off;
-                    DEBUGLOG(7, "large match (%u>%u), immediate encoding",
-                                best_mlen, sufficient_len);
+                    lastSequence.litlen = litlen;
+                    lastSequence.mlen = maxML;
+                    lastSequence.off = maxOffset;
+                    DEBUGLOG(6, "large match (%u>%u), immediate encoding",
+                                maxML, sufficient_len);
                     cur = 0;
-                    last_pos = 1;
+                    last_pos = ZSTD_totalLen(lastSequence);
                     goto _shortestPath;
             }   }
 
             /* set prices for first matches starting position == 0 */
-            {   U32 const literalsPrice = ZSTD_fullLiteralsCost_cached(&cachedLitPrice, anchor, litlen, optStatePtr);
+            {   U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
                 U32 pos;
                 U32 matchNb;
-                for (pos = 0; pos < minMatch; pos++) {
-                    opt[pos].mlen = 1;
-                    opt[pos].price = ZSTD_MAX_PRICE;
+                for (pos = 1; pos < minMatch; pos++) {
+                    opt[pos].price = ZSTD_MAX_PRICE;   /* mlen, litlen and price will be fixed during forward scanning */
                 }
                 for (matchNb = 0; matchNb < nbMatches; matchNb++) {
                     U32 const offset = matches[matchNb].off;
                     U32 const end = matches[matchNb].len;
                     repcodes_t const repHistory = ZSTD_updateRep(rep, offset, ll0);
                     for ( ; pos <= end ; pos++ ) {
-                        U32 const matchPrice = literalsPrice + ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
-                        DEBUGLOG(7, "rPos:%u => set initial price : %u",
-                                    pos, matchPrice);
+                        U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
+                        U32 const sequencePrice = literalsPrice + matchPrice;
+                        DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
+                                    pos, ZSTD_fCost(sequencePrice));
                         opt[pos].mlen = pos;
                         opt[pos].off = offset;
                         opt[pos].litlen = litlen;
-                        opt[pos].price = matchPrice;
+                        opt[pos].price = sequencePrice;
+                        ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory));
                         memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
                 }   }
                 last_pos = pos-1;
@@ -753,55 +944,67 @@
         for (cur = 1; cur <= last_pos; cur++) {
             const BYTE* const inr = ip + cur;
             assert(cur < ZSTD_OPT_NUM);
+            DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)
 
             /* Fix current position with one literal if cheaper */
-            {   U32 const litlen = (opt[cur-1].mlen == 1) ? opt[cur-1].litlen + 1 : 1;
-                int price;  /* note : contribution can be negative */
-                if (cur > litlen) {
-                    price = opt[cur - litlen].price + ZSTD_literalsContribution(inr-litlen, litlen, optStatePtr);
-                } else {
-                    price = ZSTD_literalsContribution_cached(&cachedLitPrice, anchor, litlen, optStatePtr);
-                }
+            {   U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
+                int const price = opt[cur-1].price
+                                + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
+                                + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
+                                - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
                 assert(price < 1000000000); /* overflow check */
                 if (price <= opt[cur].price) {
-                    DEBUGLOG(7, "rPos:%u : better price (%u<%u) using literal",
-                                cur, price, opt[cur].price);
-                    opt[cur].mlen = 1;
+                    DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
+                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
+                                opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
+                    opt[cur].mlen = 0;
                     opt[cur].off = 0;
                     opt[cur].litlen = litlen;
                     opt[cur].price = price;
                     memcpy(opt[cur].rep, opt[cur-1].rep, sizeof(opt[cur].rep));
-            }   }
+                } else {
+                    DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
+                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
+                                opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
+                }
+            }
 
             /* last match must start at a minimum distance of 8 from oend */
             if (inr > ilimit) continue;
 
             if (cur == last_pos) break;
 
-             if ( (optLevel==0) /*static*/
-               && (opt[cur+1].price <= opt[cur].price) )
+            if ( (optLevel==0) /*static_test*/
+              && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
+                DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
                 continue;  /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
+            }
 
-            {   U32 const ll0 = (opt[cur].mlen != 1);
-                U32 const litlen = (opt[cur].mlen == 1) ? opt[cur].litlen : 0;
-                U32 const previousPrice = (cur > litlen) ? opt[cur-litlen].price : 0;
-                U32 const basePrice = previousPrice + ZSTD_fullLiteralsCost(inr-litlen, litlen, optStatePtr);
-                U32 const nbMatches = ZSTD_BtGetAllMatches(ms, cParams, inr, iend, extDict, opt[cur].rep, ll0, matches, minMatch);
+            {   U32 const ll0 = (opt[cur].mlen != 0);
+                U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
+                U32 const previousPrice = opt[cur].price;
+                U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
+                U32 const nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch);
                 U32 matchNb;
-                if (!nbMatches) continue;
+                if (!nbMatches) {
+                    DEBUGLOG(7, "rPos:%u : no match found", cur);
+                    continue;
+                }
 
                 {   U32 const maxML = matches[nbMatches-1].len;
-                    DEBUGLOG(7, "rPos:%u, found %u matches, of maxLength=%u",
-                                cur, nbMatches, maxML);
+                    DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
+                                inr-istart, cur, nbMatches, maxML);
 
                     if ( (maxML > sufficient_len)
-                       | (cur + maxML >= ZSTD_OPT_NUM) ) {
-                        best_mlen = maxML;
-                        best_off = matches[nbMatches-1].off;
-                        last_pos = cur + 1;
+                      || (cur + maxML >= ZSTD_OPT_NUM) ) {
+                        lastSequence.mlen = maxML;
+                        lastSequence.off = matches[nbMatches-1].off;
+                        lastSequence.litlen = litlen;
+                        cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0;  /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case it's the first sequence, and that's okay */
+                        last_pos = cur + ZSTD_totalLen(lastSequence);
+                        if (cur > ZSTD_OPT_NUM) cur = 0;   /* underflow => first match */
                         goto _shortestPath;
-                    }
-                }
+                }   }
 
                 /* set prices using matches found at position == cur */
                 for (matchNb = 0; matchNb < nbMatches; matchNb++) {
@@ -811,113 +1014,233 @@
                     U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
                     U32 mlen;
 
-                    DEBUGLOG(7, "testing match %u => offCode=%u, mlen=%u, llen=%u",
+                    DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
                                 matchNb, matches[matchNb].off, lastML, litlen);
 
-                    for (mlen = lastML; mlen >= startML; mlen--) {
+                    for (mlen = lastML; mlen >= startML; mlen--) {  /* scan downward */
                         U32 const pos = cur + mlen;
                         int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
 
                         if ((pos > last_pos) || (price < opt[pos].price)) {
-                            DEBUGLOG(7, "rPos:%u => new better price (%u<%u)",
-                                        pos, price, opt[pos].price);
-                            while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; }
+                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
+                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
+                            while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; }   /* fill empty positions */
                             opt[pos].mlen = mlen;
                             opt[pos].off = offset;
                             opt[pos].litlen = litlen;
                             opt[pos].price = price;
+                            ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory));
                             memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
                         } else {
-                            if (optLevel==0) break;  /* gets ~+10% speed for about -0.01 ratio loss */
+                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
+                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
+                            if (optLevel==0) break;  /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
                         }
             }   }   }
         }  /* for (cur = 1; cur <= last_pos; cur++) */
 
-        best_mlen = opt[last_pos].mlen;
-        best_off = opt[last_pos].off;
-        cur = last_pos - best_mlen;
+        lastSequence = opt[last_pos];
+        cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0;  /* single sequence, and it starts before `ip` */
+        assert(cur < ZSTD_OPT_NUM);  /* control overflow */
 
 _shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */
-        assert(opt[0].mlen == 1);
+        assert(opt[0].mlen == 0);
 
-        /* reverse traversal */
-        DEBUGLOG(7, "start reverse traversal (last_pos:%u, cur:%u)",
-                    last_pos, cur);
-        {   U32 selectedMatchLength = best_mlen;
-            U32 selectedOffset = best_off;
-            U32 pos = cur;
-            while (1) {
-                U32 const mlen = opt[pos].mlen;
-                U32 const off = opt[pos].off;
-                opt[pos].mlen = selectedMatchLength;
-                opt[pos].off = selectedOffset;
-                selectedMatchLength = mlen;
-                selectedOffset = off;
-                if (mlen > pos) break;
-                pos -= mlen;
-        }   }
+        {   U32 const storeEnd = cur + 1;
+            U32 storeStart = storeEnd;
+            U32 seqPos = cur;
 
-        /* save sequences */
-        {   U32 pos;
-            for (pos=0; pos < last_pos; ) {
-                U32 const llen = (U32)(ip - anchor);
-                U32 const mlen = opt[pos].mlen;
-                U32 const offset = opt[pos].off;
-                if (mlen == 1) { ip++; pos++; continue; }  /* literal position => move on */
-                pos += mlen; ip += mlen;
+            DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
+                        last_pos, cur); (void)last_pos;
+            assert(storeEnd < ZSTD_OPT_NUM);
+            DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
+                        storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
+            opt[storeEnd] = lastSequence;
+            while (seqPos > 0) {
+                U32 const backDist = ZSTD_totalLen(opt[seqPos]);
+                storeStart--;
+                DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
+                            seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
+                opt[storeStart] = opt[seqPos];
+                seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
+            }
 
-                /* repcodes update : like ZSTD_updateRep(), but update in place */
-                if (offset >= ZSTD_REP_NUM) {  /* full offset */
-                    rep[2] = rep[1];
-                    rep[1] = rep[0];
-                    rep[0] = offset - ZSTD_REP_MOVE;
-                } else {   /* repcode */
-                    U32 const repCode = offset + (llen==0);
-                    if (repCode) {  /* note : if repCode==0, no change */
-                        U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
-                        if (repCode >= 2) rep[2] = rep[1];
-                        rep[1] = rep[0];
-                        rep[0] = currentOffset;
+            /* save sequences */
+            DEBUGLOG(6, "sending selected sequences into seqStore")
+            {   U32 storePos;
+                for (storePos=storeStart; storePos <= storeEnd; storePos++) {
+                    U32 const llen = opt[storePos].litlen;
+                    U32 const mlen = opt[storePos].mlen;
+                    U32 const offCode = opt[storePos].off;
+                    U32 const advance = llen + mlen;
+                    DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
+                                anchor - istart, (unsigned)llen, (unsigned)mlen);
+
+                    if (mlen==0) {  /* only literals => must be last "sequence", actually starting a new stream of sequences */
+                        assert(storePos == storeEnd);   /* must be last sequence */
+                        ip = anchor + llen;     /* last "sequence" is a bunch of literals => don't progress anchor */
+                        continue;   /* will finish */
                     }
-                }
 
-                ZSTD_updateStats(optStatePtr, llen, anchor, offset, mlen);
-                ZSTD_storeSeq(seqStore, llen, anchor, offset, mlen-MINMATCH);
-                anchor = ip;
-        }   }
-        ZSTD_setLog2Prices(optStatePtr);
+                    /* repcodes update : like ZSTD_updateRep(), but update in place */
+                    if (offCode >= ZSTD_REP_NUM) {  /* full offset */
+                        rep[2] = rep[1];
+                        rep[1] = rep[0];
+                        rep[0] = offCode - ZSTD_REP_MOVE;
+                    } else {   /* repcode */
+                        U32 const repCode = offCode + (llen==0);
+                        if (repCode) {  /* note : if repCode==0, no change */
+                            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+                            if (repCode >= 2) rep[2] = rep[1];
+                            rep[1] = rep[0];
+                            rep[0] = currentOffset;
+                    }   }
+
+                    assert(anchor + llen <= iend);
+                    ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
+                    ZSTD_storeSeq(seqStore, llen, anchor, offCode, mlen-MINMATCH);
+                    anchor += advance;
+                    ip = anchor;
+            }   }
+            ZSTD_setBasePrices(optStatePtr, optLevel);
+        }
+
     }   /* while (ip < ilimit) */
 
     /* Return the last literals size */
-    return iend - anchor;
+    return (size_t)(iend - anchor);
 }
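A note on the literal pricing inside the loop above: extending a literal run by one byte adds the byte's raw cost plus the marginal change of the literal-length header price, and over a run of k literals those marginal terms telescope. Sketched as a comment (the arithmetic restated, not code from the patch):

    /* price(k literals starting at p) = opt[p].price
     *     + rawLiteralsCost(the k bytes)              -- per-byte costs
     *     + litLengthPrice(k) - litLengthPrice(0)     -- telescoped ll terms
     * which is why literalsPrice / basePrice add litLengthPrice(0) back
     * before pricing the match that ends the run. */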
 
 
 size_t ZSTD_compressBlock_btopt(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        const void* src, size_t srcSize)
 {
     DEBUGLOG(5, "ZSTD_compressBlock_btopt");
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 0 /*optLevel*/, 0 /*extDict*/);
+    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict);
+}
+
+
+/* used in 2-pass strategy */
+static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
+{
+    U32 s, sum=0;
+    assert(ZSTD_FREQ_DIV+bonus >= 0);
+    for (s=0; s<lastEltIndex+1; s++) {
+        table[s] <<= ZSTD_FREQ_DIV+bonus;
+        table[s]--;
+        sum += table[s];
+    }
+    return sum;
+}
+
+/* used in 2-pass strategy */
+MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
+{
+    if (ZSTD_compressedLiterals(optPtr))
+        optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
+    optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
+    optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
+    optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
+}
+
+/* ZSTD_initStats_ultra():
+ * make a first compression pass, just to seed stats with more accurate starting values.
+ * only works on first block, with no dictionary and no ldm.
+ * this function cannot error, hence its contract must be respected.
+ */
+static void
+ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
+                     seqStore_t* seqStore,
+                     U32 rep[ZSTD_REP_NUM],
+               const void* src, size_t srcSize)
+{
+    U32 tmpRep[ZSTD_REP_NUM];  /* updated rep codes will sink here */
+    memcpy(tmpRep, rep, sizeof(tmpRep));
+
+    DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
+    assert(ms->opt.litLengthSum == 0);    /* first block */
+    assert(seqStore->sequences == seqStore->sequencesStart);   /* no ldm */
+    assert(ms->window.dictLimit == ms->window.lowLimit);   /* no dictionary */
+    assert(ms->window.dictLimit - ms->nextToUpdate <= 1);  /* no prefix (note: intentional overflow, defined as two's complement) */
+
+    ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);   /* generate stats into ms->opt */
+
+    /* invalidate first scan from history */
+    ZSTD_resetSeqStore(seqStore);
+    ms->window.base -= srcSize;
+    ms->window.dictLimit += (U32)srcSize;
+    ms->window.lowLimit = ms->window.dictLimit;
+    ms->nextToUpdate = ms->window.dictLimit;
+
+    /* re-inforce weight of collected statistics */
+    ZSTD_upscaleStats(&ms->opt);
 }
 
 size_t ZSTD_compressBlock_btultra(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        const void* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 2 /*optLevel*/, 0 /*extDict*/);
+    DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
+    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btultra2(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        const void* src, size_t srcSize)
+{
+    U32 const current = (U32)((const BYTE*)src - ms->window.base);
+    DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);
+
+    /* 2-pass strategy:
+     * this strategy makes a first pass over first block to collect statistics
+     * and seed next round's statistics with it.
+     * After 1st pass, function forgets everything, and starts a new block.
+     * Consequently, this can only work if no data has been previously loaded in tables,
+     * aka, no dictionary, no prefix, no ldm preprocessing.
+     * The compression ratio gain is generally small (~0.5% on first block),
+     * the cost is 2x cpu time on first block. */
+    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
+    if ( (ms->opt.litLengthSum==0)   /* first block */
+      && (seqStore->sequences == seqStore->sequencesStart)  /* no ldm */
+      && (ms->window.dictLimit == ms->window.lowLimit)   /* no dictionary */
+      && (current == ms->window.dictLimit)   /* start of frame, nothing already loaded nor skipped */
+      && (srcSize > ZSTD_PREDEF_THRESHOLD)
+      ) {
+        ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
+    }
+
+    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btopt_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        const void* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_btultra_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        const void* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState);
 }
 
 size_t ZSTD_compressBlock_btopt_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        const void* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 0 /*optLevel*/, 1 /*extDict*/);
+    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict);
 }
 
 size_t ZSTD_compressBlock_btultra_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        const void* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 2 /*optLevel*/, 1 /*extDict*/);
+    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict);
 }
+
+/* note : no btultra2 variant for extDict nor dictMatchState,
+ * because btultra2 is not meant to work with dictionaries
+ * and only applies to the first block (no prefix) */
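The gating conditions of the 2-pass strategy above can be read as a single predicate; a minimal sketch (hypothetical helper name, same tests as in ZSTD_compressBlock_btultra2):

    /* sketch : when the btultra2 seeding pass is allowed to run */
    static int ZSTD_canUseTwoPass(const ZSTD_matchState_t* ms,
                                  const seqStore_t* seqStore,
                                  U32 current, size_t srcSize)
    {
        return (ms->opt.litLengthSum == 0)                        /* first block */
            && (seqStore->sequences == seqStore->sequencesStart)  /* no ldm */
            && (ms->window.dictLimit == ms->window.lowLimit)      /* no dictionary */
            && (current == ms->window.dictLimit)                  /* start of frame */
            && (srcSize > ZSTD_PREDEF_THRESHOLD);                 /* worth the 2x cpu */
    }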
diff --git a/vendor/github.com/DataDog/zstd/zstd_opt.h b/vendor/github.com/DataDog/zstd/zstd_opt.h
index b8dc389..094f747 100644
--- a/vendor/github.com/DataDog/zstd/zstd_opt.h
+++ b/vendor/github.com/DataDog/zstd/zstd_opt.h
@@ -17,23 +17,37 @@
 
 #include "zstd_compress_internal.h"
 
-void ZSTD_updateTree(
-        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-        const BYTE* ip, const BYTE* iend);  /* used in ZSTD_loadDictionaryContent() */
+/* used in ZSTD_loadDictionaryContent() */
+void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
 
 size_t ZSTD_compressBlock_btopt(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_btultra(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra2(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+
+
+size_t ZSTD_compressBlock_btopt_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
 
 size_t ZSTD_compressBlock_btopt_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_btultra_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
+
+/* note : no btultra2 variant for extDict nor dictMatchState,
+ * because btultra2 is not meant to work with dictionaries
+ * and only applies to the first block (no prefix) */
 
 #if defined (__cplusplus)
 }
diff --git a/vendor/github.com/DataDog/zstd/zstd_stream.go b/vendor/github.com/DataDog/zstd/zstd_stream.go
index d5d1336..2330353 100644
--- a/vendor/github.com/DataDog/zstd/zstd_stream.go
+++ b/vendor/github.com/DataDog/zstd/zstd_stream.go
@@ -11,6 +11,7 @@
 	"errors"
 	"fmt"
 	"io"
+	"runtime"
 	"unsafe"
 )
 
@@ -243,6 +244,8 @@
 			unsafe.Pointer(&src[0]),
 			&cSrcSize))
 
+		// Keep src alive here even though it is reused later; that later use might be deleted at some point
+		runtime.KeepAlive(src)
 		if err = getError(retCode); err != nil {
 			return 0, fmt.Errorf("failed to decompress: %s", err)
 		}
diff --git a/vendor/github.com/DataDog/zstd/zstd_v01.c b/vendor/github.com/DataDog/zstd/zstd_v01.c
index ae1cb2c..ae8cba2 100644
--- a/vendor/github.com/DataDog/zstd/zstd_v01.c
+++ b/vendor/github.com/DataDog/zstd/zstd_v01.c
@@ -668,11 +668,17 @@
         switch(srcSize)
         {
             case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);
+                    /* fallthrough */
             case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);
+                    /* fallthrough */
             case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);
+                    /* fallthrough */
             case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;
+                    /* fallthrough */
             case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;
+                    /* fallthrough */
             case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) <<  8;
+                    /* fallthrough */
             default:;
         }
         contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
@@ -1067,99 +1073,102 @@
     const void* cSrc, size_t cSrcSize,
     const U16* DTable)
 {
-    BYTE* const ostart = (BYTE*) dst;
-    BYTE* op = ostart;
-    BYTE* const omax = op + maxDstSize;
-    BYTE* const olimit = omax-15;
-
-    const void* ptr = DTable;
-    const HUF_DElt* const dt = (const HUF_DElt*)(ptr)+1;
-    const U32 dtLog = DTable[0];
-    size_t errorCode;
-    U32 reloadStatus;
-
-    /* Init */
-
-    const U16* jumpTable = (const U16*)cSrc;
-    const size_t length1 = FSE_readLE16(jumpTable);
-    const size_t length2 = FSE_readLE16(jumpTable+1);
-    const size_t length3 = FSE_readLE16(jumpTable+2);
-    const size_t length4 = cSrcSize - 6 - length1 - length2 - length3;   // check coherency !!
-    const char* const start1 = (const char*)(cSrc) + 6;
-    const char* const start2 = start1 + length1;
-    const char* const start3 = start2 + length2;
-    const char* const start4 = start3 + length3;
-    FSE_DStream_t bitD1, bitD2, bitD3, bitD4;
-
-    if (length1+length2+length3+6 >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
-
-    errorCode = FSE_initDStream(&bitD1, start1, length1);
-    if (FSE_isError(errorCode)) return errorCode;
-    errorCode = FSE_initDStream(&bitD2, start2, length2);
-    if (FSE_isError(errorCode)) return errorCode;
-    errorCode = FSE_initDStream(&bitD3, start3, length3);
-    if (FSE_isError(errorCode)) return errorCode;
-    errorCode = FSE_initDStream(&bitD4, start4, length4);
-    if (FSE_isError(errorCode)) return errorCode;
-
-    reloadStatus=FSE_reloadDStream(&bitD2);
-
-    /* 16 symbols per loop */
-    for ( ; (reloadStatus<FSE_DStream_completed) && (op<olimit);  /* D2-3-4 are supposed to be synchronized and finish together */
-        op+=16, reloadStatus = FSE_reloadDStream(&bitD2) | FSE_reloadDStream(&bitD3) | FSE_reloadDStream(&bitD4), FSE_reloadDStream(&bitD1))
+    if (cSrcSize < 6) return (size_t)-FSE_ERROR_srcSize_wrong;
     {
-#define HUF_DECODE_SYMBOL_0(n, Dstream) \
-        op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog);
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* op = ostart;
+        BYTE* const omax = op + maxDstSize;
+        BYTE* const olimit = omax-15;
 
-#define HUF_DECODE_SYMBOL_1(n, Dstream) \
-        op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \
-        if (FSE_32bits() && (HUF_MAX_TABLELOG>12)) FSE_reloadDStream(&Dstream)
+        const void* ptr = DTable;
+        const HUF_DElt* const dt = (const HUF_DElt*)(ptr)+1;
+        const U32 dtLog = DTable[0];
+        size_t errorCode;
+        U32 reloadStatus;
 
-#define HUF_DECODE_SYMBOL_2(n, Dstream) \
-        op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \
-        if (FSE_32bits()) FSE_reloadDStream(&Dstream)
+        /* Init */
 
-        HUF_DECODE_SYMBOL_1( 0, bitD1);
-        HUF_DECODE_SYMBOL_1( 1, bitD2);
-        HUF_DECODE_SYMBOL_1( 2, bitD3);
-        HUF_DECODE_SYMBOL_1( 3, bitD4);
-        HUF_DECODE_SYMBOL_2( 4, bitD1);
-        HUF_DECODE_SYMBOL_2( 5, bitD2);
-        HUF_DECODE_SYMBOL_2( 6, bitD3);
-        HUF_DECODE_SYMBOL_2( 7, bitD4);
-        HUF_DECODE_SYMBOL_1( 8, bitD1);
-        HUF_DECODE_SYMBOL_1( 9, bitD2);
-        HUF_DECODE_SYMBOL_1(10, bitD3);
-        HUF_DECODE_SYMBOL_1(11, bitD4);
-        HUF_DECODE_SYMBOL_0(12, bitD1);
-        HUF_DECODE_SYMBOL_0(13, bitD2);
-        HUF_DECODE_SYMBOL_0(14, bitD3);
-        HUF_DECODE_SYMBOL_0(15, bitD4);
-    }
+        const U16* jumpTable = (const U16*)cSrc;
+        const size_t length1 = FSE_readLE16(jumpTable);
+        const size_t length2 = FSE_readLE16(jumpTable+1);
+        const size_t length3 = FSE_readLE16(jumpTable+2);
+        const size_t length4 = cSrcSize - 6 - length1 - length2 - length3;   // check coherency !!
+        const char* const start1 = (const char*)(cSrc) + 6;
+        const char* const start2 = start1 + length1;
+        const char* const start3 = start2 + length2;
+        const char* const start4 = start3 + length3;
+        FSE_DStream_t bitD1, bitD2, bitD3, bitD4;
 
-    if (reloadStatus!=FSE_DStream_completed)   /* not complete : some bitStream might be FSE_DStream_unfinished */
-        return (size_t)-FSE_ERROR_corruptionDetected;
+        if (length1+length2+length3+6 >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
 
-    /* tail */
-    {
-        // bitTail = bitD1;   // *much* slower : -20% !??!
-        FSE_DStream_t bitTail;
-        bitTail.ptr = bitD1.ptr;
-        bitTail.bitsConsumed = bitD1.bitsConsumed;
-        bitTail.bitContainer = bitD1.bitContainer;   // required in case of FSE_DStream_endOfBuffer
-        bitTail.start = start1;
-        for ( ; (FSE_reloadDStream(&bitTail) < FSE_DStream_completed) && (op<omax) ; op++)
+        errorCode = FSE_initDStream(&bitD1, start1, length1);
+        if (FSE_isError(errorCode)) return errorCode;
+        errorCode = FSE_initDStream(&bitD2, start2, length2);
+        if (FSE_isError(errorCode)) return errorCode;
+        errorCode = FSE_initDStream(&bitD3, start3, length3);
+        if (FSE_isError(errorCode)) return errorCode;
+        errorCode = FSE_initDStream(&bitD4, start4, length4);
+        if (FSE_isError(errorCode)) return errorCode;
+
+        reloadStatus=FSE_reloadDStream(&bitD2);
+
+        /* 16 symbols per loop */
+        for ( ; (reloadStatus<FSE_DStream_completed) && (op<olimit);  /* D2-3-4 are supposed to be synchronized and finish together */
+            op+=16, reloadStatus = FSE_reloadDStream(&bitD2) | FSE_reloadDStream(&bitD3) | FSE_reloadDStream(&bitD4), FSE_reloadDStream(&bitD1))
         {
-            HUF_DECODE_SYMBOL_0(0, bitTail);
+    #define HUF_DECODE_SYMBOL_0(n, Dstream) \
+            op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog);
+
+    #define HUF_DECODE_SYMBOL_1(n, Dstream) \
+            op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \
+            if (FSE_32bits() && (HUF_MAX_TABLELOG>12)) FSE_reloadDStream(&Dstream)
+
+    #define HUF_DECODE_SYMBOL_2(n, Dstream) \
+            op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \
+            if (FSE_32bits()) FSE_reloadDStream(&Dstream)
+
+            HUF_DECODE_SYMBOL_1( 0, bitD1);
+            HUF_DECODE_SYMBOL_1( 1, bitD2);
+            HUF_DECODE_SYMBOL_1( 2, bitD3);
+            HUF_DECODE_SYMBOL_1( 3, bitD4);
+            HUF_DECODE_SYMBOL_2( 4, bitD1);
+            HUF_DECODE_SYMBOL_2( 5, bitD2);
+            HUF_DECODE_SYMBOL_2( 6, bitD3);
+            HUF_DECODE_SYMBOL_2( 7, bitD4);
+            HUF_DECODE_SYMBOL_1( 8, bitD1);
+            HUF_DECODE_SYMBOL_1( 9, bitD2);
+            HUF_DECODE_SYMBOL_1(10, bitD3);
+            HUF_DECODE_SYMBOL_1(11, bitD4);
+            HUF_DECODE_SYMBOL_0(12, bitD1);
+            HUF_DECODE_SYMBOL_0(13, bitD2);
+            HUF_DECODE_SYMBOL_0(14, bitD3);
+            HUF_DECODE_SYMBOL_0(15, bitD4);
         }
 
-        if (FSE_endOfDStream(&bitTail))
-            return op-ostart;
+        if (reloadStatus!=FSE_DStream_completed)   /* not complete : some bitStream might be FSE_DStream_unfinished */
+            return (size_t)-FSE_ERROR_corruptionDetected;
+
+        /* tail */
+        {
+            // bitTail = bitD1;   // *much* slower : -20% !??!
+            FSE_DStream_t bitTail;
+            bitTail.ptr = bitD1.ptr;
+            bitTail.bitsConsumed = bitD1.bitsConsumed;
+            bitTail.bitContainer = bitD1.bitContainer;   // required in case of FSE_DStream_endOfBuffer
+            bitTail.start = start1;
+            for ( ; (FSE_reloadDStream(&bitTail) < FSE_DStream_completed) && (op<omax) ; op++)
+            {
+                HUF_DECODE_SYMBOL_0(0, bitTail);
+            }
+
+            if (FSE_endOfDStream(&bitTail))
+                return op-ostart;
+        }
+
+        if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall;   /* dst buffer is full, but cSrc unfinished */
+
+        return (size_t)-FSE_ERROR_corruptionDetected;
     }
-
-    if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall;   /* dst buffer is full, but cSrc unfinished */
-
-    return (size_t)-FSE_ERROR_corruptionDetected;
 }
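The block scoping introduced above also lets the new `cSrcSize < 6` early return protect every later read: the first 6 bytes are a jump table of three little-endian 16-bit lengths, and the fourth stream's length is derived from what remains. A standalone sketch of that header parse (mirroring the code, hypothetical buffer):

    /* sketch : locating the four bitstreams of a 4X-compressed block */
    const BYTE* const jt = (const BYTE*)cSrc;       /* requires cSrcSize >= 6 */
    size_t const length1 = FSE_readLE16(jt);
    size_t const length2 = FSE_readLE16(jt + 2);
    size_t const length3 = FSE_readLE16(jt + 4);
    size_t const length4 = cSrcSize - 6 - length1 - length2 - length3;
    /* streams are stored back-to-back after the 6-byte table */
    const char* const start1 = (const char*)cSrc + 6;
    const char* const start2 = start1 + length1;
    const char* const start3 = start2 + length2;
    const char* const start4 = start3 + length3;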
 
 
@@ -1330,6 +1339,8 @@
 #define LITERAL_NOENTROPY 63
 #define COMMAND_NOENTROPY 7   /* to remove */
 
+#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
+
 static const size_t ZSTD_blockHeaderSize = 3;
 static const size_t ZSTD_frameHeaderSize = 4;
 
@@ -1347,8 +1358,6 @@
 
 static U16    ZSTD_read16(const void* p) { U16 r; memcpy(&r, p, sizeof(r)); return r; }
 
-static U32    ZSTD_read32(const void* p) { U32 r; memcpy(&r, p, sizeof(r)); return r; }
-
 static void   ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
 
 static void   ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
@@ -1373,16 +1382,9 @@
     }
 }
 
-
-static U32 ZSTD_readLE32(const void* memPtr)
+static U32 ZSTD_readLE24(const void* memPtr)
 {
-    if (ZSTD_isLittleEndian())
-        return ZSTD_read32(memPtr);
-    else
-    {
-        const BYTE* p = (const BYTE*)memPtr;
-        return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
-    }
+    return ZSTD_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
 }
 
 static U32 ZSTD_readBE32(const void* memPtr)
@@ -1458,7 +1460,7 @@
 *   Decompression code
 **************************************************************/
 
-size_t ZSTDv01_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+static size_t ZSTDv01_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
 {
     const BYTE* const in = (const BYTE* const)src;
     BYTE headerFlags;
@@ -1511,7 +1513,7 @@
 }
 
 
-size_t ZSTDv01_decodeLiteralsBlock(void* ctx,
+static size_t ZSTDv01_decodeLiteralsBlock(void* ctx,
                                 void* dst, size_t maxDstSize,
                           const BYTE** litStart, size_t* litSize,
                           const void* src, size_t srcSize)
@@ -1563,7 +1565,7 @@
 }
 
 
-size_t ZSTDv01_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
+static size_t ZSTDv01_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
                          FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
                          const void* src, size_t srcSize)
 {
@@ -1696,13 +1698,13 @@
     seqState->prevOffset = seq->offset;
     if (litLength == MaxLL)
     {
-        U32 add = dumps<de ? *dumps++ : 0;
+        const U32 add = dumps<de ? *dumps++ : 0;
         if (add < 255) litLength += add;
         else
         {
             if (dumps<=(de-3))
             {
-                litLength = ZSTD_readLE32(dumps) & 0xFFFFFF;  /* no pb : dumps is always followed by seq tables > 1 byte */
+                litLength = ZSTD_readLE24(dumps);
                 dumps += 3;
             }
         }
@@ -1724,13 +1726,13 @@
     matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
     if (matchLength == MaxML)
     {
-        U32 add = dumps<de ? *dumps++ : 0;
+        const U32 add = dumps<de ? *dumps++ : 0;
         if (add < 255) matchLength += add;
         else
         {
             if (dumps<=(de-3))
             {
-                matchLength = ZSTD_readLE32(dumps) & 0xFFFFFF;  /* no pb : dumps is always followed by seq tables > 1 byte */
+                matchLength = ZSTD_readLE24(dumps);
                 dumps += 3;
             }
         }
@@ -1751,7 +1753,7 @@
                                 BYTE* const base, BYTE* const oend)
 {
     static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
-    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* substracted */
+    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* subtracted */
     const BYTE* const ostart = op;
     const size_t litLength = sequence.litLength;
     BYTE* const endMatch = op + litLength + sequence.matchLength;    /* risk : address space overflow (32-bits) */
@@ -1993,36 +1995,59 @@
     return ZSTDv01_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
 }
 
-size_t ZSTDv01_findFrameCompressedSize(const void* src, size_t srcSize)
+/* ZSTD_errorFrameSizeInfoLegacy() :
+   assumes `cSize` and `dBound` are _not_ NULL */
+static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
+{
+    *cSize = ret;
+    *dBound = ZSTD_CONTENTSIZE_ERROR;
+}
+
+void ZSTDv01_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
 {
     const BYTE* ip = (const BYTE*)src;
     size_t remainingSize = srcSize;
+    size_t nbBlocks = 0;
     U32 magicNumber;
     blockProperties_t blockProperties;
 
     /* Frame Header */
-    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {
+        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+        return;
+    }
     magicNumber = ZSTD_readBE32(src);
-    if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+    if (magicNumber != ZSTD_magicNumber) {
+        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
+        return;
+    }
     ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
 
     /* Loop on each block */
     while (1)
     {
         size_t blockSize = ZSTDv01_getcBlockSize(ip, remainingSize, &blockProperties);
-        if (ZSTDv01_isError(blockSize)) return blockSize;
+        if (ZSTDv01_isError(blockSize)) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, blockSize);
+            return;
+        }
 
         ip += ZSTD_blockHeaderSize;
         remainingSize -= ZSTD_blockHeaderSize;
-        if (blockSize > remainingSize) return ERROR(srcSize_wrong);
+        if (blockSize > remainingSize) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+            return;
+        }
 
         if (blockSize == 0) break;   /* bt_end */
 
         ip += blockSize;
         remainingSize -= blockSize;
+        nbBlocks++;
     }
 
-    return ip - (const BYTE*)src;
+    *cSize = ip - (const BYTE*)src;
+    *dBound = nbBlocks * BLOCKSIZE;
 }
 
 /*******************************
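
Note on the new API shape: the v0.1 frame scanner above now reports through two out-parameters instead of a size_t return. `cSize` still carries either the compressed frame size or an error code, while `dBound` adds an upper bound on the decompressed size. A minimal caller sketch, assuming only the declarations from zstd_v01.h shown further below; the function and variable names here are illustrative:

    #include <stdio.h>
    #include "zstd_v01.h"   /* ZSTDv01_findFrameSizeInfoLegacy, ZSTDv01_isError */

    static void printFrameInfo(const void* src, size_t srcSize)
    {
        size_t cSize;
        unsigned long long dBound;
        ZSTDv01_findFrameSizeInfoLegacy(src, srcSize, &cSize, &dBound);
        if (ZSTDv01_isError(cSize)) {
            /* on any failure, dBound has been set to ZSTD_CONTENTSIZE_ERROR */
            fprintf(stderr, "not a valid v0.1 frame\n");
            return;
        }
        printf("frame: %u compressed bytes, decompresses to at most %llu bytes\n",
               (unsigned)cSize, dBound);
    }
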
diff --git a/vendor/github.com/DataDog/zstd/zstd_v01.h b/vendor/github.com/DataDog/zstd/zstd_v01.h
index 42f0897..245f9dd 100644
--- a/vendor/github.com/DataDog/zstd/zstd_v01.h
+++ b/vendor/github.com/DataDog/zstd/zstd_v01.h
@@ -35,13 +35,18 @@
 size_t ZSTDv01_decompress( void* dst, size_t maxOriginalSize,
                      const void* src, size_t compressedSize);
 
-/**
-ZSTDv01_getFrameSrcSize() : get the source length of a ZSTD frame compliant with v0.1.x format
-    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
-    return : the number of bytes that would be read to decompress this frame
-             or an errorCode if it fails (which can be tested using ZSTDv01_isError())
-*/
-size_t ZSTDv01_findFrameCompressedSize(const void* src, size_t compressedSize);
+ /**
+ ZSTDv01_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.1.x format
+     srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+     cSize (output parameter)  : the number of bytes that would be read to decompress this frame
+                                 or an error code if it fails (which can be tested using ZSTDv01_isError())
+     dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
+                                 or ZSTD_CONTENTSIZE_ERROR if an error occurs
+
+     note : assumes `cSize` and `dBound` are _not_ NULL.
+ */
+void ZSTDv01_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
+                                     size_t* cSize, unsigned long long* dBound);
 
 /**
 ZSTDv01_isError() : tells if the result of ZSTDv01_decompress() is an error
diff --git a/vendor/github.com/DataDog/zstd/zstd_v02.c b/vendor/github.com/DataDog/zstd/zstd_v02.c
index 8bc0ece..793df60 100644
--- a/vendor/github.com/DataDog/zstd/zstd_v02.c
+++ b/vendor/github.com/DataDog/zstd/zstd_v02.c
@@ -217,6 +217,11 @@
     }
 }
 
+MEM_STATIC U32 MEM_readLE24(const void* memPtr)
+{
+    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
+}
+
 MEM_STATIC U32 MEM_readLE32(const void* memPtr)
 {
     if (MEM_isLittleEndian())
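
MEM_readLE24() assembles a 24-bit little-endian value from a 16-bit read plus the third byte shifted into bits 16..23, replacing the earlier `MEM_readLE32(p) & 0xFFFFFF` idiom, which fetched a fourth byte only to discard it. A self-contained sketch of the same construction (standalone names, not the vendored ones):

    #include <assert.h>
    #include <stdint.h>

    /* 24-bit little-endian read : exactly 3 bytes touched, never a 4th */
    static uint32_t readLE24(const uint8_t* p)
    {
        uint32_t const lo16 = (uint32_t)p[0] | ((uint32_t)p[1] << 8);
        return lo16 + ((uint32_t)p[2] << 16);
    }

    int main(void)
    {
        uint8_t const buf[3] = { 0x01, 0x02, 0x03 };
        assert(readLE24(buf) == 0x030201);
        return 0;
    }
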
@@ -399,11 +404,17 @@
         switch(srcSize)
         {
             case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);
+                    /* fallthrough */
             case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);
+                    /* fallthrough */
             case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);
+                    /* fallthrough */
             case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;
+                    /* fallthrough */
             case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;
+                    /* fallthrough */
             case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) <<  8;
+                    /* fallthrough */
             default:;
         }
         contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
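
The `/* fallthrough */` comments mark each case as deliberately running into the next; compilers that implement -Wimplicit-fallthrough (GCC 7+, recent Clang) recognize such comments and suppress the warning. The pattern in isolation, with illustrative names:

    /* accumulate the last n (1..3 here) bytes of a buffer;
       every case intentionally falls through to the next */
    static unsigned loadTail(const unsigned char* p, int n)
    {
        unsigned acc = 0;
        switch (n) {
            case 3: acc += (unsigned)p[2] << 16;
                    /* fallthrough */
            case 2: acc += (unsigned)p[1] << 8;
                    /* fallthrough */
            case 1: acc += (unsigned)p[0];
                    /* fallthrough */
            default: break;
        }
        return acc;
    }
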
@@ -2722,6 +2733,8 @@
 #define LITERAL_NOENTROPY 63
 #define COMMAND_NOENTROPY 7   /* to remove */
 
+#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
+
 static const size_t ZSTD_blockHeaderSize = 3;
 static const size_t ZSTD_frameHeaderSize = 4;
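
`(0ULL - 2)` relies on unsigned wraparound: it evaluates to 2^64 - 2 on the usual 64-bit unsigned long long, matching the ZSTD_CONTENTSIZE_ERROR value exposed by the stable zstd.h, so callers can test `dBound` against a single constant whichever decoder filled it in. A quick check, assuming a 64-bit unsigned long long:

    #include <assert.h>

    #define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)

    int main(void)
    {
        /* unsigned arithmetic wraps modulo 2^64 : 0 - 2 == 0xFFFFFFFFFFFFFFFE */
        assert(ZSTD_CONTENTSIZE_ERROR == 0xFFFFFFFFFFFFFFFEULL);
        return 0;
    }
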
 
@@ -3035,11 +3048,11 @@
     seqState->prevOffset = seq->offset;
     if (litLength == MaxLL)
     {
-        U32 add = *dumps++;
+        const U32 add = dumps<de ? *dumps++ : 0;
         if (add < 255) litLength += add;
-        else
+        else if (dumps + 3 <= de)
         {
-            litLength = MEM_readLE32(dumps) & 0xFFFFFF;  /* no pb : dumps is always followed by seq tables > 1 byte */
+            litLength = MEM_readLE24(dumps);
             dumps += 3;
         }
         if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */
@@ -3065,11 +3078,11 @@
     matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
     if (matchLength == MaxML)
     {
-        U32 add = *dumps++;
+        const U32 add = dumps<de ? *dumps++ : 0;
         if (add < 255) matchLength += add;
-        else
+        else if (dumps + 3 <= de)
         {
-            matchLength = MEM_readLE32(dumps) & 0xFFFFFF;  /* no pb : dumps is always followed by seq tables > 1 byte */
+            matchLength = MEM_readLE24(dumps);
             dumps += 3;
         }
         if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */
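
Both length paths now test `dumps` against the end pointer `de` before every read: the one-byte extension is fetched only when `dumps < de`, and the three-byte form replaces the old unconditional `else` with `dumps + 3 <= de`. The guard structure, reduced to a standalone sketch with illustrative names:

    /* extended-length decode that never reads past `end` */
    static unsigned decodeExtLength(unsigned length,
                                    const unsigned char** pp,
                                    const unsigned char* end)
    {
        const unsigned char* p = *pp;
        unsigned const add = (p < end) ? *p++ : 0;     /* guarded 1-byte read */
        if (add < 255) length += add;
        else if (p + 3 <= end) {                       /* guarded 3-byte read */
            length = (unsigned)p[0] | ((unsigned)p[1] << 8) | ((unsigned)p[2] << 16);
            p += 3;
        }
        *pp = p;
        return length;
    }
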
@@ -3090,7 +3103,7 @@
                                 BYTE* const base, BYTE* const oend)
 {
     static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
-    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* substracted */
+    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* subtracted */
     const BYTE* const ostart = op;
     BYTE* const oLitEnd = op + sequence.litLength;
     BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength;   /* risk : address space overflow (32-bits) */
@@ -3306,37 +3319,59 @@
     return ZSTD_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
 }
 
-static size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
+/* ZSTD_errorFrameSizeInfoLegacy() :
+   assumes `cSize` and `dBound` are _not_ NULL */
+static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
 {
+    *cSize = ret;
+    *dBound = ZSTD_CONTENTSIZE_ERROR;
+}
 
+void ZSTDv02_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
+{
     const BYTE* ip = (const BYTE*)src;
     size_t remainingSize = srcSize;
+    size_t nbBlocks = 0;
     U32 magicNumber;
     blockProperties_t blockProperties;
 
     /* Frame Header */
-    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {
+        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+        return;
+    }
     magicNumber = MEM_readLE32(src);
-    if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+    if (magicNumber != ZSTD_magicNumber) {
+        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
+        return;
+    }
     ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
 
     /* Loop on each block */
     while (1)
     {
         size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
-        if (ZSTD_isError(cBlockSize)) return cBlockSize;
+        if (ZSTD_isError(cBlockSize)) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
+            return;
+        }
 
         ip += ZSTD_blockHeaderSize;
         remainingSize -= ZSTD_blockHeaderSize;
-        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+        if (cBlockSize > remainingSize) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+            return;
+        }
 
         if (cBlockSize == 0) break;   /* bt_end */
 
         ip += cBlockSize;
         remainingSize -= cBlockSize;
+        nbBlocks++;
     }
 
-    return ip - (const BYTE*)src;
+    *cSize = ip - (const BYTE*)src;
+    *dBound = nbBlocks * BLOCKSIZE;
 }
 
 /*******************************
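
Why `nbBlocks * BLOCKSIZE` is a safe decompressed bound: each block in these legacy frame formats decodes to at most BLOCKSIZE bytes (128 KB in this code), so a frame counted at nbBlocks blocks cannot expand beyond nbBlocks * BLOCKSIZE. For example, a frame containing 3 blocks yields dBound = 3 * 131072 = 393216 bytes, however little the blocks actually decode to.
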
@@ -3452,11 +3487,6 @@
     return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
 }
 
-size_t ZSTDv02_findFrameCompressedSize(const void *src, size_t compressedSize)
-{
-    return ZSTD_findFrameCompressedSize(src, compressedSize);
-}
-
 ZSTDv02_Dctx* ZSTDv02_createDCtx(void)
 {
     return (ZSTDv02_Dctx*)ZSTD_createDCtx();
diff --git a/vendor/github.com/DataDog/zstd/zstd_v02.h b/vendor/github.com/DataDog/zstd/zstd_v02.h
index 0dde7a6..9d7d8d9 100644
--- a/vendor/github.com/DataDog/zstd/zstd_v02.h
+++ b/vendor/github.com/DataDog/zstd/zstd_v02.h
@@ -35,13 +35,18 @@
 size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,
                      const void* src, size_t compressedSize);
 
-/**
-ZSTDv02_getFrameSrcSize() : get the source length of a ZSTD frame compliant with v0.2.x format
-    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
-    return : the number of bytes that would be read to decompress this frame
-             or an errorCode if it fails (which can be tested using ZSTDv02_isError())
-*/
-size_t ZSTDv02_findFrameCompressedSize(const void* src, size_t compressedSize);
+ /**
+ ZSTDv02_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.2.x format
+     srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+     cSize (output parameter)  : the number of bytes that would be read to decompress this frame
+                                 or an error code if it fails (which can be tested using ZSTDv02_isError())
+     dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
+                                 or ZSTD_CONTENTSIZE_ERROR if an error occurs
+
+    note : assumes `cSize` and `dBound` are _not_ NULL.
+ */
+void ZSTDv02_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
+                                     size_t* cSize, unsigned long long* dBound);
 
 /**
 ZSTDv02_isError() : tells if the result of ZSTDv02_decompress() is an error
diff --git a/vendor/github.com/DataDog/zstd/zstd_v03.c b/vendor/github.com/DataDog/zstd/zstd_v03.c
index 54445af..7a0e7c9 100644
--- a/vendor/github.com/DataDog/zstd/zstd_v03.c
+++ b/vendor/github.com/DataDog/zstd/zstd_v03.c
@@ -219,6 +219,11 @@
     }
 }
 
+MEM_STATIC U32 MEM_readLE24(const void* memPtr)
+{
+    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
+}
+
 MEM_STATIC U32 MEM_readLE32(const void* memPtr)
 {
     if (MEM_isLittleEndian())
@@ -402,11 +407,17 @@
         switch(srcSize)
         {
             case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);
+                    /* fallthrough */
             case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);
+                    /* fallthrough */
             case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);
+                    /* fallthrough */
             case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;
+                    /* fallthrough */
             case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;
+                    /* fallthrough */
             case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) <<  8;
+                    /* fallthrough */
             default:;
         }
         contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
@@ -2363,6 +2374,8 @@
 #define LITERAL_NOENTROPY 63
 #define COMMAND_NOENTROPY 7   /* to remove */
 
+#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
+
 static const size_t ZSTD_blockHeaderSize = 3;
 static const size_t ZSTD_frameHeaderSize = 4;
 
@@ -2676,11 +2689,11 @@
     seqState->prevOffset = seq->offset;
     if (litLength == MaxLL)
     {
-        U32 add = *dumps++;
+        const U32 add = dumps<de ? *dumps++ : 0;
         if (add < 255) litLength += add;
-        else
+        else if (dumps + 3 <= de)
         {
-            litLength = MEM_readLE32(dumps) & 0xFFFFFF;  /* no pb : dumps is always followed by seq tables > 1 byte */
+            litLength = MEM_readLE24(dumps);
             dumps += 3;
         }
         if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */
@@ -2706,11 +2719,11 @@
     matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
     if (matchLength == MaxML)
     {
-        U32 add = *dumps++;
+        const U32 add = dumps<de ? *dumps++ : 0;
         if (add < 255) matchLength += add;
-        else
+        else if (dumps + 3 <= de)
         {
-            matchLength = MEM_readLE32(dumps) & 0xFFFFFF;  /* no pb : dumps is always followed by seq tables > 1 byte */
+            matchLength = MEM_readLE24(dumps);
             dumps += 3;
         }
         if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */
@@ -2731,7 +2744,7 @@
                                 BYTE* const base, BYTE* const oend)
 {
     static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
-    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* substracted */
+    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* subtracted */
     const BYTE* const ostart = op;
     BYTE* const oLitEnd = op + sequence.litLength;
     BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength;   /* risk : address space overflow (32-bits) */
@@ -2947,36 +2960,59 @@
     return ZSTD_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
 }
 
-static size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize)
+/* ZSTD_errorFrameSizeInfoLegacy() :
+   assumes `cSize` and `dBound` are _not_ NULL */
+MEM_STATIC void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
+{
+    *cSize = ret;
+    *dBound = ZSTD_CONTENTSIZE_ERROR;
+}
+
+void ZSTDv03_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
 {
     const BYTE* ip = (const BYTE*)src;
     size_t remainingSize = srcSize;
+    size_t nbBlocks = 0;
     U32 magicNumber;
     blockProperties_t blockProperties;
 
     /* Frame Header */
-    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {
+        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+        return;
+    }
     magicNumber = MEM_readLE32(src);
-    if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+    if (magicNumber != ZSTD_magicNumber) {
+        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
+        return;
+    }
     ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
 
     /* Loop on each block */
     while (1)
     {
         size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
-        if (ZSTD_isError(cBlockSize)) return cBlockSize;
+        if (ZSTD_isError(cBlockSize)) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
+            return;
+        }
 
         ip += ZSTD_blockHeaderSize;
         remainingSize -= ZSTD_blockHeaderSize;
-        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+        if (cBlockSize > remainingSize) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+            return;
+        }
 
         if (cBlockSize == 0) break;   /* bt_end */
 
         ip += cBlockSize;
         remainingSize -= cBlockSize;
+        nbBlocks++;
     }
 
-    return ip - (const BYTE*)src;
+    *cSize = ip - (const BYTE*)src;
+    *dBound = nbBlocks * BLOCKSIZE;
 }
 
 
@@ -3093,11 +3129,6 @@
     return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
 }
 
-size_t ZSTDv03_findFrameCompressedSize(const void* src, size_t srcSize)
-{
-    return ZSTD_findFrameCompressedSize(src, srcSize);
-}
-
 ZSTDv03_Dctx* ZSTDv03_createDCtx(void)
 {
     return (ZSTDv03_Dctx*)ZSTD_createDCtx();
diff --git a/vendor/github.com/DataDog/zstd/zstd_v03.h b/vendor/github.com/DataDog/zstd/zstd_v03.h
index b4449e2..efd8c2b 100644
--- a/vendor/github.com/DataDog/zstd/zstd_v03.h
+++ b/vendor/github.com/DataDog/zstd/zstd_v03.h
@@ -35,13 +35,18 @@
 size_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize,
                      const void* src, size_t compressedSize);
 
-/**
-ZSTDv03_getFrameSrcSize() : get the source length of a ZSTD frame compliant with v0.3.x format
-    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
-    return : the number of bytes that would be read to decompress this frame
-             or an errorCode if it fails (which can be tested using ZSTDv03_isError())
-*/
-size_t ZSTDv03_findFrameCompressedSize(const void* src, size_t compressedSize);
+ /**
+ ZSTDv03_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.3.x format
+     srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+     cSize (output parameter)  : the number of bytes that would be read to decompress this frame
+                                 or an error code if it fails (which can be tested using ZSTDv03_isError())
+     dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
+                                 or ZSTD_CONTENTSIZE_ERROR if an error occurs
+
+    note : assumes `cSize` and `dBound` are _not_ NULL.
+ */
+ void ZSTDv03_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
+                                      size_t* cSize, unsigned long long* dBound);
 
     /**
 ZSTDv03_isError() : tells if the result of ZSTDv03_decompress() is an error
diff --git a/vendor/github.com/DataDog/zstd/zstd_v04.c b/vendor/github.com/DataDog/zstd/zstd_v04.c
index fb6d1d4..645a6e3 100644
--- a/vendor/github.com/DataDog/zstd/zstd_v04.c
+++ b/vendor/github.com/DataDog/zstd/zstd_v04.c
@@ -9,14 +9,19 @@
  */
 
 
-/*- Dependencies -*/
+ /******************************************
+ *  Includes
+ ******************************************/
+#include <stddef.h>    /* size_t, ptrdiff_t */
+#include <string.h>    /* memcpy */
+
 #include "zstd_v04.h"
 #include "error_private.h"
 
 
 /* ******************************************************************
-   mem.h
-****************************************************************** */
+ *   mem.h
+ *******************************************************************/
 #ifndef MEM_H_MODULE
 #define MEM_H_MODULE
 
@@ -24,12 +29,6 @@
 extern "C" {
 #endif
 
-/******************************************
-*  Includes
-******************************************/
-#include <stddef.h>    /* size_t, ptrdiff_t */
-#include <string.h>    /* memcpy */
-
 
 /******************************************
 *  Compiler-specific
@@ -75,38 +74,9 @@
 /*-*************************************
 *  Debug
 ***************************************/
-#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1)
-#  include <assert.h>
-#else
-#  ifndef assert
-#    define assert(condition) ((void)0)
-#  endif
-#endif
-
-#define ZSTD_STATIC_ASSERT(c) { enum { ZSTD_static_assert = 1/(int)(!!(c)) }; }
-
-#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2)
-#  include <stdio.h>
-extern int g_debuglog_enable;
-/* recommended values for ZSTD_DEBUG display levels :
- * 1 : no display, enables assert() only
- * 2 : reserved for currently active debug path
- * 3 : events once per object lifetime (CCtx, CDict, etc.)
- * 4 : events once per frame
- * 5 : events once per block
- * 6 : events once per sequence (*very* verbose) */
-#  define RAWLOG(l, ...) {                                      \
-                if ((g_debuglog_enable) & (l<=ZSTD_DEBUG)) {    \
-                    fprintf(stderr, __VA_ARGS__);               \
-            }   }
-#  define DEBUGLOG(l, ...) {                                    \
-                if ((g_debuglog_enable) & (l<=ZSTD_DEBUG)) {    \
-                    fprintf(stderr, __FILE__ ": " __VA_ARGS__); \
-                    fprintf(stderr, " \n");                     \
-            }   }
-#else
-#  define RAWLOG(l, ...)      {}    /* disabled */
-#  define DEBUGLOG(l, ...)    {}    /* disabled */
+#include "debug.h"
+#ifndef assert
+#  define assert(condition) ((void)0)
 #endif
 
 
@@ -219,6 +189,11 @@
     }
 }
 
+MEM_STATIC U32 MEM_readLE24(const void* memPtr)
+{
+    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
+}
+
 MEM_STATIC U32 MEM_readLE32(const void* memPtr)
 {
     if (MEM_isLittleEndian())
@@ -266,29 +241,11 @@
 #ifndef ZSTD_STATIC_H
 #define ZSTD_STATIC_H
 
-/* The objects defined into this file shall be considered experimental.
- * They are not considered stable, as their prototype may change in the future.
- * You can use them for tests, provide feedback, or if you can endure risks of future changes.
- */
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
 
 /* *************************************
 *  Types
 ***************************************/
-#define ZSTD_WINDOWLOG_MAX 26
-#define ZSTD_WINDOWLOG_MIN 18
 #define ZSTD_WINDOWLOG_ABSOLUTEMIN 11
-#define ZSTD_CONTENTLOG_MAX (ZSTD_WINDOWLOG_MAX+1)
-#define ZSTD_CONTENTLOG_MIN 4
-#define ZSTD_HASHLOG_MAX 28
-#define ZSTD_HASHLOG_MIN 4
-#define ZSTD_SEARCHLOG_MAX (ZSTD_CONTENTLOG_MAX-1)
-#define ZSTD_SEARCHLOG_MIN 1
-#define ZSTD_SEARCHLENGTH_MAX 7
-#define ZSTD_SEARCHLENGTH_MIN 4
 
 /** from faster to stronger */
 typedef enum { ZSTD_fast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2, ZSTD_btlazy2 } ZSTD_strategy;
@@ -360,9 +317,6 @@
 */
 
 
-#if defined (__cplusplus)
-}
-#endif
 
 
 #endif  /* ZSTD_STATIC_H */
@@ -375,10 +329,6 @@
 #ifndef ZSTD_CCOMMON_H_MODULE
 #define ZSTD_CCOMMON_H_MODULE
 
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
 /* *************************************
 *  Common macros
 ***************************************/
@@ -428,6 +378,8 @@
 #define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/)
 #define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE)
 
+#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
+
 typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
 
 
@@ -450,10 +402,6 @@
 }
 
 
-#if defined (__cplusplus)
-}
-#endif
-
 
 /* ******************************************************************
    FSE : Finite State Entropy coder
@@ -1142,6 +1090,7 @@
     if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
 
     /* Init, lay down lowprob symbols */
+    memset(tableDecode, 0, sizeof(FSE_DECODE_TYPE) * (maxSymbolValue+1) );   /* useless init, but keeps the static analyzer happy, and we don't need to performance-optimize legacy decoders */
     DTableH.tableLog = (U16)tableLog;
     for (s=0; s<=maxSymbolValue; s++)
     {
@@ -2864,13 +2813,12 @@
     litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
     prevOffset = litLength ? seq->offset : seqState->prevOffset;
     if (litLength == MaxLL) {
-        U32 add = *dumps++;
+        const U32 add = dumps<de ? *dumps++ : 0;
         if (add < 255) litLength += add;
-        else {
-            litLength = dumps[0] + (dumps[1]<<8) + (dumps[2]<<16);
+        else if (dumps + 3 <= de) {
+            litLength = MEM_readLE24(dumps);
             dumps += 3;
         }
-        if (dumps > de) { litLength = MaxLL+255; }  /* late correction, to avoid using uninitialized memory */
         if (dumps >= de) { dumps = de-1; }  /* late correction, to avoid read overflow (data is now corrupted anyway) */
     }
 
@@ -2893,13 +2841,12 @@
     /* MatchLength */
     matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
     if (matchLength == MaxML) {
-        U32 add = *dumps++;
+        const U32 add = dumps<de ? *dumps++ : 0;
         if (add < 255) matchLength += add;
-        else {
-            matchLength = dumps[0] + (dumps[1]<<8) + (dumps[2]<<16);
+        else if (dumps + 3 <= de){
+            matchLength = MEM_readLE24(dumps);
             dumps += 3;
         }
-        if (dumps > de) { matchLength = MaxML+255; }  /* late correction, to avoid using uninitialized memory */
         if (dumps >= de) { dumps = de-1; }  /* late correction, to avoid read overflow (data is now corrupted anyway) */
     }
     matchLength += MINMATCH;
@@ -2918,7 +2865,7 @@
                                 const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
 {
     static const int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
-    static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* substracted */
+    static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
     BYTE* const oLitEnd = op + sequence.litLength;
     const size_t sequenceLength = sequence.litLength + sequence.matchLength;
     BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
@@ -2991,7 +2938,7 @@
     }
     else
     {
-        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
+        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8, but must be signed */
     }
     return sequenceLength;
 }
@@ -3177,34 +3124,57 @@
     return op-ostart;
 }
 
-static size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize)
+/* ZSTD_errorFrameSizeInfoLegacy() :
+   assumes `cSize` and `dBound` are _not_ NULL */
+static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
+{
+    *cSize = ret;
+    *dBound = ZSTD_CONTENTSIZE_ERROR;
+}
+
+void ZSTDv04_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
 {
     const BYTE* ip = (const BYTE*)src;
     size_t remainingSize = srcSize;
+    size_t nbBlocks = 0;
     blockProperties_t blockProperties;
 
     /* Frame Header */
-    if (srcSize < ZSTD_frameHeaderSize_min) return ERROR(srcSize_wrong);
-    if (MEM_readLE32(src) != ZSTD_MAGICNUMBER) return ERROR(prefix_unknown);
+    if (srcSize < ZSTD_frameHeaderSize_min) {
+        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+        return;
+    }
+    if (MEM_readLE32(src) != ZSTD_MAGICNUMBER) {
+        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
+        return;
+    }
     ip += ZSTD_frameHeaderSize_min; remainingSize -= ZSTD_frameHeaderSize_min;
 
     /* Loop on each block */
     while (1)
     {
         size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
-        if (ZSTD_isError(cBlockSize)) return cBlockSize;
+        if (ZSTD_isError(cBlockSize)) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
+            return;
+        }
 
         ip += ZSTD_blockHeaderSize;
         remainingSize -= ZSTD_blockHeaderSize;
-        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+        if (cBlockSize > remainingSize) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+            return;
+        }
 
         if (cBlockSize == 0) break;   /* bt_end */
 
         ip += cBlockSize;
         remainingSize -= cBlockSize;
+        nbBlocks++;
     }
 
-    return ip - (const BYTE*)src;
+    *cSize = ip - (const BYTE*)src;
+    *dBound = nbBlocks * BLOCKSIZE;
 }
 
 /* ******************************
@@ -3636,11 +3606,6 @@
 #endif
 }
 
-size_t ZSTDv04_findFrameCompressedSize(const void* src, size_t srcSize)
-{
-    return ZSTD_findFrameCompressedSize(src, srcSize);
-}
-
 size_t ZSTDv04_resetDCtx(ZSTDv04_Dctx* dctx) { return ZSTD_resetDCtx(dctx); }
 
 size_t ZSTDv04_nextSrcSizeToDecompress(ZSTDv04_Dctx* dctx)
@@ -3670,8 +3635,3 @@
 
 ZSTD_DCtx* ZSTDv04_createDCtx(void) { return ZSTD_createDCtx(); }
 size_t ZSTDv04_freeDCtx(ZSTD_DCtx* dctx) { return ZSTD_freeDCtx(dctx); }
-
-size_t ZSTDv04_getFrameParams(ZSTD_parameters* params, const void* src, size_t srcSize)
-{
-    return ZSTD_getFrameParams(params, src, srcSize);
-}
diff --git a/vendor/github.com/DataDog/zstd/zstd_v04.h b/vendor/github.com/DataDog/zstd/zstd_v04.h
index 6391631..bb5f3b7 100644
--- a/vendor/github.com/DataDog/zstd/zstd_v04.h
+++ b/vendor/github.com/DataDog/zstd/zstd_v04.h
@@ -35,13 +35,18 @@
 size_t ZSTDv04_decompress( void* dst, size_t maxOriginalSize,
                      const void* src, size_t compressedSize);
 
-/**
-ZSTDv04_getFrameSrcSize() : get the source length of a ZSTD frame compliant with v0.4.x format
-    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
-    return : the number of bytes that would be read to decompress this frame
-             or an errorCode if it fails (which can be tested using ZSTDv04_isError())
-*/
-size_t ZSTDv04_findFrameCompressedSize(const void* src, size_t compressedSize);
+ /**
+ ZSTDv04_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.4.x format
+     srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+     cSize (output parameter)  : the number of bytes that would be read to decompress this frame
+                                 or an error code if it fails (which can be tested using ZSTDv04_isError())
+     dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
+                                 or ZSTD_CONTENTSIZE_ERROR if an error occurs
+
+    note : assumes `cSize` and `dBound` are _not_ NULL.
+ */
+ void ZSTDv04_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
+                                      size_t* cSize, unsigned long long* dBound);
 
 /**
 ZSTDv04_isError() : tells if the result of ZSTDv04_decompress() is an error
diff --git a/vendor/github.com/DataDog/zstd/zstd_v05.c b/vendor/github.com/DataDog/zstd/zstd_v05.c
index a5e1b1f..a7ea606 100644
--- a/vendor/github.com/DataDog/zstd/zstd_v05.c
+++ b/vendor/github.com/DataDog/zstd/zstd_v05.c
@@ -218,6 +218,11 @@
     }
 }
 
+MEM_STATIC U32 MEM_readLE24(const void* memPtr)
+{
+    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
+}
+
 MEM_STATIC U32 MEM_readLE32(const void* memPtr)
 {
     if (MEM_isLittleEndian())
@@ -491,6 +496,8 @@
 
 #define WILDCOPY_OVERLENGTH 8
 
+#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
+
 typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
 
 
@@ -836,7 +843,7 @@
     bitD->bitsConsumed += nbBits;
 }
 
-MEM_STATIC size_t BITv05_readBits(BITv05_DStream_t* bitD, U32 nbBits)
+MEM_STATIC size_t BITv05_readBits(BITv05_DStream_t* bitD, unsigned nbBits)
 {
     size_t value = BITv05_lookBits(bitD, nbBits);
     BITv05_skipBits(bitD, nbBits);
@@ -845,7 +852,7 @@
 
 /*!BITv05_readBitsFast :
 *  unsafe version; only works if nbBits >= 1
-MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, U32 nbBits)
+MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits)
 {
     size_t value = BITv05_lookBitsFast(bitD, nbBits);
     BITv05_skipBits(bitD, nbBits);
@@ -1162,7 +1169,7 @@
 /* **************************************************************
 *  Complex types
 ****************************************************************/
-typedef U32 DTable_max_t[FSEv05_DTABLE_SIZE_U32(FSEv05_MAX_TABLELOG)];
+typedef unsigned DTable_max_t[FSEv05_DTABLE_SIZE_U32(FSEv05_MAX_TABLELOG)];
 
 
 /* **************************************************************
@@ -1224,6 +1231,7 @@
     if (tableLog > FSEv05_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
 
     /* Init, lay down lowprob symbols */
+    memset(tableDecode, 0, sizeof(FSEv05_FUNCTION_TYPE) * (maxSymbolValue+1) );   /* useless init, but keeps the static analyzer happy, and we don't need to performance-optimize legacy decoders */
     DTableH.tableLog = (U16)tableLog;
     for (s=0; s<=maxSymbolValue; s++) {
         if (normalizedCounter[s]==-1) {
@@ -1995,91 +2003,92 @@
     const void* cSrc, size_t cSrcSize,
     const U16* DTable)
 {
-    const BYTE* const istart = (const BYTE*) cSrc;
-    BYTE* const ostart = (BYTE*) dst;
-    BYTE* const oend = ostart + dstSize;
-    const void* const dtPtr = DTable;
-    const HUFv05_DEltX2* const dt = ((const HUFv05_DEltX2*)dtPtr) +1;
-    const U32 dtLog = DTable[0];
-    size_t errorCode;
-
-    /* Init */
-    BITv05_DStream_t bitD1;
-    BITv05_DStream_t bitD2;
-    BITv05_DStream_t bitD3;
-    BITv05_DStream_t bitD4;
-    const size_t length1 = MEM_readLE16(istart);
-    const size_t length2 = MEM_readLE16(istart+2);
-    const size_t length3 = MEM_readLE16(istart+4);
-    size_t length4;
-    const BYTE* const istart1 = istart + 6;  /* jumpTable */
-    const BYTE* const istart2 = istart1 + length1;
-    const BYTE* const istart3 = istart2 + length2;
-    const BYTE* const istart4 = istart3 + length3;
-    const size_t segmentSize = (dstSize+3) / 4;
-    BYTE* const opStart2 = ostart + segmentSize;
-    BYTE* const opStart3 = opStart2 + segmentSize;
-    BYTE* const opStart4 = opStart3 + segmentSize;
-    BYTE* op1 = ostart;
-    BYTE* op2 = opStart2;
-    BYTE* op3 = opStart3;
-    BYTE* op4 = opStart4;
-    U32 endSignal;
-
     /* Check */
     if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
+    {
+        const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable;
+        const HUFv05_DEltX2* const dt = ((const HUFv05_DEltX2*)dtPtr) +1;
+        const U32 dtLog = DTable[0];
+        size_t errorCode;
 
-    length4 = cSrcSize - (length1 + length2 + length3 + 6);
-    if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-    errorCode = BITv05_initDStream(&bitD1, istart1, length1);
-    if (HUFv05_isError(errorCode)) return errorCode;
-    errorCode = BITv05_initDStream(&bitD2, istart2, length2);
-    if (HUFv05_isError(errorCode)) return errorCode;
-    errorCode = BITv05_initDStream(&bitD3, istart3, length3);
-    if (HUFv05_isError(errorCode)) return errorCode;
-    errorCode = BITv05_initDStream(&bitD4, istart4, length4);
-    if (HUFv05_isError(errorCode)) return errorCode;
+        /* Init */
+        BITv05_DStream_t bitD1;
+        BITv05_DStream_t bitD2;
+        BITv05_DStream_t bitD3;
+        BITv05_DStream_t bitD4;
+        const size_t length1 = MEM_readLE16(istart);
+        const size_t length2 = MEM_readLE16(istart+2);
+        const size_t length3 = MEM_readLE16(istart+4);
+        size_t length4;
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
+        const size_t segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal;
 
-    /* 16-32 symbols per loop (4-8 symbols per stream) */
-    endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
-    for ( ; (endSignal==BITv05_DStream_unfinished) && (op4<(oend-7)) ; ) {
-        HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1);
-        HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2);
-        HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3);
-        HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4);
-        HUFv05_DECODE_SYMBOLX2_1(op1, &bitD1);
-        HUFv05_DECODE_SYMBOLX2_1(op2, &bitD2);
-        HUFv05_DECODE_SYMBOLX2_1(op3, &bitD3);
-        HUFv05_DECODE_SYMBOLX2_1(op4, &bitD4);
-        HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1);
-        HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2);
-        HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3);
-        HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4);
-        HUFv05_DECODE_SYMBOLX2_0(op1, &bitD1);
-        HUFv05_DECODE_SYMBOLX2_0(op2, &bitD2);
-        HUFv05_DECODE_SYMBOLX2_0(op3, &bitD3);
-        HUFv05_DECODE_SYMBOLX2_0(op4, &bitD4);
+        length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        errorCode = BITv05_initDStream(&bitD1, istart1, length1);
+        if (HUFv05_isError(errorCode)) return errorCode;
+        errorCode = BITv05_initDStream(&bitD2, istart2, length2);
+        if (HUFv05_isError(errorCode)) return errorCode;
+        errorCode = BITv05_initDStream(&bitD3, istart3, length3);
+        if (HUFv05_isError(errorCode)) return errorCode;
+        errorCode = BITv05_initDStream(&bitD4, istart4, length4);
+        if (HUFv05_isError(errorCode)) return errorCode;
+
+        /* 16-32 symbols per loop (4-8 symbols per stream) */
         endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
+        for ( ; (endSignal==BITv05_DStream_unfinished) && (op4<(oend-7)) ; ) {
+            HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUFv05_DECODE_SYMBOLX2_1(op1, &bitD1);
+            HUFv05_DECODE_SYMBOLX2_1(op2, &bitD2);
+            HUFv05_DECODE_SYMBOLX2_1(op3, &bitD3);
+            HUFv05_DECODE_SYMBOLX2_1(op4, &bitD4);
+            HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUFv05_DECODE_SYMBOLX2_0(op1, &bitD1);
+            HUFv05_DECODE_SYMBOLX2_0(op2, &bitD2);
+            HUFv05_DECODE_SYMBOLX2_0(op3, &bitD3);
+            HUFv05_DECODE_SYMBOLX2_0(op4, &bitD4);
+            endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
+        }
+
+        /* check corruption */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 is presumed already verified within the main loop */
+
+        /* finish bitStreams one by one */
+        HUFv05_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+        HUFv05_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+        HUFv05_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+        HUFv05_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
+
+        /* check */
+        endSignal = BITv05_endOfDStream(&bitD1) & BITv05_endOfDStream(&bitD2) & BITv05_endOfDStream(&bitD3) & BITv05_endOfDStream(&bitD4);
+        if (!endSignal) return ERROR(corruption_detected);
+
+        /* decoded size */
+        return dstSize;
     }
-
-    /* check corruption */
-    if (op1 > opStart2) return ERROR(corruption_detected);
-    if (op2 > opStart3) return ERROR(corruption_detected);
-    if (op3 > opStart4) return ERROR(corruption_detected);
-    /* note : op4 supposed already verified within main loop */
-
-    /* finish bitStreams one by one */
-    HUFv05_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
-    HUFv05_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
-    HUFv05_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
-    HUFv05_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
-
-    /* check */
-    endSignal = BITv05_endOfDStream(&bitD1) & BITv05_endOfDStream(&bitD2) & BITv05_endOfDStream(&bitD3) & BITv05_endOfDStream(&bitD4);
-    if (!endSignal) return ERROR(corruption_detected);
-
-    /* decoded size */
-    return dstSize;
 }
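
The 4-stream Huffman decoder above is re-scoped rather than rewritten: every local derived from `cSrc` (the three LE16 jump-table lengths and the stream start pointers) now lives inside a block entered only after the `cSrcSize < 10` check, so nothing reads the source buffer until it is known to hold the 6-byte jump table plus at least one byte per stream. A sketch of that layout parse, with illustrative names:

    #include <stddef.h>

    /* locate the four interleaved streams behind a 6-byte jump table;
       validate the size before deriving anything from the buffer */
    static int streamOffsets(const unsigned char* src, size_t srcSize,
                             size_t offsets[4])
    {
        if (srcSize < 10) return -1;   /* jump table + 1 byte per stream */
        {   size_t const len1 = (size_t)src[0] | ((size_t)src[1] << 8);
            size_t const len2 = (size_t)src[2] | ((size_t)src[3] << 8);
            size_t const len3 = (size_t)src[4] | ((size_t)src[5] << 8);
            offsets[0] = 6;                   /* first stream follows the table */
            offsets[1] = offsets[0] + len1;
            offsets[2] = offsets[1] + len2;
            offsets[3] = offsets[2] + len3;   /* fourth stream : the remainder */
            return 0;
        }
    }
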
 
 
@@ -2190,7 +2199,7 @@
     }
 }
 
-size_t HUFv05_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
+size_t HUFv05_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize)
 {
     BYTE weightList[HUFv05_MAX_SYMBOL_VALUE + 1];
     sortedSymbol_t sortedSymbol[HUFv05_MAX_SYMBOL_VALUE + 1];
@@ -2204,7 +2213,7 @@
     void* dtPtr = DTable;
     HUFv05_DEltX4* const dt = ((HUFv05_DEltX4*)dtPtr) + 1;
 
-    HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX4) == sizeof(U32));   /* if compilation fails here, assertion is false */
+    HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX4) == sizeof(unsigned));   /* if compilation fails here, assertion is false */
     if (memLog > HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
     //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzers complain ... */
 
@@ -2331,7 +2340,7 @@
 size_t HUFv05_decompress1X4_usingDTable(
           void* dst,  size_t dstSize,
     const void* cSrc, size_t cSrcSize,
-    const U32* DTable)
+    const unsigned* DTable)
 {
     const BYTE* const istart = (const BYTE*) cSrc;
     BYTE* const ostart = (BYTE*) dst;
@@ -2374,7 +2383,7 @@
 size_t HUFv05_decompress4X4_usingDTable(
           void* dst,  size_t dstSize,
     const void* cSrc, size_t cSrcSize,
-    const U32* DTable)
+    const unsigned* DTable)
 {
     if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
 
@@ -2658,6 +2667,7 @@
     BYTE headerBuffer[ZSTDv05_frameHeaderSize_max];
 };  /* typedef'd to ZSTDv05_DCtx within "zstd_static.h" */
 
+size_t ZSTDv05_sizeofDCtx (void); /* Hidden declaration */
 size_t ZSTDv05_sizeofDCtx (void) { return sizeof(ZSTDv05_DCtx); }
 
 size_t ZSTDv05_decompressBegin(ZSTDv05_DCtx* dctx)
@@ -2822,7 +2832,7 @@
 }
 
 
-size_t ZSTDv05_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+static size_t ZSTDv05_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
 {
     const BYTE* const in = (const BYTE* const)src;
     BYTE headerFlags;
@@ -2845,6 +2855,7 @@
 
 static size_t ZSTDv05_copyRawBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
 {
+    if (dst==NULL) return ERROR(dstSize_tooSmall);
     if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
     memcpy(dst, src, srcSize);
     return srcSize;
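
The added `dst==NULL` test matters even for empty blocks: passing a null pointer to memcpy is undefined behavior in C regardless of the byte count. A standalone version of the guarded copy (the error sentinel here is illustrative):

    #include <string.h>

    /* raw-block copy that rejects a NULL destination up front :
       memcpy(NULL, src, 0) is already undefined behavior in C */
    static size_t copyRaw(void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize)
    {
        if (dst == NULL) return (size_t)-1;
        if (srcSize > dstCapacity) return (size_t)-1;
        memcpy(dst, src, srcSize);
        return srcSize;
    }
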
@@ -2853,8 +2864,8 @@
 
 /*! ZSTDv05_decodeLiteralsBlock() :
     @return : nb of bytes read from src (< srcSize ) */
-size_t ZSTDv05_decodeLiteralsBlock(ZSTDv05_DCtx* dctx,
-                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
+static size_t ZSTDv05_decodeLiteralsBlock(ZSTDv05_DCtx* dctx,
+                                    const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
 {
     const BYTE* const istart = (const BYTE*) src;
 
@@ -2988,7 +2999,7 @@
 }
 
 
-size_t ZSTDv05_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
+static size_t ZSTDv05_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
                          FSEv05_DTable* DTableLL, FSEv05_DTable* DTableML, FSEv05_DTable* DTableOffb,
                          const void* src, size_t srcSize, U32 flagStaticTable)
 {
@@ -2996,7 +3007,7 @@
     const BYTE* ip = istart;
     const BYTE* const iend = istart + srcSize;
     U32 LLtype, Offtype, MLtype;
-    U32 LLlog, Offlog, MLlog;
+    unsigned LLlog, Offlog, MLlog;
     size_t dumpsLength;
 
     /* check */
@@ -3054,7 +3065,7 @@
             break;
         case FSEv05_ENCODING_DYNAMIC :
         default :   /* impossible */
-            {   U32 max = MaxLL;
+            {   unsigned max = MaxLL;
                 headerSize = FSEv05_readNCount(norm, &max, &LLlog, ip, iend-ip);
                 if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
                 if (LLlog > LLFSEv05Log) return ERROR(corruption_detected);
@@ -3078,7 +3089,7 @@
             break;
         case FSEv05_ENCODING_DYNAMIC :
         default :   /* impossible */
-            {   U32 max = MaxOff;
+            {   unsigned max = MaxOff;
                 headerSize = FSEv05_readNCount(norm, &max, &Offlog, ip, iend-ip);
                 if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
                 if (Offlog > OffFSEv05Log) return ERROR(corruption_detected);
@@ -3102,7 +3113,7 @@
             break;
         case FSEv05_ENCODING_DYNAMIC :
         default :   /* impossible */
-            {   U32 max = MaxML;
+            {   unsigned max = MaxML;
                 headerSize = FSEv05_readNCount(norm, &max, &MLlog, ip, iend-ip);
                 if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
                 if (MLlog > MLFSEv05Log) return ERROR(corruption_detected);
@@ -3145,14 +3156,13 @@
     litLength = FSEv05_peakSymbol(&(seqState->stateLL));
     prevOffset = litLength ? seq->offset : seqState->prevOffset;
     if (litLength == MaxLL) {
-        U32 add = *dumps++;
+        const U32 add = *dumps++;
         if (add < 255) litLength += add;
-        else {
-            litLength = MEM_readLE32(dumps) & 0xFFFFFF;  /* no risk : dumps is always followed by seq tables > 1 byte */
+        else if (dumps + 3 <= de) {
+            litLength = MEM_readLE24(dumps);
             if (litLength&1) litLength>>=1, dumps += 3;
             else litLength = (U16)(litLength)>>1, dumps += 2;
         }
-        if (dumps > de) { litLength = MaxLL+255; }  /* late correction, to avoid using uninitialized memory */
         if (dumps >= de) { dumps = de-1; }  /* late correction, to avoid read overflow (data is now corrupted anyway) */
     }
 
@@ -3179,14 +3189,13 @@
     /* MatchLength */
     matchLength = FSEv05_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
     if (matchLength == MaxML) {
-        U32 add = *dumps++;
+        const U32 add = dumps<de ? *dumps++ : 0;
         if (add < 255) matchLength += add;
-        else {
-            matchLength = MEM_readLE32(dumps) & 0xFFFFFF;  /* no pb : dumps is always followed by seq tables > 1 byte */
+        else if (dumps + 3 <= de) {
+            matchLength = MEM_readLE24(dumps);
             if (matchLength&1) matchLength>>=1, dumps += 3;
             else matchLength = (U16)(matchLength)>>1, dumps += 2;
         }
-        if (dumps > de) { matchLength = MaxML+255; }  /* late correction, to avoid using uninitialized memory */
         if (dumps >= de) { dumps = de-1; }  /* late correction, to avoid read overflow (data is now corrupted anyway) */
     }
     matchLength += MINMATCH;
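
v0.5 packs extended lengths with a parity flag in the low bit of the little-endian read: an odd value means a 3-byte encoding, an even one means only 2 bytes were consumed, and either way the payload is the value shifted right once. The bound checks from the diff above still precede the read; in isolation (illustrative names, bounds handling omitted):

    /* v0.5 extended length : low bit of the LE24 value selects a
       3-byte (odd) or 2-byte (even) encoding; payload is value >> 1 */
    static unsigned decodeLengthV05(const unsigned char** pp)
    {
        const unsigned char* p = *pp;
        unsigned v = (unsigned)p[0] | ((unsigned)p[1] << 8) | ((unsigned)p[2] << 16);
        if (v & 1) { v >>= 1;                    p += 3; }
        else       { v = (unsigned short)v >> 1; p += 2; }
        *pp = p;
        return v;
    }
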
@@ -3214,7 +3223,7 @@
                                 const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
 {
     static const int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
-    static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* substracted */
+    static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
     BYTE* const oLitEnd = op + sequence.litLength;
     const size_t sequenceLength = sequence.litLength + sequence.matchLength;
     BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
@@ -3297,14 +3306,14 @@
     BYTE* const ostart = (BYTE* const)dst;
     BYTE* op = ostart;
     BYTE* const oend = ostart + maxDstSize;
-    size_t errorCode, dumpsLength;
+    size_t errorCode, dumpsLength=0;
     const BYTE* litPtr = dctx->litPtr;
     const BYTE* const litEnd = litPtr + dctx->litSize;
-    int nbSeq;
-    const BYTE* dumps;
-    U32* DTableLL = dctx->LLTable;
-    U32* DTableML = dctx->MLTable;
-    U32* DTableOffb = dctx->OffTable;
+    int nbSeq=0;
+    const BYTE* dumps = NULL;
+    unsigned* DTableLL = dctx->LLTable;
+    unsigned* DTableML = dctx->MLTable;
+    unsigned* DTableOffb = dctx->OffTable;
     const BYTE* const base = (const BYTE*) (dctx->base);
     const BYTE* const vBase = (const BYTE*) (dctx->vBase);
     const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
@@ -3410,10 +3419,10 @@
     BYTE* const oend = ostart + maxDstSize;
     size_t remainingSize = srcSize;
     blockProperties_t blockProperties;
+    memset(&blockProperties, 0, sizeof(blockProperties));
 
     /* Frame Header */
-    {
-        size_t frameHeaderSize;
+    {   size_t frameHeaderSize;
         if (srcSize < ZSTDv05_frameHeaderSize_min+ZSTDv05_blockHeaderSize) return ERROR(srcSize_wrong);
         frameHeaderSize = ZSTDv05_decodeFrameHeader_Part1(dctx, src, ZSTDv05_frameHeaderSize_min);
         if (ZSTDv05_isError(frameHeaderSize)) return frameHeaderSize;
@@ -3505,34 +3514,57 @@
 #endif
 }
 
-size_t ZSTDv05_findFrameCompressedSize(const void *src, size_t srcSize)
+/* ZSTD_errorFrameSizeInfoLegacy() :
+   assumes `cSize` and `dBound` are _not_ NULL */
+static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
+{
+    *cSize = ret;
+    *dBound = ZSTD_CONTENTSIZE_ERROR;
+}
+
+void ZSTDv05_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
 {
     const BYTE* ip = (const BYTE*)src;
     size_t remainingSize = srcSize;
+    size_t nbBlocks = 0;
     blockProperties_t blockProperties;
 
     /* Frame Header */
-    if (srcSize < ZSTDv05_frameHeaderSize_min) return ERROR(srcSize_wrong);
-    if (MEM_readLE32(src) != ZSTDv05_MAGICNUMBER) return ERROR(prefix_unknown);
+    if (srcSize < ZSTDv05_frameHeaderSize_min) {
+        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+        return;
+    }
+    if (MEM_readLE32(src) != ZSTDv05_MAGICNUMBER) {
+        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
+        return;
+    }
     ip += ZSTDv05_frameHeaderSize_min; remainingSize -= ZSTDv05_frameHeaderSize_min;
 
     /* Loop on each block */
     while (1)
     {
         size_t cBlockSize = ZSTDv05_getcBlockSize(ip, remainingSize, &blockProperties);
-        if (ZSTDv05_isError(cBlockSize)) return cBlockSize;
+        if (ZSTDv05_isError(cBlockSize)) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
+            return;
+        }
 
         ip += ZSTDv05_blockHeaderSize;
         remainingSize -= ZSTDv05_blockHeaderSize;
-        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+        if (cBlockSize > remainingSize) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+            return;
+        }
 
         if (cBlockSize == 0) break;   /* bt_end */
 
         ip += cBlockSize;
         remainingSize -= cBlockSize;
+        nbBlocks++;
     }
 
-    return ip - (const BYTE*)src;
+    *cSize = ip - (const BYTE*)src;
+    *dBound = nbBlocks * BLOCKSIZE;
 }
 
 /* ******************************
@@ -3630,7 +3662,7 @@
 {
     size_t hSize, offcodeHeaderSize, matchlengthHeaderSize, errorCode, litlengthHeaderSize;
     short offcodeNCount[MaxOff+1];
-    U32 offcodeMaxValue=MaxOff, offcodeLog;
+    unsigned offcodeMaxValue=MaxOff, offcodeLog;
     short matchlengthNCount[MaxML+1];
     unsigned matchlengthMaxValue = MaxML, matchlengthLog;
     short litlengthNCount[MaxLL+1];
diff --git a/vendor/github.com/DataDog/zstd/zstd_v05.h b/vendor/github.com/DataDog/zstd/zstd_v05.h
index b68fd57..4a97985 100644
--- a/vendor/github.com/DataDog/zstd/zstd_v05.h
+++ b/vendor/github.com/DataDog/zstd/zstd_v05.h
@@ -33,13 +33,18 @@
 size_t ZSTDv05_decompress( void* dst, size_t dstCapacity,
                      const void* src, size_t compressedSize);
 
-/**
-ZSTDv05_getFrameSrcSize() : get the source length of a ZSTD frame
-    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
-    return : the number of bytes that would be read to decompress this frame
-             or an errorCode if it fails (which can be tested using ZSTDv05_isError())
-*/
-size_t ZSTDv05_findFrameCompressedSize(const void* src, size_t compressedSize);
+ /**
+ ZSTDv05_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.5.x format
+     srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+     cSize (output parameter)  : the number of bytes that would be read to decompress this frame
+                                 or an error code if it fails (which can be tested using ZSTDv05_isError())
+     dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
+                                 or ZSTD_CONTENTSIZE_ERROR if an error occurs
+
+    note : assumes `cSize` and `dBound` are _not_ NULL.
+ */
+void ZSTDv05_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
+                                     size_t* cSize, unsigned long long* dBound);
 
 /* *************************************
 *  Helper functions
diff --git a/vendor/github.com/DataDog/zstd/zstd_v06.c b/vendor/github.com/DataDog/zstd/zstd_v06.c
index 8b068b3..f907a3a 100644
--- a/vendor/github.com/DataDog/zstd/zstd_v06.c
+++ b/vendor/github.com/DataDog/zstd/zstd_v06.c
@@ -506,6 +506,8 @@
 #define FSEv06_ENCODING_STATIC  2
 #define FSEv06_ENCODING_DYNAMIC 3
 
+#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
+
 static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                       1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9,10,11,12,
                                      13,14,15,16 };
@@ -1250,9 +1252,7 @@
 /* **************************************************************
 *  HUF Error Management
 ****************************************************************/
-unsigned HUFv06_isError(size_t code) { return ERR_isError(code); }
-
-const char* HUFv06_getErrorName(size_t code) { return ERR_getErrorName(code); }
+static unsigned HUFv06_isError(size_t code) { return ERR_isError(code); }
 
 
 /*-**************************************************************
@@ -2823,7 +2823,8 @@
     BYTE headerBuffer[ZSTDv06_FRAMEHEADERSIZE_MAX];
 };  /* typedef'd to ZSTDv06_DCtx within "zstd_static.h" */
 
-size_t ZSTDv06_sizeofDCtx (void) { return sizeof(ZSTDv06_DCtx); }   /* non published interface */
+size_t ZSTDv06_sizeofDCtx (void); /* Hidden declaration */
+size_t ZSTDv06_sizeofDCtx (void) { return sizeof(ZSTDv06_DCtx); }
 
 size_t ZSTDv06_decompressBegin(ZSTDv06_DCtx* dctx)
 {
@@ -3022,7 +3023,7 @@
 
 /*! ZSTDv06_getcBlockSize() :
 *   Provides the size of compressed block from block header `src` */
-size_t ZSTDv06_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+static size_t ZSTDv06_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
 {
     const BYTE* const in = (const BYTE* const)src;
     U32 cSize;
@@ -3041,6 +3042,7 @@
 
 static size_t ZSTDv06_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
 {
+    if (dst==NULL) return ERROR(dstSize_tooSmall);
     if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
     memcpy(dst, src, srcSize);
     return srcSize;
@@ -3049,7 +3051,7 @@
 
 /*! ZSTDv06_decodeLiteralsBlock() :
     @return : nb of bytes read from src (< srcSize ) */
-size_t ZSTDv06_decodeLiteralsBlock(ZSTDv06_DCtx* dctx,
+static size_t ZSTDv06_decodeLiteralsBlock(ZSTDv06_DCtx* dctx,
                           const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
 {
     const BYTE* const istart = (const BYTE*) src;
@@ -3183,7 +3185,7 @@
     @return : nb bytes read from src,
               or an error code if it fails, testable with ZSTDv06_isError()
 */
-size_t ZSTDv06_buildSeqTable(FSEv06_DTable* DTable, U32 type, U32 max, U32 maxLog,
+static size_t ZSTDv06_buildSeqTable(FSEv06_DTable* DTable, U32 type, U32 max, U32 maxLog,
                                  const void* src, size_t srcSize,
                                  const S16* defaultNorm, U32 defaultLog, U32 flagRepeatTable)
 {
@@ -3213,7 +3215,7 @@
 }
 
 
-size_t ZSTDv06_decodeSeqHeaders(int* nbSeqPtr,
+static size_t ZSTDv06_decodeSeqHeaders(int* nbSeqPtr,
                              FSEv06_DTable* DTableLL, FSEv06_DTable* DTableML, FSEv06_DTable* DTableOffb, U32 flagRepeatTable,
                              const void* src, size_t srcSize)
 {
@@ -3240,14 +3242,12 @@
     }
 
     /* FSE table descriptors */
+    if (ip + 4 > iend) return ERROR(srcSize_wrong); /* min : header byte + all 3 are "raw", hence no header, but at least xxLog bits per type */
     {   U32 const LLtype  = *ip >> 6;
         U32 const Offtype = (*ip >> 4) & 3;
         U32 const MLtype  = (*ip >> 2) & 3;
         ip++;
 
-        /* check */
-        if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
-
         /* Build DTables */
         {   size_t const bhSize = ZSTDv06_buildSeqTable(DTableLL, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_defaultNorm, LL_defaultNormLog, flagRepeatTable);
             if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected);
@@ -3358,7 +3358,7 @@
 }
 
 
-size_t ZSTDv06_execSequence(BYTE* op,
+static size_t ZSTDv06_execSequence(BYTE* op,
                                 BYTE* const oend, seq_t sequence,
                                 const BYTE** litPtr, const BYTE* const litLimit,
                                 const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
@@ -3406,7 +3406,7 @@
     if (sequence.offset < 8) {
         /* close range match, overlap */
         static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
-        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* substracted */
+        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
         int const sub2 = dec64table[sequence.offset];
         op[0] = match[0];
         op[1] = match[1];
@@ -3654,36 +3654,62 @@
 #endif
 }
 
-size_t ZSTDv06_findFrameCompressedSize(const void* src, size_t srcSize)
+/* ZSTD_errorFrameSizeInfoLegacy() :
+   assumes `cSize` and `dBound` are _not_ NULL */
+static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
+{
+    *cSize = ret;
+    *dBound = ZSTD_CONTENTSIZE_ERROR;
+}
+
+void ZSTDv06_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
 {
     const BYTE* ip = (const BYTE*)src;
     size_t remainingSize = srcSize;
+    size_t nbBlocks = 0;
     blockProperties_t blockProperties = { bt_compressed, 0 };
 
     /* Frame Header */
-    {   size_t const frameHeaderSize = ZSTDv06_frameHeaderSize(src, ZSTDv06_frameHeaderSize_min);
-        if (ZSTDv06_isError(frameHeaderSize)) return frameHeaderSize;
-        if (MEM_readLE32(src) != ZSTDv06_MAGICNUMBER) return ERROR(prefix_unknown);
-        if (srcSize < frameHeaderSize+ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong);
+    {   size_t const frameHeaderSize = ZSTDv06_frameHeaderSize(src, srcSize);
+        if (ZSTDv06_isError(frameHeaderSize)) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, frameHeaderSize);
+            return;
+        }
+        if (MEM_readLE32(src) != ZSTDv06_MAGICNUMBER) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
+            return;
+        }
+        if (srcSize < frameHeaderSize+ZSTDv06_blockHeaderSize) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+            return;
+        }
         ip += frameHeaderSize; remainingSize -= frameHeaderSize;
     }
 
     /* Loop on each block */
     while (1) {
         size_t const cBlockSize = ZSTDv06_getcBlockSize(ip, remainingSize, &blockProperties);
-        if (ZSTDv06_isError(cBlockSize)) return cBlockSize;
+        if (ZSTDv06_isError(cBlockSize)) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
+            return;
+        }
 
         ip += ZSTDv06_blockHeaderSize;
         remainingSize -= ZSTDv06_blockHeaderSize;
-        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+        if (cBlockSize > remainingSize) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+            return;
+        }
 
         if (cBlockSize == 0) break;   /* bt_end */
 
         ip += cBlockSize;
         remainingSize -= cBlockSize;
+        nbBlocks++;
     }
 
-    return ip - (const BYTE*)src;
+    *cSize = ip - (const BYTE*)src;
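+    /* a block decompresses to at most ZSTDv06_BLOCKSIZE_MAX bytes,
+     * so the block count upper-bounds the frame's content size */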
+    *dBound = nbBlocks * ZSTDv06_BLOCKSIZE_MAX;
 }
 
 /*_******************************
@@ -4006,7 +4032,7 @@
                     if (ZSTDv06_isError(hSize)) return hSize;
                     if (toLoad > (size_t)(iend-ip)) {   /* not enough input to load full header */
                         memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip);
-                        zbd->lhSize += iend-ip; ip = iend; notDone = 0;
+                        zbd->lhSize += iend-ip;
                         *dstCapacityPtr = 0;
                         return (hSize - zbd->lhSize) + ZSTDv06_blockHeaderSize;   /* remaining header bytes + next block header */
                     }
diff --git a/vendor/github.com/DataDog/zstd/zstd_v06.h b/vendor/github.com/DataDog/zstd/zstd_v06.h
index fb4eb37..0781857 100644
--- a/vendor/github.com/DataDog/zstd/zstd_v06.h
+++ b/vendor/github.com/DataDog/zstd/zstd_v06.h
@@ -43,12 +43,17 @@
                                     const void* src, size_t compressedSize);
 
 /**
-ZSTDv06_getFrameSrcSize() : get the source length of a ZSTD frame
-    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
-    return : the number of bytes that would be read to decompress this frame
-             or an errorCode if it fails (which can be tested using ZSTDv06_isError())
+ZSTDv06_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.6.x format
+    srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+    cSize (output parameter)  : the number of bytes that would be read to decompress this frame
+                                or an error code if it fails (which can be tested using ZSTDv06_isError())
+    dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
+                                or ZSTD_CONTENTSIZE_ERROR if an error occurs
+
+    note : assumes `cSize` and `dBound` are _not_ NULL.
 */
-size_t ZSTDv06_findFrameCompressedSize(const void* src, size_t compressedSize);
+void ZSTDv06_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
+                                     size_t* cSize, unsigned long long* dBound);
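For callers migrating off ZSTDv06_findFrameCompressedSize(), a minimal sketch of the new two-output flow (hypothetical code, not part of this patch; allocForLegacyFrame is illustrative only):

    #include <stdlib.h>
    #include "zstd_v06.h"

    /* hypothetical helper : allocate a buffer guaranteed large enough for
     * the decompressed frame; returns NULL if the frame cannot be parsed */
    static void* allocForLegacyFrame(const void* src, size_t srcSize,
                                     unsigned long long* dstCapacity)
    {
        size_t cSize;
        unsigned long long dBound;
        ZSTDv06_findFrameSizeInfoLegacy(src, srcSize, &cSize, &dBound);
        if (ZSTDv06_isError(cSize)) return NULL;  /* dBound holds ZSTD_CONTENTSIZE_ERROR here */
        *dstCapacity = dBound;                    /* nbBlocks * ZSTDv06_BLOCKSIZE_MAX at most */
        return malloc((size_t)dBound);            /* bound-check dBound first on 32-bit targets */
    }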
 
 /* *************************************
 *  Helper functions
diff --git a/vendor/github.com/DataDog/zstd/zstd_v07.c b/vendor/github.com/DataDog/zstd/zstd_v07.c
index 70b170f..a83ddc9 100644
--- a/vendor/github.com/DataDog/zstd/zstd_v07.c
+++ b/vendor/github.com/DataDog/zstd/zstd_v07.c
@@ -2628,7 +2628,7 @@
 
 
 
-void* ZSTDv07_defaultAllocFunction(void* opaque, size_t size)
+static void* ZSTDv07_defaultAllocFunction(void* opaque, size_t size)
 {
     void* address = malloc(size);
     (void)opaque;
@@ -2636,7 +2636,7 @@
     return address;
 }
 
-void ZSTDv07_defaultFreeFunction(void* opaque, void* address)
+static void ZSTDv07_defaultFreeFunction(void* opaque, void* address)
 {
     (void)opaque;
     /* if (address) printf("free %p opaque=%p \n", address, opaque); */
@@ -2740,6 +2740,8 @@
 #define FSEv07_ENCODING_STATIC  2
 #define FSEv07_ENCODING_DYNAMIC 3
 
+#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
+
 static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                       1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9,10,11,12,
                                      13,14,15,16 };
@@ -3150,10 +3152,10 @@
     const BYTE* ip = (const BYTE*)src;
 
     if (srcSize < ZSTDv07_frameHeaderSize_min) return ZSTDv07_frameHeaderSize_min;
+    memset(fparamsPtr, 0, sizeof(*fparamsPtr));
     if (MEM_readLE32(src) != ZSTDv07_MAGICNUMBER) {
         if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTDv07_MAGIC_SKIPPABLE_START) {
             if (srcSize < ZSTDv07_skippableHeaderSize) return ZSTDv07_skippableHeaderSize; /* magic number + skippable frame length */
-            memset(fparamsPtr, 0, sizeof(*fparamsPtr));
             fparamsPtr->frameContentSize = MEM_readLE32((const char *)src + 4);
             fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */
             return 0;
@@ -3175,11 +3177,13 @@
         U32 windowSize = 0;
         U32 dictID = 0;
         U64 frameContentSize = 0;
-        if ((fhdByte & 0x08) != 0) return ERROR(frameParameter_unsupported);   /* reserved bits, which must be zero */
+        if ((fhdByte & 0x08) != 0)   /* reserved bits, which must be zero */
+            return ERROR(frameParameter_unsupported);
         if (!directMode) {
             BYTE const wlByte = ip[pos++];
             U32 const windowLog = (wlByte >> 3) + ZSTDv07_WINDOWLOG_ABSOLUTEMIN;
-            if (windowLog > ZSTDv07_WINDOWLOG_MAX) return ERROR(frameParameter_unsupported);
+            if (windowLog > ZSTDv07_WINDOWLOG_MAX)
+                return ERROR(frameParameter_unsupported);
             windowSize = (1U << windowLog);
             windowSize += (windowSize >> 3) * (wlByte&7);
         }
@@ -3201,7 +3205,8 @@
             case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
         }
         if (!windowSize) windowSize = (U32)frameContentSize;
-        if (windowSize > windowSizeMax) return ERROR(frameParameter_unsupported);
+        if (windowSize > windowSizeMax)
+            return ERROR(frameParameter_unsupported);
         fparamsPtr->frameContentSize = frameContentSize;
         fparamsPtr->windowSize = windowSize;
         fparamsPtr->dictID = dictID;
@@ -3220,11 +3225,10 @@
                    - frame header not completely provided (`srcSize` too small) */
 unsigned long long ZSTDv07_getDecompressedSize(const void* src, size_t srcSize)
 {
-    {   ZSTDv07_frameParams fparams;
-        size_t const frResult = ZSTDv07_getFrameParams(&fparams, src, srcSize);
-        if (frResult!=0) return 0;
-        return fparams.frameContentSize;
-    }
+    ZSTDv07_frameParams fparams;
+    size_t const frResult = ZSTDv07_getFrameParams(&fparams, src, srcSize);
+    if (frResult!=0) return 0;
+    return fparams.frameContentSize;
 }
 
 
@@ -3248,7 +3252,7 @@
 
 /*! ZSTDv07_getcBlockSize() :
 *   Provides the size of compressed block from block header `src` */
-size_t ZSTDv07_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+static size_t ZSTDv07_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
 {
     const BYTE* const in = (const BYTE* const)src;
     U32 cSize;
@@ -3275,7 +3279,7 @@
 
 /*! ZSTDv07_decodeLiteralsBlock() :
     @return : nb of bytes read from src (< srcSize ) */
-size_t ZSTDv07_decodeLiteralsBlock(ZSTDv07_DCtx* dctx,
+static size_t ZSTDv07_decodeLiteralsBlock(ZSTDv07_DCtx* dctx,
                           const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
 {
     const BYTE* const istart = (const BYTE*) src;
@@ -3409,7 +3413,7 @@
     @return : nb bytes read from src,
               or an error code if it fails, testable with ZSTDv07_isError()
 */
-size_t ZSTDv07_buildSeqTable(FSEv07_DTable* DTable, U32 type, U32 max, U32 maxLog,
+static size_t ZSTDv07_buildSeqTable(FSEv07_DTable* DTable, U32 type, U32 max, U32 maxLog,
                                  const void* src, size_t srcSize,
                                  const S16* defaultNorm, U32 defaultLog, U32 flagRepeatTable)
 {
@@ -3439,7 +3443,7 @@
 }
 
 
-size_t ZSTDv07_decodeSeqHeaders(int* nbSeqPtr,
+static size_t ZSTDv07_decodeSeqHeaders(int* nbSeqPtr,
                              FSEv07_DTable* DTableLL, FSEv07_DTable* DTableML, FSEv07_DTable* DTableOffb, U32 flagRepeatTable,
                              const void* src, size_t srcSize)
 {
@@ -3466,14 +3470,12 @@
     }
 
     /* FSE table descriptors */
+    if (ip + 4 > iend) return ERROR(srcSize_wrong); /* min : header byte + all 3 are "raw", hence no header, but at least xxLog bits per type */
     {   U32 const LLtype  = *ip >> 6;
         U32 const OFtype = (*ip >> 4) & 3;
         U32 const MLtype  = (*ip >> 2) & 3;
         ip++;
 
-        /* check */
-        if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
-
         /* Build DTables */
         {   size_t const llhSize = ZSTDv07_buildSeqTable(DTableLL, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_defaultNorm, LL_defaultNormLog, flagRepeatTable);
             if (ZSTDv07_isError(llhSize)) return ERROR(corruption_detected);
@@ -3629,7 +3631,7 @@
     if (sequence.offset < 8) {
         /* close range match, overlap */
         static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
-        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* substracted */
+        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
         int const sub2 = dec64table[sequence.offset];
         op[0] = match[0];
         op[1] = match[1];
@@ -3771,7 +3773,7 @@
 }
 
 
-size_t ZSTDv07_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length)
+static size_t ZSTDv07_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length)
 {
     if (length > dstCapacity) return ERROR(dstSize_tooSmall);
     memset(dst, byte, length);
@@ -3851,7 +3853,7 @@
 *   It avoids reloading the dictionary each time.
 *   `preparedDCtx` must have been properly initialized using ZSTDv07_decompressBegin_usingDict().
 *   Requires 2 contexts : 1 for reference (preparedDCtx), which will not be modified, and 1 to run the decompression operation (dctx) */
-size_t ZSTDv07_decompress_usingPreparedDCtx(ZSTDv07_DCtx* dctx, const ZSTDv07_DCtx* refDCtx,
+static size_t ZSTDv07_decompress_usingPreparedDCtx(ZSTDv07_DCtx* dctx, const ZSTDv07_DCtx* refDCtx,
                                          void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
 {
@@ -3893,19 +3895,40 @@
 #endif
 }
 
-size_t ZSTDv07_findFrameCompressedSize(const void* src, size_t srcSize)
+/* ZSTD_errorFrameSizeInfoLegacy() :
+   assumes `cSize` and `dBound` are _not_ NULL */
+static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
+{
+    *cSize = ret;
+    *dBound = ZSTD_CONTENTSIZE_ERROR;
+}
+
+void ZSTDv07_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
 {
     const BYTE* ip = (const BYTE*)src;
     size_t remainingSize = srcSize;
+    size_t nbBlocks = 0;
 
     /* check */
-    if (srcSize < ZSTDv07_frameHeaderSize_min+ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);
+    if (srcSize < ZSTDv07_frameHeaderSize_min+ZSTDv07_blockHeaderSize) {
+        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+        return;
+    }
 
     /* Frame Header */
-    {   size_t const frameHeaderSize = ZSTDv07_frameHeaderSize(src, ZSTDv07_frameHeaderSize_min);
-        if (ZSTDv07_isError(frameHeaderSize)) return frameHeaderSize;
-        if (MEM_readLE32(src) != ZSTDv07_MAGICNUMBER) return ERROR(prefix_unknown);
-        if (srcSize < frameHeaderSize+ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);
+    {   size_t const frameHeaderSize = ZSTDv07_frameHeaderSize(src, srcSize);
+        if (ZSTDv07_isError(frameHeaderSize)) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, frameHeaderSize);
+            return;
+        }
+        if (MEM_readLE32(src) != ZSTDv07_MAGICNUMBER) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
+            return;
+        }
+        if (srcSize < frameHeaderSize+ZSTDv07_blockHeaderSize) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+            return;
+        }
         ip += frameHeaderSize; remainingSize -= frameHeaderSize;
     }
 
@@ -3913,20 +3936,28 @@
     while (1) {
         blockProperties_t blockProperties;
         size_t const cBlockSize = ZSTDv07_getcBlockSize(ip, remainingSize, &blockProperties);
-        if (ZSTDv07_isError(cBlockSize)) return cBlockSize;
+        if (ZSTDv07_isError(cBlockSize)) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
+            return;
+        }
 
         ip += ZSTDv07_blockHeaderSize;
         remainingSize -= ZSTDv07_blockHeaderSize;
 
         if (blockProperties.blockType == bt_end) break;
 
-        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+        if (cBlockSize > remainingSize) {
+            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
+            return;
+        }
 
         ip += cBlockSize;
         remainingSize -= cBlockSize;
+        nbBlocks++;
     }
 
-    return ip - (const BYTE*)src;
+    *cSize = ip - (const BYTE*)src;
+    *dBound = nbBlocks * ZSTDv07_BLOCKSIZE_ABSOLUTEMAX;
 }
 
 /*_******************************
@@ -4146,7 +4177,7 @@
     ZSTDv07_DCtx* refContext;
 };  /* typedef'd to ZSTDv07_DDict within zstd.h */
 
-ZSTDv07_DDict* ZSTDv07_createDDict_advanced(const void* dict, size_t dictSize, ZSTDv07_customMem customMem)
+static ZSTDv07_DDict* ZSTDv07_createDDict_advanced(const void* dict, size_t dictSize, ZSTDv07_customMem customMem)
 {
     if (!customMem.customAlloc && !customMem.customFree)
         customMem = defaultCustomMem;
diff --git a/vendor/github.com/DataDog/zstd/zstd_v07.h b/vendor/github.com/DataDog/zstd/zstd_v07.h
index 6591cd3..a566c1d 100644
--- a/vendor/github.com/DataDog/zstd/zstd_v07.h
+++ b/vendor/github.com/DataDog/zstd/zstd_v07.h
@@ -50,12 +50,17 @@
                                     const void* src, size_t compressedSize);
 
 /**
-ZSTDv07_getFrameSrcSize() : get the source length of a ZSTD frame
-    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
-    return : the number of bytes that would be read to decompress this frame
-             or an errorCode if it fails (which can be tested using ZSTDv07_isError())
+ZSTDv07_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.7.x format
+    srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+    cSize (output parameter)  : the number of bytes that would be read to decompress this frame
+                                or an error code if it fails (which can be tested using ZSTDv07_isError())
+    dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
+                                or ZSTD_CONTENTSIZE_ERROR if an error occurs
+
+    note : assumes `cSize` and `dBound` are _not_ NULL.
 */
-size_t ZSTDv07_findFrameCompressedSize(const void* src, size_t compressedSize);
+void ZSTDv07_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
+                                     size_t* cSize, unsigned long long* dBound);
 
 /*======  Helper functions  ======*/
 ZSTDLIBv07_API unsigned    ZSTDv07_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
diff --git a/vendor/github.com/DataDog/zstd/zstdmt_compress.c b/vendor/github.com/DataDog/zstd/zstdmt_compress.c
old mode 100755
new mode 100644
index c7a205d..9e537b8
--- a/vendor/github.com/DataDog/zstd/zstdmt_compress.c
+++ b/vendor/github.com/DataDog/zstd/zstdmt_compress.c
@@ -9,21 +9,20 @@
  */
 
 
-/* ======   Tuning parameters   ====== */
-#define ZSTDMT_NBWORKERS_MAX 200
-#define ZSTDMT_JOBSIZE_MAX  (MEM_32bits() ? (512 MB) : (2 GB))  /* note : limited by `jobSize` type, which is `unsigned` */
-#define ZSTDMT_OVERLAPLOG_DEFAULT 6
-
-
 /* ======   Compiler specifics   ====== */
 #if defined(_MSC_VER)
 #  pragma warning(disable : 4204)   /* disable: C4204: non-constant aggregate initializer */
 #endif
 
 
+/* ======   Constants   ====== */
+#define ZSTDMT_OVERLAPLOG_DEFAULT 0
+
+
 /* ======   Dependencies   ====== */
 #include <string.h>      /* memcpy, memset */
-#include <limits.h>      /* INT_MAX */
+#include <limits.h>      /* INT_MAX, UINT_MAX */
+#include "mem.h"         /* MEM_STATIC */
 #include "pool.h"        /* threadpool */
 #include "threading.h"   /* mutex */
 #include "zstd_compress_internal.h"  /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
@@ -37,18 +36,19 @@
 #define ZSTD_RESIZE_SEQPOOL 0
 
 /* ======   Debug   ====== */
-#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2)
+#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=2) \
+    && !defined(_MSC_VER) \
+    && !defined(__MINGW32__)
 
 #  include <stdio.h>
 #  include <unistd.h>
 #  include <sys/times.h>
-#  define DEBUGLOGRAW(l, ...) if (l<=ZSTD_DEBUG) { fprintf(stderr, __VA_ARGS__); }
 
 #  define DEBUG_PRINTHEX(l,p,n) {            \
     unsigned debug_u;                        \
     for (debug_u=0; debug_u<(n); debug_u++)  \
-        DEBUGLOGRAW(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \
-    DEBUGLOGRAW(l, " \n");                   \
+        RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \
+    RAWLOG(l, " \n");                        \
 }
 
 static unsigned long long GetCurrentClockTimeMicroseconds(void)
@@ -56,13 +56,13 @@
    static clock_t _ticksPerSecond = 0;
    if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK);
 
-   { struct tms junk; clock_t newTicks = (clock_t) times(&junk);
-     return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond); }
-}
+   {   struct tms junk; clock_t newTicks = (clock_t) times(&junk);
+       return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond);
+}  }
 
 #define MUTEX_WAIT_TIME_DLEVEL 6
 #define ZSTD_PTHREAD_MUTEX_LOCK(mutex) {          \
-    if (ZSTD_DEBUG >= MUTEX_WAIT_TIME_DLEVEL) {   \
+    if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) {   \
         unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \
         ZSTD_pthread_mutex_lock(mutex);           \
         {   unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
@@ -160,6 +160,25 @@
     ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
 }
 
+
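+/* ZSTDMT_expandBufferPool() :
+ * grows the pool, if needed, by freeing it and re-creating it at the target
+ * size : cached buffers are dropped in the process, while cMem and bufferSize
+ * are carried over, so callers don't have to re-apply them */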
+static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, U32 nbWorkers)
+{
+    unsigned const maxNbBuffers = 2*nbWorkers + 3;
+    if (srcBufPool==NULL) return NULL;
+    if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */
+        return srcBufPool;
+    /* need a larger buffer pool */
+    {   ZSTD_customMem const cMem = srcBufPool->cMem;
+        size_t const bSize = srcBufPool->bufferSize;   /* forward parameters */
+        ZSTDMT_bufferPool* newBufPool;
+        ZSTDMT_freeBufferPool(srcBufPool);
+        newBufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+        if (newBufPool==NULL) return newBufPool;
+        ZSTDMT_setBufferSize(newBufPool, bSize);
+        return newBufPool;
+    }
+}
+
 /** ZSTDMT_getBuffer() :
  *  assumption : bufPool must be valid
  * @return : a buffer, with start pointer and size
@@ -229,8 +248,8 @@
 /* store buffer for later re-use, up to pool capacity */
 static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
 {
-    if (buf.start == NULL) return;   /* compatible with release on NULL */
     DEBUGLOG(5, "ZSTDMT_releaseBuffer");
+    if (buf.start == NULL) return;   /* compatible with release on NULL */
     ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
     if (bufPool->nbBuffers < bufPool->totalBuffers) {
         bufPool->bTable[bufPool->nbBuffers++] = buf;  /* stored for later use */
@@ -300,7 +319,8 @@
 
 static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem)
 {
-    ZSTDMT_seqPool* seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+    ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+    if (seqPool == NULL) return NULL;
     ZSTDMT_setNbSeq(seqPool, 0);
     return seqPool;
 }
@@ -310,6 +330,10 @@
     ZSTDMT_freeBufferPool(seqPool);
 }
 
+static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers)
+{
+    return ZSTDMT_expandBufferPool(pool, nbWorkers);
+}
 
 
 /* =====   CCtx Pool   ===== */
@@ -317,8 +341,8 @@
 
 typedef struct {
     ZSTD_pthread_mutex_t poolMutex;
-    unsigned totalCCtx;
-    unsigned availCCtx;
+    int totalCCtx;
+    int availCCtx;
     ZSTD_customMem cMem;
     ZSTD_CCtx* cctx[1];   /* variable size */
 } ZSTDMT_CCtxPool;
@@ -326,16 +350,16 @@
 /* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */
 static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
 {
-    unsigned u;
-    for (u=0; u<pool->totalCCtx; u++)
-        ZSTD_freeCCtx(pool->cctx[u]);  /* note : compatible with free on NULL */
+    int cid;
+    for (cid=0; cid<pool->totalCCtx; cid++)
+        ZSTD_freeCCtx(pool->cctx[cid]);  /* note : compatible with free on NULL */
     ZSTD_pthread_mutex_destroy(&pool->poolMutex);
     ZSTD_free(pool, pool->cMem);
 }
 
 /* ZSTDMT_createCCtxPool() :
  * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */
-static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbWorkers,
+static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers,
                                               ZSTD_customMem cMem)
 {
     ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc(
@@ -355,6 +379,18 @@
     return cctxPool;
 }
 
+static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool,
+                                              int nbWorkers)
+{
+    if (srcPool==NULL) return NULL;
+    if (nbWorkers <= srcPool->totalCCtx) return srcPool;   /* good enough */
+    /* need a larger cctx pool */
+    {   ZSTD_customMem const cMem = srcPool->cMem;
+        ZSTDMT_freeCCtxPool(srcPool);
+        return ZSTDMT_createCCtxPool(nbWorkers, cMem);
+    }
+}
+
 /* only works during initialization phase, not during compression */
 static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
 {
@@ -421,21 +457,20 @@
      * Must be acquired after the main mutex when acquiring both.
      */
     ZSTD_pthread_mutex_t ldmWindowMutex;
-    ZSTD_pthread_cond_t ldmWindowCond;  /* Signaled when ldmWindow is udpated */
+    ZSTD_pthread_cond_t ldmWindowCond;  /* Signaled when ldmWindow is updated */
     ZSTD_window_t ldmWindow;  /* A thread-safe copy of ldmState.window */
 } serialState_t;
 
-static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool* seqPool, ZSTD_CCtx_params params)
+static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool* seqPool, ZSTD_CCtx_params params, size_t jobSize)
 {
     /* Adjust parameters */
     if (params.ldmParams.enableLdm) {
         DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10);
-        params.ldmParams.windowLog = params.cParams.windowLog;
         ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
         assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
-        assert(params.ldmParams.hashEveryLog < 32);
+        assert(params.ldmParams.hashRateLog < 32);
         serialState->ldmState.hashPower =
-                ZSTD_ldm_getHashPower(params.ldmParams.minMatchLength);
+                ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
     } else {
         memset(&params.ldmParams, 0, sizeof(params.ldmParams));
     }
@@ -453,7 +488,7 @@
             serialState->params.ldmParams.hashLog -
             serialState->params.ldmParams.bucketSizeLog;
         /* Size the seq pool tables */
-        ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, params.jobSize));
+        ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize));
         /* Reset the window */
         ZSTD_window_clear(&serialState->ldmState.window);
         serialState->ldmWindow = serialState->ldmState.window;
@@ -473,6 +508,7 @@
         memset(serialState->ldmState.bucketOffsets, 0, bucketSize);
     }
     serialState->params = params;
+    serialState->params.jobSize = (U32)jobSize;
     return 0;
 }
 
@@ -505,6 +541,7 @@
     /* Wait for our turn */
     ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
     while (serialState->nextJobID < jobID) {
+        DEBUGLOG(5, "wait for serialState->cond");
         ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex);
     }
     /* A future job may error and skip our job */
@@ -514,6 +551,7 @@
             size_t error;
             assert(seqStore.seq != NULL && seqStore.pos == 0 &&
                    seqStore.size == 0 && seqStore.capacity > 0);
+            assert(src.size <= serialState->params.jobSize);
             ZSTD_window_update(&serialState->ldmState.window, src.start, src.size);
             error = ZSTD_ldm_generateSequences(
                 &serialState->ldmState, &seqStore,
@@ -593,14 +631,32 @@
     unsigned frameChecksumNeeded;        /* used only by mtctx */
 } ZSTDMT_jobDescription;
 
+#define JOB_ERROR(e) {                          \
+    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);   \
+    job->cSize = e;                             \
+    ZSTD_pthread_mutex_unlock(&job->job_mutex); \
+    goto _endJob;                               \
+}
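+/* note : JOB_ERROR() expects a ZSTDMT_jobDescription* named `job` and an
+ * _endJob label in scope; job->cSize doubles as the job's error slot
+ * (read concurrently via ZSTD_isError()), hence the mutex around the write */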
+
 /* ZSTDMT_compressionJob() is a POOL_function type */
-void ZSTDMT_compressionJob(void* jobDescription)
+static void ZSTDMT_compressionJob(void* jobDescription)
 {
     ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
     ZSTD_CCtx_params jobParams = job->params;   /* do not modify job->params ! copy it, modify the copy */
     ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);
     rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool);
     buffer_t dstBuff = job->dstBuff;
+    size_t lastCBlockSize = 0;
+
+    /* resources */
+    if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation));
+    if (dstBuff.start == NULL) {   /* streaming job : doesn't provide a dstBuffer */
+        dstBuff = ZSTDMT_getBuffer(job->bufPool);
+        if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation));
+        job->dstBuff = dstBuff;   /* this value can be read in ZSTDMT_flush, when it copies the whole job */
+    }
+    if (jobParams.ldmParams.enableLdm && rawSeqStore.seq == NULL)
+        JOB_ERROR(ERROR(memory_allocation));
 
     /* Don't compute the checksum for chunks, since we compute it externally,
      * but write it in the header.
@@ -609,47 +665,31 @@
     /* Don't run LDM for the chunks, since we handle it externally */
     jobParams.ldmParams.enableLdm = 0;
 
-    /* ressources */
-    if (cctx==NULL) {
-        job->cSize = ERROR(memory_allocation);
-        goto _endJob;
-    }
-    if (dstBuff.start == NULL) {   /* streaming job : doesn't provide a dstBuffer */
-        dstBuff = ZSTDMT_getBuffer(job->bufPool);
-        if (dstBuff.start==NULL) {
-            job->cSize = ERROR(memory_allocation);
-            goto _endJob;
-        }
-        job->dstBuff = dstBuff;   /* this value can be read in ZSTDMT_flush, when it copies the whole job */
-    }
 
     /* init */
     if (job->cdict) {
-        size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, job->cdict, jobParams, job->fullFrameSize);
+        size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, jobParams, job->fullFrameSize);
         assert(job->firstJob);  /* only allowed for first job */
-        if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; }
+        if (ZSTD_isError(initError)) JOB_ERROR(initError);
     } else {  /* srcStart points at reloaded section */
         U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
-        {   size_t const forceWindowError = ZSTD_CCtxParam_setParameter(&jobParams, ZSTD_p_forceMaxWindow, !job->firstJob);
-            if (ZSTD_isError(forceWindowError)) {
-                job->cSize = forceWindowError;
-                goto _endJob;
-        }   }
+        {   size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
+            if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
+        }
         {   size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
                                         job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
+                                        ZSTD_dtlm_fast,
                                         NULL, /*cdict*/
                                         jobParams, pledgedSrcSize);
-            if (ZSTD_isError(initError)) {
-                job->cSize = initError;
-                goto _endJob;
-    }   }   }
+            if (ZSTD_isError(initError)) JOB_ERROR(initError);
+    }   }
 
     /* Perform serial step as early as possible, but after CCtx initialization */
     ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID);
 
     if (!job->firstJob) {  /* flush and overwrite frame header when it's not first job */
         size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0);
-        if (ZSTD_isError(hSize)) { job->cSize = hSize; /* save error code */ goto _endJob; }
+        if (ZSTD_isError(hSize)) JOB_ERROR(hSize);
         DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize);
         ZSTD_invalidateRepCodes(cctx);
     }
@@ -667,7 +707,7 @@
         assert(job->cSize == 0);
         for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) {
             size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize);
-            if (ZSTD_isError(cSize)) { job->cSize = cSize; goto _endJob; }
+            if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
             ip += chunkSize;
             op += cSize; assert(op < oend);
             /* stats */
@@ -680,18 +720,16 @@
             ZSTD_pthread_mutex_unlock(&job->job_mutex);
         }
         /* last block */
-        assert(chunkSize > 0); assert((chunkSize & (chunkSize - 1)) == 0);  /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */
+        assert(chunkSize > 0);
+        assert((chunkSize & (chunkSize - 1)) == 0);  /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */
         if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) {
             size_t const lastBlockSize1 = job->src.size & (chunkSize-1);
             size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1;
             size_t const cSize = (job->lastJob) ?
                  ZSTD_compressEnd     (cctx, op, oend-op, ip, lastBlockSize) :
                  ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize);
-            if (ZSTD_isError(cSize)) { job->cSize = cSize; goto _endJob; }
-            /* stats */
-            ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
-            job->cSize += cSize;
-            ZSTD_pthread_mutex_unlock(&job->job_mutex);
+            if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
+            lastCBlockSize = cSize;
     }   }
 
 _endJob:
@@ -704,7 +742,9 @@
     ZSTDMT_releaseCCtx(job->cctxPool, cctx);
     /* report */
     ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
-    job->consumed = job->src.size;
+    if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0);
+    job->cSize += lastCBlockSize;
+    job->consumed = job->src.size;  /* when job->consumed == job->src.size , compression job is presumed completed */
     ZSTD_pthread_cond_signal(&job->job_cond);
     ZSTD_pthread_mutex_unlock(&job->job_mutex);
 }
@@ -736,6 +776,14 @@
 
 static const roundBuff_t kNullRoundBuff = {NULL, 0, 0};
 
+#define RSYNC_LENGTH 32
+
+typedef struct {
+  U64 hash;
+  U64 hitMask;
+  U64 primePower;
+} rsyncState_t;
+
 struct ZSTDMT_CCtx_s {
     POOL_ctx* factory;
     ZSTDMT_jobDescription* jobs;
@@ -745,10 +793,11 @@
     ZSTD_CCtx_params params;
     size_t targetSectionSize;
     size_t targetPrefixSize;
-    roundBuff_t roundBuff;
+    int jobReady;        /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */
     inBuff_t inBuff;
-    int jobReady;        /* 1 => one job is already prepared, but pool has shortage of workers. Don't create another one. */
+    roundBuff_t roundBuff;
     serialState_t serial;
+    rsyncState_t rsync;
     unsigned singleBlockingThread;
     unsigned jobIDMask;
     unsigned doneJobID;
@@ -798,18 +847,28 @@
     return jobTable;
 }
 
+static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {
+    U32 nbJobs = nbWorkers + 2;
+    if (nbJobs > mtctx->jobIDMask+1) {  /* need more job capacity */
+        ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
+        mtctx->jobIDMask = 0;
+        mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem);
+        if (mtctx->jobs==NULL) return ERROR(memory_allocation);
+        assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0));  /* ensure nbJobs is a power of 2 */
+        mtctx->jobIDMask = nbJobs - 1;
+    }
+    return 0;
+}
+
+
 /* ZSTDMT_CCtxParam_setNbWorkers():
  * Internal use only */
 size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
 {
-    if (nbWorkers > ZSTDMT_NBWORKERS_MAX) nbWorkers = ZSTDMT_NBWORKERS_MAX;
-    params->nbWorkers = nbWorkers;
-    params->overlapSizeLog = ZSTDMT_OVERLAPLOG_DEFAULT;
-    params->jobSize = 0;
-    return nbWorkers;
+    return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers);
 }
 
-ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
+MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem)
 {
     ZSTDMT_CCtx* mtctx;
     U32 nbJobs = nbWorkers + 2;
@@ -844,6 +903,17 @@
     return mtctx;
 }
 
+ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
+{
+#ifdef ZSTD_MULTITHREAD
+    return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem);
+#else
+    (void)nbWorkers;
+    (void)cMem;
+    return NULL;
+#endif
+}
+
 ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers)
 {
     return ZSTDMT_createCCtx_advanced(nbWorkers, ZSTD_defaultCMem);
@@ -875,7 +945,7 @@
         unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask;
         ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
         while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
-            DEBUGLOG(5, "waiting for jobCompleted signal from job %u", mtctx->doneJobID);   /* we want to block when waiting for data to flush */
+            DEBUGLOG(4, "waiting for jobCompleted signal from job %u", mtctx->doneJobID);   /* we want to block when waiting for data to flush */
             ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
         }
         ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
@@ -914,38 +984,44 @@
 }
 
 /* Internal only */
-size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params,
-                                ZSTDMT_parameter parameter, unsigned value) {
+size_t
+ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params,
+                                   ZSTDMT_parameter parameter,
+                                   int value)
+{
     DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter");
     switch(parameter)
     {
     case ZSTDMT_p_jobSize :
-        DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %u", value);
-        if ( (value > 0)  /* value==0 => automatic job size */
-           & (value < ZSTDMT_JOBSIZE_MIN) )
-            value = ZSTDMT_JOBSIZE_MIN;
-        params->jobSize = value;
-        return value;
-    case ZSTDMT_p_overlapSectionLog :
-        if (value > 9) value = 9;
-        DEBUGLOG(4, "ZSTDMT_p_overlapSectionLog : %u", value);
-        params->overlapSizeLog = (value >= 9) ? 9 : value;
-        return value;
+        DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %i", value);
+        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_jobSize, value);
+    case ZSTDMT_p_overlapLog :
+        DEBUGLOG(4, "ZSTDMT_p_overlapLog : %i", value);
+        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_overlapLog, value);
+    case ZSTDMT_p_rsyncable :
+        DEBUGLOG(4, "ZSTD_p_rsyncable : %i", value);
+        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_rsyncable, value);
     default :
         return ERROR(parameter_unsupported);
     }
 }
 
-size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, unsigned value)
+size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value)
 {
     DEBUGLOG(4, "ZSTDMT_setMTCtxParameter");
-    switch(parameter)
-    {
-    case ZSTDMT_p_jobSize :
-        return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value);
-    case ZSTDMT_p_overlapSectionLog :
-        return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value);
-    default :
+    return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value);
+}
+
+size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value)
+{
+    switch (parameter) {
+    case ZSTDMT_p_jobSize:
+        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_jobSize, value);
+    case ZSTDMT_p_overlapLog:
+        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_overlapLog, value);
+    case ZSTDMT_p_rsyncable:
+        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_rsyncable, value);
+    default:
         return ERROR(parameter_unsupported);
     }
 }
@@ -954,19 +1030,38 @@
  * initializing others to default values. */
 static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
 {
-    ZSTD_CCtx_params jobParams;
-    memset(&jobParams, 0, sizeof(jobParams));
-
-    jobParams.cParams = params.cParams;
-    jobParams.fParams = params.fParams;
-    jobParams.compressionLevel = params.compressionLevel;
-    jobParams.disableLiteralCompression = params.disableLiteralCompression;
-
+    ZSTD_CCtx_params jobParams = params;
+    /* Clear parameters related to multithreading */
+    jobParams.forceWindow = 0;
+    jobParams.nbWorkers = 0;
+    jobParams.jobSize = 0;
+    jobParams.overlapLog = 0;
+    jobParams.rsyncable = 0;
+    memset(&jobParams.ldmParams, 0, sizeof(ldmParams_t));
+    memset(&jobParams.customMem, 0, sizeof(ZSTD_customMem));
     return jobParams;
 }
 
+
+/* ZSTDMT_resize() :
+ * @return : error code if fails, 0 on success */
+static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
+{
+    if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
+    FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) );
+    mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers);
+    if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
+    mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
+    if (mtctx->cctxPool == NULL) return ERROR(memory_allocation);
+    mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers);
+    if (mtctx->seqPool == NULL) return ERROR(memory_allocation);
+    ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
+    return 0;
+}
+
+
 /*! ZSTDMT_updateCParams_whileCompressing() :
- *  Updates only a selected set of compression parameters, to remain compatible with current frame.
+ *  Updates a selected set of compression parameters, remaining compatible with the currently active frame.
  *  New parameters will be applied to next compression job. */
 void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams)
 {
@@ -981,38 +1076,36 @@
     }
 }
 
-/* ZSTDMT_getNbWorkers():
- * @return nb threads currently active in mtctx.
- * mtctx must be valid */
-unsigned ZSTDMT_getNbWorkers(const ZSTDMT_CCtx* mtctx)
-{
-    assert(mtctx != NULL);
-    return mtctx->params.nbWorkers;
-}
-
 /* ZSTDMT_getFrameProgression():
  * tells how much data has been consumed (input) and produced (output) for current frame.
  * able to count progression inside worker threads.
- * Note : mutex will be acquired during statistics collection. */
+ * Note : mutex will be acquired during statistics collection inside workers. */
 ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx)
 {
     ZSTD_frameProgression fps;
-    DEBUGLOG(6, "ZSTDMT_getFrameProgression");
-    fps.consumed = mtctx->consumed;
-    fps.produced = mtctx->produced;
+    DEBUGLOG(5, "ZSTDMT_getFrameProgression");
     fps.ingested = mtctx->consumed + mtctx->inBuff.filled;
+    fps.consumed = mtctx->consumed;
+    fps.produced = fps.flushed = mtctx->produced;
+    fps.currentJobID = mtctx->nextJobID;
+    fps.nbActiveWorkers = 0;
     {   unsigned jobNb;
         unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1);
         DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)",
                     mtctx->doneJobID, lastJobNb, mtctx->jobReady)
         for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) {
             unsigned const wJobID = jobNb & mtctx->jobIDMask;
-            ZSTD_pthread_mutex_lock(&mtctx->jobs[wJobID].job_mutex);
-            {   size_t const cResult = mtctx->jobs[wJobID].cSize;
+            ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID];
+            ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
+            {   size_t const cResult = jobPtr->cSize;
                 size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
-                fps.consumed += mtctx->jobs[wJobID].consumed;
-                fps.ingested += mtctx->jobs[wJobID].src.size;
+                size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
+                assert(flushed <= produced);
+                fps.ingested += jobPtr->src.size;
+                fps.consumed += jobPtr->consumed;
                 fps.produced += produced;
+                fps.flushed  += flushed;
+                fps.nbActiveWorkers += (jobPtr->consumed < jobPtr->src.size);
             }
             ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
         }
@@ -1021,26 +1114,107 @@
 }
 
 
+size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)
+{
+    size_t toFlush;
+    unsigned const jobID = mtctx->doneJobID;
+    assert(jobID <= mtctx->nextJobID);
+    if (jobID == mtctx->nextJobID) return 0;   /* no active job => nothing to flush */
+
+    /* look into oldest non-fully-flushed job */
+    {   unsigned const wJobID = jobID & mtctx->jobIDMask;
+        ZSTDMT_jobDescription* const jobPtr = &mtctx->jobs[wJobID];
+        ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
+        {   size_t const cResult = jobPtr->cSize;
+            size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
+            size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
+            assert(flushed <= produced);
+            assert(jobPtr->consumed <= jobPtr->src.size);
+            toFlush = produced - flushed;
+            /* if toFlush==0, nothing is available to flush.
+             * However, jobID is expected to still be active:
+             * if jobID was already completed and fully flushed,
+             * ZSTDMT_flushProduced() should have already moved onto next job.
+             * Therefore, some input has not yet been consumed. */
+            if (toFlush==0) {
+                assert(jobPtr->consumed < jobPtr->src.size);
+            }
+        }
+        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
+    }
+
+    return toFlush;
+}
+
+
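A small monitoring sketch tying the richer progression report to the new ZSTDMT_toFlushNow() (hypothetical caller code, illustrative only):

    static void logProgress(ZSTDMT_CCtx* mtctx)
    {
        ZSTD_frameProgression const fps = ZSTDMT_getFrameProgression(mtctx);
        size_t const toFlush = ZSTDMT_toFlushNow(mtctx);   /* produced but not yet flushed */
        DEBUGLOG(2, "ingested:%u consumed:%u produced:%u flushed:%u activeWorkers:%u toFlush:%u",
                    (U32)fps.ingested, (U32)fps.consumed, (U32)fps.produced,
                    (U32)fps.flushed, fps.nbActiveWorkers, (U32)toFlush);
    }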
 /* ------------------------------------------ */
 /* =====   Multi-threaded compression   ===== */
 /* ------------------------------------------ */
 
-static size_t ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params const params)
+static unsigned ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params const params)
 {
-    if (params.ldmParams.enableLdm)
-        return MAX(21, params.cParams.chainLog + 4);
-    return MAX(20, params.cParams.windowLog + 2);
+    unsigned jobLog;
+    if (params.ldmParams.enableLdm) {
+        /* In Long Range Mode, the windowLog is typically oversized.
+         * In which case, it's preferable to determine the jobSize
+         * based on chainLog instead. */
+        jobLog = MAX(21, params.cParams.chainLog + 4);
+    } else {
+        jobLog = MAX(20, params.cParams.windowLog + 2);
+    }
+    return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX);
 }
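+/* worked example : with LDM enabled and chainLog==16, the above gives
+ * jobLog = MAX(21, 16+4) = 21, i.e. a 2 MB target job size; without LDM and
+ * windowLog==27, jobLog = MAX(20, 27+2) = 29, which the new MIN() caps at
+ * ZSTDMT_JOBLOG_MAX when that limit is lower */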
 
-static size_t ZSTDMT_computeOverlapLog(ZSTD_CCtx_params const params)
+static int ZSTDMT_overlapLog_default(ZSTD_strategy strat)
 {
-    unsigned const overlapRLog = (params.overlapSizeLog>9) ? 0 : 9-params.overlapSizeLog;
-    if (params.ldmParams.enableLdm)
-        return (MIN(params.cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2) - overlapRLog);
-    return overlapRLog >= 9 ? 0 : (params.cParams.windowLog - overlapRLog);
+    switch(strat)
+    {
+        case ZSTD_btultra2:
+            return 9;
+        case ZSTD_btultra:
+        case ZSTD_btopt:
+            return 8;
+        case ZSTD_btlazy2:
+        case ZSTD_lazy2:
+            return 7;
+        case ZSTD_lazy:
+        case ZSTD_greedy:
+        case ZSTD_dfast:
+        case ZSTD_fast:
+        default:;
+    }
+    return 6;
 }
 
-static unsigned ZSTDMT_computeNbJobs(ZSTD_CCtx_params params, size_t srcSize, unsigned nbWorkers) {
+static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat)
+{
+    assert(0 <= ovlog && ovlog <= 9);
+    if (ovlog == 0) return ZSTDMT_overlapLog_default(strat);
+    return ovlog;
+}
+
+static size_t ZSTDMT_computeOverlapSize(ZSTD_CCtx_params const params)
+{
+    int const overlapRLog = 9 - ZSTDMT_overlapLog(params.overlapLog, params.cParams.strategy);
+    int ovLog = (overlapRLog >= 8) ? 0 : (params.cParams.windowLog - overlapRLog);
+    assert(0 <= overlapRLog && overlapRLog <= 8);
+    if (params.ldmParams.enableLdm) {
+        /* In Long Range Mode, the windowLog is typically oversized.
+         * In which case, it's preferable to determine the jobSize
+         * based on chainLog instead.
+         * Then, ovLog becomes a fraction of the jobSize, rather than windowSize */
+        ovLog = MIN(params.cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
+                - overlapRLog;
+    }
+    assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX);
+    DEBUGLOG(4, "overlapLog : %i", params.overlapLog);
+    DEBUGLOG(4, "overlap size : %i", 1 << ovLog);
+    return (ovLog==0) ? 0 : (size_t)1 << ovLog;
+}
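+/* worked example : the ZSTD_fast default overlapLog==6 yields overlapRLog==3,
+ * so each job reloads windowSize/8 of history from its predecessor; the
+ * btultra2 default of 9 yields overlapRLog==0, i.e. a full window of overlap */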
+
+static unsigned
+ZSTDMT_computeNbJobs(ZSTD_CCtx_params params, size_t srcSize, unsigned nbWorkers)
+{
     assert(nbWorkers>0);
     {   size_t const jobSizeTarget = (size_t)1 << ZSTDMT_computeTargetJobLog(params);
         size_t const jobMaxSize = jobSizeTarget << 2;
@@ -1063,7 +1237,7 @@
                 ZSTD_CCtx_params params)
 {
     ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(params);
-    size_t const overlapSize = (size_t)1 << ZSTDMT_computeOverlapLog(params);
+    size_t const overlapSize = ZSTDMT_computeOverlapSize(params);
     unsigned const nbJobs = ZSTDMT_computeNbJobs(params, srcSize, params.nbWorkers);
     size_t const proposedJobSize = (srcSize + (nbJobs-1)) / nbJobs;
     size_t const avgJobSize = (((proposedJobSize-1) & 0x1FFFF) < 0x7FFF) ? proposedJobSize + 0xFFFF : proposedJobSize;   /* avoid too small last block */
@@ -1087,18 +1261,10 @@
 
     assert(avgJobSize >= 256 KB);  /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */
     ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgJobSize) );
-    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params))
+    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize))
         return ERROR(memory_allocation);
 
-    if (nbJobs > mtctx->jobIDMask+1) {  /* enlarge job table */
-        U32 jobsTableSize = nbJobs;
-        ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
-        mtctx->jobIDMask = 0;
-        mtctx->jobs = ZSTDMT_createJobsTable(&jobsTableSize, mtctx->cMem);
-        if (mtctx->jobs==NULL) return ERROR(memory_allocation);
-        assert((jobsTableSize != 0) && ((jobsTableSize & (jobsTableSize - 1)) == 0));  /* ensure jobsTableSize is a power of 2 */
-        mtctx->jobIDMask = jobsTableSize - 1;
-    }
+    FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbJobs) );  /* only expands if necessary */
 
     {   unsigned u;
         for (u=0; u<nbJobs; u++) {
@@ -1182,16 +1348,17 @@
 }
 
 size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
-                               void* dst, size_t dstCapacity,
-                         const void* src, size_t srcSize,
-                         const ZSTD_CDict* cdict,
-                               ZSTD_parameters params,
-                               unsigned overlapLog)
+                                void* dst, size_t dstCapacity,
+                          const void* src, size_t srcSize,
+                          const ZSTD_CDict* cdict,
+                                ZSTD_parameters params,
+                                int overlapLog)
 {
     ZSTD_CCtx_params cctxParams = mtctx->params;
     cctxParams.cParams = params.cParams;
     cctxParams.fParams = params.fParams;
-    cctxParams.overlapSizeLog = overlapLog;
+    assert(ZSTD_OVERLAPLOG_MIN <= overlapLog && overlapLog <= ZSTD_OVERLAPLOG_MAX);
+    cctxParams.overlapLog = overlapLog;
     return ZSTDMT_compress_advanced_internal(mtctx,
                                              dst, dstCapacity,
                                              src, srcSize,
@@ -1204,8 +1371,8 @@
                      const void* src, size_t srcSize,
                            int compressionLevel)
 {
-    U32 const overlapLog = (compressionLevel >= ZSTD_maxCLevel()) ? 9 : ZSTDMT_OVERLAPLOG_DEFAULT;
     ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);
+    int const overlapLog = ZSTDMT_overlapLog_default(params.cParams.strategy);
     params.fParams.contentSizeFlag = 1;
     return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapLog);
 }
@@ -1221,18 +1388,19 @@
         const ZSTD_CDict* cdict, ZSTD_CCtx_params params,
         unsigned long long pledgedSrcSize)
 {
-    DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u, disableLiteralCompression=%i)",
-                (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx, params.disableLiteralCompression);
-    /* params are supposed to be fully validated at this point */
+    DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)",
+                (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx);
+
+    /* params are supposed to be partially validated at this point */
     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
-    assert(mtctx->cctxPool->totalCCtx == params.nbWorkers);
 
     /* init */
-    if (params.jobSize == 0) {
-        params.jobSize = 1U << ZSTDMT_computeTargetJobLog(params);
-    }
-    if (params.jobSize > ZSTDMT_JOBSIZE_MAX) params.jobSize = ZSTDMT_JOBSIZE_MAX;
+    if (params.nbWorkers != mtctx->params.nbWorkers)
+        FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) );
+
+    if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
+    if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX;
 
     mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN);  /* do not trigger multi-threading when srcSize is too small */
     if (mtctx->singleBlockingThread) {
@@ -1267,12 +1435,26 @@
         mtctx->cdict = cdict;
     }
 
-    mtctx->targetPrefixSize = (size_t)1 << ZSTDMT_computeOverlapLog(params);
-    DEBUGLOG(4, "overlapLog=%u => %u KB", params.overlapSizeLog, (U32)(mtctx->targetPrefixSize>>10));
+    mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(params);
+    DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10));
     mtctx->targetSectionSize = params.jobSize;
-    if (mtctx->targetSectionSize < ZSTDMT_JOBSIZE_MIN) mtctx->targetSectionSize = ZSTDMT_JOBSIZE_MIN;
+    if (mtctx->targetSectionSize == 0) {
+        mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(params);
+    }
+    assert(mtctx->targetSectionSize <= (size_t)ZSTDMT_JOBSIZE_MAX);
+
+    if (params.rsyncable) {
+        /* Aim for the targetSectionSize as the average job size. */
+        U32 const jobSizeMB = (U32)(mtctx->targetSectionSize >> 20);
+        U32 const rsyncBits = ZSTD_highbit32(jobSizeMB) + 20;
+        assert(jobSizeMB >= 1);
+        DEBUGLOG(4, "rsyncLog = %u", rsyncBits);
+        mtctx->rsync.hash = 0;
+        mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1;
+        mtctx->rsync.primePower = ZSTD_rollingHash_primePower(RSYNC_LENGTH);
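+        /* worked example : a 4 MB target job size gives jobSizeMB==4,
+         * rsyncBits = ZSTD_highbit32(4) + 20 = 22, so a rolling-hash hit
+         * is expected about once every 2^22 input bytes, keeping
+         * rsyncable job cuts near the 4 MB target on average */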
+    }
     if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize;  /* job size must be >= overlap size */
-    DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), params.jobSize);
+    DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), (U32)params.jobSize);
     DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10));
     ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize));
     {
@@ -1312,7 +1494,7 @@
     mtctx->allJobsCompleted = 0;
     mtctx->consumed = 0;
     mtctx->produced = 0;
-    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params))
+    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize))
         return ERROR(memory_allocation);
     return 0;
 }
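
A note on the rsyncable setup in the hunk above: the hit mask is chosen so that, on average, one synchronization point fires per targetSectionSize bytes of input. A minimal Go sketch of that arithmetic (names are illustrative, not the C API):

```go
package main

import (
	"fmt"
	"math/bits"
)

// rsyncParams mirrors the computation above: rsyncLog is roughly
// log2(average job size in bytes), so a hit on the low rsyncLog bits
// of the rolling hash occurs about once per targetSectionSize bytes.
func rsyncParams(targetSectionSize uint64) (rsyncLog uint, hitMask uint64) {
	jobSizeMB := uint32(targetSectionSize >> 20) // asserted >= 1 in the C code
	rsyncLog = uint(31-bits.LeadingZeros32(jobSizeMB)) + 20
	hitMask = (uint64(1) << rsyncLog) - 1
	return
}

func main() {
	rl, mask := rsyncParams(32 << 20) // 32 MB target section size
	fmt.Printf("rsyncLog=%d hitMask=%#x\n", rl, mask)
}
```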
@@ -1368,7 +1550,7 @@
 /* ZSTDMT_writeLastEmptyBlock()
  * Write a single empty block with an end-of-frame to finish a frame.
  * Job must be created from streaming variant.
- * This function is always successfull if expected conditions are fulfilled.
+ * This function is always successful if expected conditions are fulfilled.
  */
 static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)
 {
@@ -1420,7 +1602,7 @@
         mtctx->jobs[jobID].jobID = mtctx->nextJobID;
         mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0);
         mtctx->jobs[jobID].lastJob = endFrame;
-        mtctx->jobs[jobID].frameChecksumNeeded = endFrame && (mtctx->nextJobID>0) && mtctx->params.fParams.checksumFlag;
+        mtctx->jobs[jobID].frameChecksumNeeded = mtctx->params.fParams.checksumFlag && endFrame && (mtctx->nextJobID>0);
         mtctx->jobs[jobID].dstFlushed = 0;
 
         /* Update the round buffer pos and clear the input buffer to be reset */
@@ -1468,6 +1650,8 @@
 
 
 /*! ZSTDMT_flushProduced() :
+ *  flush whatever data has been produced but not yet flushed in current job.
+ *  move to next job if current one is fully flushed.
  * `output` : `pos` will be updated with amount of data flushed .
  * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush .
  * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */
@@ -1496,7 +1680,7 @@
     /* try to flush something */
     {   size_t cSize = mtctx->jobs[wJobID].cSize;                  /* shared */
         size_t const srcConsumed = mtctx->jobs[wJobID].consumed;   /* shared */
-        size_t const srcSize = mtctx->jobs[wJobID].src.size;        /* read-only, could be done after mutex lock, but no-declaration-after-statement */
+        size_t const srcSize = mtctx->jobs[wJobID].src.size;       /* read-only, could be done after mutex lock, but no-declaration-after-statement */
         ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
         if (ZSTD_isError(cSize)) {
             DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s",
@@ -1516,6 +1700,7 @@
             mtctx->jobs[wJobID].cSize += 4;  /* can write this shared value, as worker is no longer active */
             mtctx->jobs[wJobID].frameChecksumNeeded = 0;
         }
+
         if (cSize > 0) {   /* compression is ongoing or completed */
             size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos);
             DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)",
@@ -1529,11 +1714,12 @@
             output->pos += toFlush;
             mtctx->jobs[wJobID].dstFlushed += toFlush;  /* can write : this value is only used by mtctx */
 
-            if ( (srcConsumed == srcSize)    /* job completed */
+            if ( (srcConsumed == srcSize)    /* job is completed */
               && (mtctx->jobs[wJobID].dstFlushed == cSize) ) {   /* output buffer fully flushed => free this job position */
                 DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one",
                         mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
                 ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff);
+                DEBUGLOG(5, "dstBuffer released");
                 mtctx->jobs[wJobID].dstBuff = g_nullBuffer;
                 mtctx->jobs[wJobID].cSize = 0;   /* ensure this job slot is considered "not started" in future check */
                 mtctx->consumed += srcSize;
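
For context on the `cSize += 4` in the hunk above: per the zstd frame format, the optional content checksum is the low 32 bits of the XXH64 digest of the original content, appended little-endian after the last block. A hedged Go sketch of that framing step, assuming the third-party github.com/cespare/xxhash/v2 package:

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/cespare/xxhash/v2" // assumed XXH64 implementation
)

// appendFrameChecksum shows why cSize grows by 4: the zstd frame
// checksum is the low 32 bits of XXH64(content, seed=0), stored
// little-endian after the final block of the frame.
func appendFrameChecksum(frame, content []byte) []byte {
	sum := uint32(xxhash.Sum64(content))
	var buf [4]byte
	binary.LittleEndian.PutUint32(buf[:], sum)
	return append(frame, buf[:]...)
}

func main() {
	fmt.Printf("% x\n", appendFrameChecksum(nil, []byte("hello")))
}
```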
@@ -1610,6 +1796,7 @@
     range_t extDict;
     range_t prefix;
 
+    DEBUGLOG(5, "ZSTDMT_doesOverlapWindow");
     extDict.start = window.dictBase + window.lowLimit;
     extDict.size = window.dictLimit - window.lowLimit;
 
@@ -1630,12 +1817,13 @@
 {
     if (mtctx->params.ldmParams.enableLdm) {
         ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex;
+        DEBUGLOG(5, "ZSTDMT_waitForLdmComplete");
         DEBUGLOG(5, "source  [0x%zx, 0x%zx)",
                     (size_t)buffer.start,
                     (size_t)buffer.start + buffer.capacity);
         ZSTD_PTHREAD_MUTEX_LOCK(mutex);
         while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) {
-            DEBUGLOG(6, "Waiting for LDM to finish...");
+            DEBUGLOG(5, "Waiting for LDM to finish...");
             ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex);
         }
         DEBUGLOG(6, "Done waiting for LDM to finish");
@@ -1655,6 +1843,7 @@
     size_t const target = mtctx->targetSectionSize;
     buffer_t buffer;
 
+    DEBUGLOG(5, "ZSTDMT_tryGetInputRange");
     assert(mtctx->inBuff.buffer.start == NULL);
     assert(mtctx->roundBuff.capacity >= target);
 
@@ -1668,7 +1857,7 @@
         buffer.start = start;
         buffer.capacity = prefixSize;
         if (ZSTDMT_isOverlapped(buffer, inUse)) {
-            DEBUGLOG(6, "Waiting for buffer...");
+            DEBUGLOG(5, "Waiting for buffer...");
             return 0;
         }
         ZSTDMT_waitForLdmComplete(mtctx, buffer);
@@ -1680,7 +1869,7 @@
     buffer.capacity = target;
 
     if (ZSTDMT_isOverlapped(buffer, inUse)) {
-        DEBUGLOG(6, "Waiting for buffer...");
+        DEBUGLOG(5, "Waiting for buffer...");
         return 0;
     }
     assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix));
@@ -1701,6 +1890,89 @@
     return 1;
 }
 
+typedef struct {
+  size_t toLoad;  /* The number of bytes to load from the input. */
+  int flush;      /* Boolean declaring if we must flush because we found a synchronization point. */
+} syncPoint_t;
+
+/**
+ * Searches through the input for a synchronization point. If one is found, we
+ * will instruct the caller to flush, and return the number of bytes to load.
+ * Otherwise, we will load as many bytes as possible and instruct the caller
+ * to continue as normal.
+ */
+static syncPoint_t
+findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
+{
+    BYTE const* const istart = (BYTE const*)input.src + input.pos;
+    U64 const primePower = mtctx->rsync.primePower;
+    U64 const hitMask = mtctx->rsync.hitMask;
+
+    syncPoint_t syncPoint;
+    U64 hash;
+    BYTE const* prev;
+    size_t pos;
+
+    syncPoint.toLoad = MIN(input.size - input.pos, mtctx->targetSectionSize - mtctx->inBuff.filled);
+    syncPoint.flush = 0;
+    if (!mtctx->params.rsyncable)
+        /* Rsync is disabled. */
+        return syncPoint;
+    if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH)
+        /* Not enough to compute the hash.
+         * We will miss any synchronization points in this RSYNC_LENGTH byte
+         * window. However, since it depends only on the internal buffers, if the
+         * state is already synchronized, we will remain synchronized.
+         * Additionally, the probability that we miss a synchronization point is
+         * low: RSYNC_LENGTH / targetSectionSize.
+         */
+        return syncPoint;
+    /* Initialize the loop variables. */
+    if (mtctx->inBuff.filled >= RSYNC_LENGTH) {
+        /* We have enough bytes buffered to initialize the hash.
+         * Start scanning at the beginning of the input.
+         */
+        pos = 0;
+        prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
+        hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
+    } else {
+        /* We don't have enough bytes buffered to initialize the hash, but
+         * we know we have at least RSYNC_LENGTH bytes total.
+         * Start scanning after the first RSYNC_LENGTH bytes less the bytes
+         * already buffered.
+         */
+        pos = RSYNC_LENGTH - mtctx->inBuff.filled;
+        prev = (BYTE const*)mtctx->inBuff.buffer.start - pos;
+        hash = ZSTD_rollingHash_compute(mtctx->inBuff.buffer.start, mtctx->inBuff.filled);
+        hash = ZSTD_rollingHash_append(hash, istart, pos);
+    }
+    /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll
+     * through the input. If we hit a synchronization point, then cut the
+     * job off, and tell the compressor to flush the job. Otherwise, load
+     * all the bytes and continue as normal.
+     * If we go too long without a synchronization point (targetSectionSize)
+     * then a block will be emitted anyway, but this is okay, since if we
+     * are already synchronized we will remain synchronized.
+     */
+    for (; pos < syncPoint.toLoad; ++pos) {
+        BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH];
+        /* if (pos >= RSYNC_LENGTH) assert(ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); */
+        hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower);
+        if ((hash & hitMask) == hitMask) {
+            syncPoint.toLoad = pos + 1;
+            syncPoint.flush = 1;
+            break;
+        }
+    }
+    return syncPoint;
+}
+
+size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx)
+{
+    size_t hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled;
+    if (hintInSize==0) hintInSize = mtctx->targetSectionSize;
+    return hintInSize;
+}
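
The scan in findSynchronizationPoint is a classic Rabin-Karp rolling hash: strip the oldest byte's contribution, mix in the newest, and flush whenever the low bits of the hash are all ones. A toy Go version under assumed constants (the C code uses ZSTD's own rolling-hash prime and RSYNC_LENGTH):

```go
package main

import (
	"fmt"
	"math/rand"
)

const (
	prime  = 0x9E3779B185EBCA87 // assumption: any odd 64-bit multiplier serves for a sketch
	winLen = 32                 // stand-in for RSYNC_LENGTH
)

// primePower returns prime^(winLen-1), the factor needed to strip the
// oldest byte out of the rolling hash (mirrors rsync.primePower above).
func primePower(n int) uint64 {
	p := uint64(1)
	for i := 0; i < n-1; i++ {
		p *= prime
	}
	return p
}

func main() {
	data := make([]byte, 1<<16)
	rand.New(rand.NewSource(42)).Read(data)

	pp := primePower(winLen)
	hitMask := uint64(1)<<12 - 1 // expect a hit roughly every 4 KiB

	var hash uint64
	for i := 0; i < winLen; i++ { // prime the first window
		hash = hash*prime + uint64(data[i])
	}
	for pos := winLen; pos < len(data); pos++ {
		hash -= uint64(data[pos-winLen]) * pp // rotate out the oldest byte
		hash = hash*prime + uint64(data[pos]) // rotate in the newest
		if hash&hitMask == hitMask {
			fmt.Println("synchronization point after byte", pos)
			return
		}
	}
	fmt.Println("no synchronization point in this input")
}
```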
 
 /** ZSTDMT_compressStream_generic() :
  *  internal use only - exposed to be invoked from zstd_compress.c
@@ -1718,7 +1990,7 @@
     assert(input->pos  <= input->size);
 
     if (mtctx->singleBlockingThread) {  /* delegate to single-thread (synchronous) */
-        return ZSTD_compressStream_generic(mtctx->cctxPool->cctx[0], output, input, endOp);
+        return ZSTD_compressStream2(mtctx->cctxPool->cctx[0], output, input, endOp);
     }
 
     if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
@@ -1727,7 +1999,8 @@
     }
 
     /* single-pass shortcut (note : synchronous-mode) */
-    if ( (mtctx->nextJobID == 0)      /* just started */
+    if ( (!mtctx->params.rsyncable)   /* rsyncable mode is disabled */
+      && (mtctx->nextJobID == 0)      /* just started */
       && (mtctx->inBuff.filled == 0)  /* nothing buffered */
       && (!mtctx->jobReady)           /* no job already created */
       && (endOp == ZSTD_e_end)        /* end order */
@@ -1753,18 +2026,23 @@
                 /* It is only possible for this operation to fail if there are
                  * still compression jobs ongoing.
                  */
+                DEBUGLOG(5, "ZSTDMT_tryGetInputRange failed");
                 assert(mtctx->doneJobID != mtctx->nextJobID);
-            }
+            } else
+                DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start);
         }
         if (mtctx->inBuff.buffer.start != NULL) {
-            size_t const toLoad = MIN(input->size - input->pos, mtctx->targetSectionSize - mtctx->inBuff.filled);
+            syncPoint_t const syncPoint = findSynchronizationPoint(mtctx, *input);
+            if (syncPoint.flush && endOp == ZSTD_e_continue) {
+                endOp = ZSTD_e_flush;
+            }
             assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize);
             DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u",
-                        (U32)toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize);
-            memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, toLoad);
-            input->pos += toLoad;
-            mtctx->inBuff.filled += toLoad;
-            forwardInputProgress = toLoad>0;
+                        (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize);
+            memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad);
+            input->pos += syncPoint.toLoad;
+            mtctx->inBuff.filled += syncPoint.toLoad;
+            forwardInputProgress = syncPoint.toLoad>0;
         }
         if ((input->pos < input->size) && (endOp == ZSTD_e_end))
             endOp = ZSTD_e_flush;   /* can't end now : not all input consumed */
@@ -1776,12 +2054,13 @@
       || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) {   /* must finish the frame with a zero-size block */
         size_t const jobSize = mtctx->inBuff.filled;
         assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
-        CHECK_F( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );
+        FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );
     }
 
     /* check for potential compressed data ready to be flushed */
     {   size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp); /* block if there was no forward input progress */
         if (input->pos < input->size) return MAX(remainingToFlush, 1);  /* input not consumed : do not end flush yet */
+        DEBUGLOG(5, "end of ZSTDMT_compressStream_generic: remainingToFlush = %u", (U32)remainingToFlush);
         return remainingToFlush;
     }
 }
@@ -1789,7 +2068,7 @@
 
 size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
 {
-    CHECK_F( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );
+    FORWARD_IF_ERROR( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );
 
     /* recommended next input size : fill current input buffer */
     return mtctx->targetSectionSize - mtctx->inBuff.filled;   /* note : could be zero when input buffer is fully filled and no more availability to create new job */
@@ -1806,7 +2085,7 @@
       || ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) {  /* need a last 0-size block to end frame */
            DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)",
                         (U32)srcSize, (U32)endFrame);
-        CHECK_F( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );
+        FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );
     }
 
     /* check if there is any data available to flush */
diff --git a/vendor/github.com/DataDog/zstd/zstdmt_compress.h b/vendor/github.com/DataDog/zstd/zstdmt_compress.h
old mode 100755
new mode 100644
index f79e3b4..12a5260
--- a/vendor/github.com/DataDog/zstd/zstdmt_compress.h
+++ b/vendor/github.com/DataDog/zstd/zstdmt_compress.h
@@ -17,10 +17,25 @@
 
 
 /* Note : This is an internal API.
- *        Some methods are still exposed (ZSTDLIB_API),
+ *        These APIs used to be exposed with ZSTDLIB_API,
  *        because it used to be the only way to invoke MT compression.
- *        Now, it's recommended to use ZSTD_compress_generic() instead.
- *        These methods will stop being exposed in a future version */
+ *        Now, it's recommended to use ZSTD_compress2 and ZSTD_compressStream2()
+ *        instead.
+ *
+ *        If you depend on these APIs and can't switch, then define
+ *        ZSTD_LEGACY_MULTITHREADED_API when making the dynamic library.
+ *        However, we may completely remove these functions in a future
+ *        release, so please switch soon.
+ *
+ *        This API requires ZSTD_MULTITHREAD to be defined during compilation,
+ *        otherwise ZSTDMT_createCCtx*() will fail.
+ */
+
+#ifdef ZSTD_LEGACY_MULTITHREADED_API
+#  define ZSTDMT_API ZSTDLIB_API
+#else
+#  define ZSTDMT_API
+#endif
 
 /* ===   Dependencies   === */
 #include <stddef.h>                /* size_t */
@@ -28,19 +43,32 @@
 #include "zstd.h"            /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */
 
 
+/* ===   Constants   === */
+#ifndef ZSTDMT_NBWORKERS_MAX
+#  define ZSTDMT_NBWORKERS_MAX 200
+#endif
+#ifndef ZSTDMT_JOBSIZE_MIN
+#  define ZSTDMT_JOBSIZE_MIN (1 MB)
+#endif
+#define ZSTDMT_JOBLOG_MAX   (MEM_32bits() ? 29 : 30)
+#define ZSTDMT_JOBSIZE_MAX  (MEM_32bits() ? (512 MB) : (1024 MB))
+
+
 /* ===   Memory management   === */
 typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
-ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers);
-ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers,
+/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */
+ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers);
+/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */
+ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers,
                                                     ZSTD_customMem cMem);
-ZSTDLIB_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);
+ZSTDMT_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);
 
-ZSTDLIB_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);
+ZSTDMT_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);
 
 
 /* ===   Simple one-pass compression function   === */
 
-ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
+ZSTDMT_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
                                        void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize,
                                        int compressionLevel);
@@ -49,34 +77,31 @@
 
 /* ===   Streaming functions   === */
 
-ZSTDLIB_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
-ZSTDLIB_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize);  /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */
+ZSTDMT_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
+ZSTDMT_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize);  /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */
 
-ZSTDLIB_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+ZSTDMT_API size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx);
+ZSTDMT_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
 
-ZSTDLIB_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);   /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
-ZSTDLIB_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);     /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
+ZSTDMT_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);   /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
+ZSTDMT_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);     /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
 
 
 /* ===   Advanced functions and parameters  === */
 
-#ifndef ZSTDMT_JOBSIZE_MIN
-#  define ZSTDMT_JOBSIZE_MIN (1U << 20)   /* 1 MB - Minimum size of each compression job */
-#endif
+ZSTDMT_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
+                                          void* dst, size_t dstCapacity,
+                                    const void* src, size_t srcSize,
+                                    const ZSTD_CDict* cdict,
+                                          ZSTD_parameters params,
+                                          int overlapLog);
 
-ZSTDLIB_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
-                                           void* dst, size_t dstCapacity,
-                                     const void* src, size_t srcSize,
-                                     const ZSTD_CDict* cdict,
-                                           ZSTD_parameters params,
-                                           unsigned overlapLog);
-
-ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
+ZSTDMT_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
                                         const void* dict, size_t dictSize,   /* dict can be released after init, a local copy is preserved within zcs */
                                         ZSTD_parameters params,
                                         unsigned long long pledgedSrcSize);  /* pledgedSrcSize is optional and can be zero == unknown */
 
-ZSTDLIB_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
+ZSTDMT_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
                                         const ZSTD_CDict* cdict,
                                         ZSTD_frameParameters fparams,
                                         unsigned long long pledgedSrcSize);  /* note : zero means empty */
@@ -84,8 +109,9 @@
 /* ZSTDMT_parameter :
  * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
 typedef enum {
-    ZSTDMT_p_jobSize,           /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */
-    ZSTDMT_p_overlapSectionLog  /* Each job may reload a part of previous job to enhance compressionr ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
+    ZSTDMT_p_jobSize,     /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */
+    ZSTDMT_p_overlapLog,  /* Each job may reload a part of previous job to enhance compression ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
+    ZSTDMT_p_rsyncable    /* Enables rsyncable mode. */
 } ZSTDMT_parameter;
 
 /* ZSTDMT_setMTCtxParameter() :
@@ -93,7 +119,12 @@
  * The function must be called typically after ZSTD_createCCtx() but __before ZSTDMT_init*() !__
  * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions.
  * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
-ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, unsigned value);
+ZSTDMT_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value);
+
+/* ZSTDMT_getMTCtxParameter() :
+ * Query the ZSTDMT_CCtx for a parameter value.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
+ZSTDMT_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value);
 
 
 /*! ZSTDMT_compressStream_generic() :
@@ -103,7 +134,7 @@
  *           0 if fully flushed
  *           or an error code
  *  note : needs to be init using any ZSTD_initCStream*() variant */
-ZSTDLIB_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
+ZSTDMT_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
                                                 ZSTD_outBuffer* output,
                                                 ZSTD_inBuffer* input,
                                                 ZSTD_EndDirective endOp);
@@ -114,11 +145,21 @@
  * ===  Not exposed in libzstd. Never invoke directly   ===
  * ======================================================== */
 
-size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, unsigned value);
+ /*! ZSTDMT_toFlushNow()
+  *  Tell how many bytes are ready to be flushed immediately.
+  *  Probe the oldest active job (not yet entirely flushed) and check its output buffer.
+  *  If it returns 0, it means either there is no active job,
+  *  or the oldest job is still active but everything it produced has been flushed so far,
+  *  therefore flushing is limited by the speed of the oldest job. */
+size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx);
 
-/* ZSTDMT_CCtxParam_setNbWorkers()
- * Set nbWorkers, and clamp it.
- * Also reset jobSize and overlapLog */
+/*! ZSTDMT_CCtxParam_setMTCtxParameter()
+ *  like ZSTDMT_setMTCtxParameter(), but into a ZSTD_CCtx_Params */
+size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, int value);
+
+/*! ZSTDMT_CCtxParam_setNbWorkers()
+ *  Set nbWorkers, and clamp it.
+ *  Also reset jobSize and overlapLog */
 size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers);
 
 /*! ZSTDMT_updateCParams_whileCompressing() :
@@ -126,14 +167,9 @@
  *  New parameters will be applied to next compression job. */
 void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams);
 
-/* ZSTDMT_getNbWorkers():
- * @return nb threads currently active in mtctx.
- * mtctx must be valid */
-unsigned ZSTDMT_getNbWorkers(const ZSTDMT_CCtx* mtctx);
-
-/* ZSTDMT_getFrameProgression():
- * tells how much data has been consumed (input) and produced (output) for current frame.
- * able to count progression inside worker threads.
+/*! ZSTDMT_getFrameProgression():
+ *  tells how much data has been consumed (input) and produced (output) for current frame.
+ *  able to count progression inside worker threads.
  */
 ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx);
 
diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml
index b7f7874..4331fa1 100644
--- a/vendor/github.com/Shopify/sarama/.travis.yml
+++ b/vendor/github.com/Shopify/sarama/.travis.yml
@@ -1,7 +1,8 @@
+dist: xenial
 language: go
 go:
-- 1.10.x
 - 1.11.x
+- 1.12.x
 
 env:
   global:
@@ -11,15 +12,16 @@
   - KAFKA_HOSTNAME=localhost
   - DEBUG=true
   matrix:
-  - KAFKA_VERSION=1.1.1 KAFKA_SCALA_VERSION=2.11
-  - KAFKA_VERSION=2.0.1 KAFKA_SCALA_VERSION=2.12
-  - KAFKA_VERSION=2.1.0 KAFKA_SCALA_VERSION=2.12
+  - KAFKA_VERSION=2.1.1 KAFKA_SCALA_VERSION=2.12
+  - KAFKA_VERSION=2.2.1 KAFKA_SCALA_VERSION=2.12
+  - KAFKA_VERSION=2.3.0 KAFKA_SCALA_VERSION=2.12
 
 before_install:
 - export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
 - vagrant/install_cluster.sh
 - vagrant/boot_cluster.sh
 - vagrant/create_topics.sh
+- vagrant/run_java_producer.sh
 
 install: make install_dependencies
 
@@ -27,7 +29,7 @@
 - make test
 - make vet
 - make errcheck
-- if [[ "$TRAVIS_GO_VERSION" == 1.11* ]]; then make fmt; fi
+- if [[ "$TRAVIS_GO_VERSION" == 1.12* ]]; then make fmt; fi
 
 after_success:
 - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md
index 0e69c2d..02bd0ff 100644
--- a/vendor/github.com/Shopify/sarama/CHANGELOG.md
+++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md
@@ -1,5 +1,109 @@
 # Changelog
 
+#### Version 1.23.1 (2019-07-22)
+
+Bug Fixes:
+- Fix fetch delete record bug
+  ([1425](https://github.com/Shopify/sarama/pull/1425)).
+- Handle SASL/OAUTHBEARER token rejection
+  ([1428](https://github.com/Shopify/sarama/pull/1428)).
+
+#### Version 1.23.0 (2019-07-02)
+
+New Features:
+- Add support for Kafka 2.3.0
+  ([1418](https://github.com/Shopify/sarama/pull/1418)).
+- Add support for ListConsumerGroupOffsets v2
+  ([1374](https://github.com/Shopify/sarama/pull/1374)).
+- Add support for DeleteConsumerGroup
+  ([1417](https://github.com/Shopify/sarama/pull/1417)).
+- Add support for SASLVersion configuration
+  ([1410](https://github.com/Shopify/sarama/pull/1410)).
+- Add kerberos support
+  ([1366](https://github.com/Shopify/sarama/pull/1366)).
+
+Improvements:
+- Improve sasl_scram_client example
+  ([1406](https://github.com/Shopify/sarama/pull/1406)).
+- Fix shutdown and race-condition in consumer-group example
+  ([1404](https://github.com/Shopify/sarama/pull/1404)).
+- Add support for error codes 77-81
+  ([1397](https://github.com/Shopify/sarama/pull/1397)).
+- Pool internal objects allocated per message
+  ([1385](https://github.com/Shopify/sarama/pull/1385)).
+- Reduce packet decoder allocations
+  ([1373](https://github.com/Shopify/sarama/pull/1373)).
+- Support timeout when fetching metadata
+  ([1359](https://github.com/Shopify/sarama/pull/1359)).
+
+Bug Fixes:
+- Fix fetch size integer overflow
+  ([1376](https://github.com/Shopify/sarama/pull/1376)).
+- Handle and log throttled FetchResponses
+  ([1383](https://github.com/Shopify/sarama/pull/1383)).
+- Refactor misspelled word Resouce to Resource
+  ([1368](https://github.com/Shopify/sarama/pull/1368)).
+
+#### Version 1.22.1 (2019-04-29)
+
+Improvements:
+- Use zstd 1.3.8
+  ([1350](https://github.com/Shopify/sarama/pull/1350)).
+- Add support for SaslHandshakeRequest v1
+  ([1354](https://github.com/Shopify/sarama/pull/1354)).
+
+Bug Fixes:
+- Fix V5 MetadataRequest nullable topics array
+  ([1353](https://github.com/Shopify/sarama/pull/1353)).
+- Use a different SCRAM client for each broker connection
+  ([1349](https://github.com/Shopify/sarama/pull/1349)).
+- Fix AllowAutoTopicCreation for MetadataRequest greater than v3
+  ([1344](https://github.com/Shopify/sarama/pull/1344)).
+
+#### Version 1.22.0 (2019-04-09)
+
+New Features:
+- Add Offline Replicas Operation to Client
+  ([1318](https://github.com/Shopify/sarama/pull/1318)).
+- Allow using proxy when connecting to broker
+  ([1326](https://github.com/Shopify/sarama/pull/1326)).
+- Implement ReadCommitted
+  ([1307](https://github.com/Shopify/sarama/pull/1307)).
+- Add support for Kafka 2.2.0
+  ([1331](https://github.com/Shopify/sarama/pull/1331)).
+- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanisms
+  ([1295](https://github.com/Shopify/sarama/pull/1295)).
+
+Improvements:
+- Unregister all broker metrics on broker stop
+  ([1232](https://github.com/Shopify/sarama/pull/1232)).
+- Add SCRAM authentication example
+  ([1303](https://github.com/Shopify/sarama/pull/1303)).
+- Add consumergroup examples
+  ([1304](https://github.com/Shopify/sarama/pull/1304)).
+- Expose consumer batch size metric
+  ([1296](https://github.com/Shopify/sarama/pull/1296)).
+- Add TLS options to console producer and consumer
+  ([1300](https://github.com/Shopify/sarama/pull/1300)).
+- Reduce client close bookkeeping
+  ([1297](https://github.com/Shopify/sarama/pull/1297)).
+- Satisfy error interface in create responses
+  ([1154](https://github.com/Shopify/sarama/pull/1154)).
+- Please lint gods
+  ([1346](https://github.com/Shopify/sarama/pull/1346)).
+
+Bug Fixes:
+- Fix multi consumer group instance crash
+  ([1338](https://github.com/Shopify/sarama/pull/1338)).
+- Update lz4 to latest version
+  ([1347](https://github.com/Shopify/sarama/pull/1347)).
+- Retry ErrNotCoordinatorForConsumer in new consumergroup session
+  ([1231](https://github.com/Shopify/sarama/pull/1231)).
+- Fix cleanup error handler
+  ([1332](https://github.com/Shopify/sarama/pull/1332)).
+- Fix race condition in PartitionConsumer
+  ([1156](https://github.com/Shopify/sarama/pull/1156)).
+
 #### Version 1.21.0 (2019-02-24)
 
 New Features:
diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile
index 09f743c..360b220 100644
--- a/vendor/github.com/Shopify/sarama/Makefile
+++ b/vendor/github.com/Shopify/sarama/Makefile
@@ -1,11 +1,12 @@
 export GO111MODULE=on
 
-default: fmt vet errcheck test
+default: fmt vet errcheck test lint
 
 # Taken from https://github.com/codecov/example-go#caveat-multiple-files
+.PHONY: test
 test:
 	echo "" > coverage.txt
-	for d in `go list ./... | grep -v vendor`; do \
+	for d in `go list ./...`; do \
 		go test -p 1 -v -timeout 240s -race -coverprofile=profile.out -covermode=atomic $$d || exit 1; \
 		if [ -f profile.out ]; then \
 			cat profile.out >> coverage.txt; \
@@ -13,20 +14,39 @@
 		fi \
 	done
 
+GOLINT := $(shell command -v golint)
+
+.PHONY: lint
+lint:
+ifndef GOLINT
+	go get golang.org/x/lint/golint
+endif
+	go list ./... | xargs golint
+
+.PHONY: vet
 vet:
 	go vet ./...
 
+ERRCHECK := $(shell command -v errcheck)
 # See https://github.com/kisielk/errcheck/pull/141 for details on ignorepkg
+.PHONY: errcheck
 errcheck:
+ifndef ERRCHECK
+	go get github.com/kisielk/errcheck
+endif
 	errcheck -ignorepkg fmt github.com/Shopify/sarama/...
 
+.PHONY: fmt
 fmt:
 	@if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi
 
-install_dependencies: install_errcheck get
+.PHONY : install_dependencies
+install_dependencies: get
 
-install_errcheck:
-	go get github.com/kisielk/errcheck
-
+.PHONY: get
 get:
-	go get -t
+	go get -t -v ./...
+
+.PHONY: clean
+clean:
+	go clean ./...
diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md
index f241b89..4cd736b 100644
--- a/vendor/github.com/Shopify/sarama/README.md
+++ b/vendor/github.com/Shopify/sarama/README.md
@@ -21,7 +21,7 @@
 Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
 the two latest stable releases of Kafka and Go, and we provide a two month
 grace period for older releases. This means we currently officially support
-Go 1.8 through 1.11, and Kafka 1.0 through 2.0, although older releases are
+Go 1.11 through 1.12, and Kafka 2.0 through 2.3, although older releases are
 still likely to work.
 
 Sarama follows semantic versioning and provides API stability via the gopkg.in service.
diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/Shopify/sarama/acl_bindings.go
index b91c282..50b689d 100644
--- a/vendor/github.com/Shopify/sarama/acl_bindings.go
+++ b/vendor/github.com/Shopify/sarama/acl_bindings.go
@@ -1,9 +1,10 @@
 package sarama
 
+//Resource holds information about acl resource type
 type Resource struct {
-	ResourceType       AclResourceType
-	ResourceName       string
-	ResoucePatternType AclResourcePatternType
+	ResourceType        AclResourceType
+	ResourceName        string
+	ResourcePatternType AclResourcePatternType
 }
 
 func (r *Resource) encode(pe packetEncoder, version int16) error {
@@ -14,11 +15,11 @@
 	}
 
 	if version == 1 {
-		if r.ResoucePatternType == AclPatternUnknown {
+		if r.ResourcePatternType == AclPatternUnknown {
 			Logger.Print("Cannot encode an unknown resource pattern type, using Literal instead")
-			r.ResoucePatternType = AclPatternLiteral
+			r.ResourcePatternType = AclPatternLiteral
 		}
-		pe.putInt8(int8(r.ResoucePatternType))
+		pe.putInt8(int8(r.ResourcePatternType))
 	}
 
 	return nil
@@ -39,12 +40,13 @@
 		if err != nil {
 			return err
 		}
-		r.ResoucePatternType = AclResourcePatternType(pattern)
+		r.ResourcePatternType = AclResourcePatternType(pattern)
 	}
 
 	return nil
 }
 
+//Acl holds information about acl type
 type Acl struct {
 	Principal      string
 	Host           string
@@ -91,6 +93,7 @@
 	return nil
 }
 
+//ResourceAcls is an acl resource type
 type ResourceAcls struct {
 	Resource
 	Acls []*Acl
diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go
index d3d5ad8..da1cdef 100644
--- a/vendor/github.com/Shopify/sarama/acl_create_request.go
+++ b/vendor/github.com/Shopify/sarama/acl_create_request.go
@@ -1,5 +1,6 @@
 package sarama
 
+//CreateAclsRequest is an acl creation request
 type CreateAclsRequest struct {
 	Version      int16
 	AclCreations []*AclCreation
@@ -38,16 +39,16 @@
 	return nil
 }
 
-func (d *CreateAclsRequest) key() int16 {
+func (c *CreateAclsRequest) key() int16 {
 	return 30
 }
 
-func (d *CreateAclsRequest) version() int16 {
-	return d.Version
+func (c *CreateAclsRequest) version() int16 {
+	return c.Version
 }
 
-func (d *CreateAclsRequest) requiredVersion() KafkaVersion {
-	switch d.Version {
+func (c *CreateAclsRequest) requiredVersion() KafkaVersion {
+	switch c.Version {
 	case 1:
 		return V2_0_0_0
 	default:
@@ -55,6 +56,7 @@
 	}
 }
 
+//AclCreation is a wrapper around Resource and Acl type
 type AclCreation struct {
 	Resource
 	Acl
diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go
index 8a56f35..f5a5e9a 100644
--- a/vendor/github.com/Shopify/sarama/acl_create_response.go
+++ b/vendor/github.com/Shopify/sarama/acl_create_response.go
@@ -2,6 +2,7 @@
 
 import "time"
 
+//CreateAclsResponse is an acl creation response type
 type CreateAclsResponse struct {
 	ThrottleTime         time.Duration
 	AclCreationResponses []*AclCreationResponse
@@ -46,18 +47,19 @@
 	return nil
 }
 
-func (d *CreateAclsResponse) key() int16 {
+func (c *CreateAclsResponse) key() int16 {
 	return 30
 }
 
-func (d *CreateAclsResponse) version() int16 {
+func (c *CreateAclsResponse) version() int16 {
 	return 0
 }
 
-func (d *CreateAclsResponse) requiredVersion() KafkaVersion {
+func (c *CreateAclsResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
 
+//AclCreationResponse is an acl creation response type
 type AclCreationResponse struct {
 	Err    KError
 	ErrMsg *string
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go
index 5e94ad7..15908ea 100644
--- a/vendor/github.com/Shopify/sarama/acl_delete_request.go
+++ b/vendor/github.com/Shopify/sarama/acl_delete_request.go
@@ -1,5 +1,6 @@
 package sarama
 
+//DeleteAclsRequest is a delete acl request
 type DeleteAclsRequest struct {
 	Version int
 	Filters []*AclFilter
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go
index a885fe5..6529565 100644
--- a/vendor/github.com/Shopify/sarama/acl_delete_response.go
+++ b/vendor/github.com/Shopify/sarama/acl_delete_response.go
@@ -2,21 +2,22 @@
 
 import "time"
 
+//DeleteAclsResponse is a delete acl response
 type DeleteAclsResponse struct {
 	Version         int16
 	ThrottleTime    time.Duration
 	FilterResponses []*FilterResponse
 }
 
-func (a *DeleteAclsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+func (d *DeleteAclsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
 
-	if err := pe.putArrayLength(len(a.FilterResponses)); err != nil {
+	if err := pe.putArrayLength(len(d.FilterResponses)); err != nil {
 		return err
 	}
 
-	for _, filterResponse := range a.FilterResponses {
-		if err := filterResponse.encode(pe, a.Version); err != nil {
+	for _, filterResponse := range d.FilterResponses {
+		if err := filterResponse.encode(pe, d.Version); err != nil {
 			return err
 		}
 	}
@@ -24,22 +25,22 @@
 	return nil
 }
 
-func (a *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+func (d *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
 	throttleTime, err := pd.getInt32()
 	if err != nil {
 		return err
 	}
-	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+	d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
 
 	n, err := pd.getArrayLength()
 	if err != nil {
 		return err
 	}
-	a.FilterResponses = make([]*FilterResponse, n)
+	d.FilterResponses = make([]*FilterResponse, n)
 
 	for i := 0; i < n; i++ {
-		a.FilterResponses[i] = new(FilterResponse)
-		if err := a.FilterResponses[i].decode(pd, version); err != nil {
+		d.FilterResponses[i] = new(FilterResponse)
+		if err := d.FilterResponses[i].decode(pd, version); err != nil {
 			return err
 		}
 	}
@@ -59,6 +60,7 @@
 	return V0_11_0_0
 }
 
+//FilterResponse is a filter response type
 type FilterResponse struct {
 	Err          KError
 	ErrMsg       *string
@@ -109,6 +111,7 @@
 	return nil
 }
 
+//MatchingAcl is a matching acl type
 type MatchingAcl struct {
 	Err    KError
 	ErrMsg *string
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go
index 3c95320..5222d46 100644
--- a/vendor/github.com/Shopify/sarama/acl_describe_request.go
+++ b/vendor/github.com/Shopify/sarama/acl_describe_request.go
@@ -1,5 +1,6 @@
 package sarama
 
+//DescribeAclsRequest is a describe acl request type
 type DescribeAclsRequest struct {
 	Version int
 	AclFilter
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go
index db3a88c..12126e5 100644
--- a/vendor/github.com/Shopify/sarama/acl_describe_response.go
+++ b/vendor/github.com/Shopify/sarama/acl_describe_response.go
@@ -2,6 +2,7 @@
 
 import "time"
 
+//DescribeAclsResponse is a describe acl response type
 type DescribeAclsResponse struct {
 	Version      int16
 	ThrottleTime time.Duration
diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go
index 72b7985..c10ad7b 100644
--- a/vendor/github.com/Shopify/sarama/acl_types.go
+++ b/vendor/github.com/Shopify/sarama/acl_types.go
@@ -1,50 +1,51 @@
 package sarama
 
-type AclOperation int
+type (
+	AclOperation int
+
+	AclPermissionType int
+
+	AclResourceType int
+
+	AclResourcePatternType int
+)
 
 // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
 const (
-	AclOperationUnknown         AclOperation = 0
-	AclOperationAny             AclOperation = 1
-	AclOperationAll             AclOperation = 2
-	AclOperationRead            AclOperation = 3
-	AclOperationWrite           AclOperation = 4
-	AclOperationCreate          AclOperation = 5
-	AclOperationDelete          AclOperation = 6
-	AclOperationAlter           AclOperation = 7
-	AclOperationDescribe        AclOperation = 8
-	AclOperationClusterAction   AclOperation = 9
-	AclOperationDescribeConfigs AclOperation = 10
-	AclOperationAlterConfigs    AclOperation = 11
-	AclOperationIdempotentWrite AclOperation = 12
+	AclOperationUnknown AclOperation = iota
+	AclOperationAny
+	AclOperationAll
+	AclOperationRead
+	AclOperationWrite
+	AclOperationCreate
+	AclOperationDelete
+	AclOperationAlter
+	AclOperationDescribe
+	AclOperationClusterAction
+	AclOperationDescribeConfigs
+	AclOperationAlterConfigs
+	AclOperationIdempotentWrite
 )
 
-type AclPermissionType int
-
 // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
 const (
-	AclPermissionUnknown AclPermissionType = 0
-	AclPermissionAny     AclPermissionType = 1
-	AclPermissionDeny    AclPermissionType = 2
-	AclPermissionAllow   AclPermissionType = 3
+	AclPermissionUnknown AclPermissionType = iota
+	AclPermissionAny
+	AclPermissionDeny
+	AclPermissionAllow
 )
 
-type AclResourceType int
-
 // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
 const (
-	AclResourceUnknown         AclResourceType = 0
-	AclResourceAny             AclResourceType = 1
-	AclResourceTopic           AclResourceType = 2
-	AclResourceGroup           AclResourceType = 3
-	AclResourceCluster         AclResourceType = 4
-	AclResourceTransactionalID AclResourceType = 5
+	AclResourceUnknown AclResourceType = iota
+	AclResourceAny
+	AclResourceTopic
+	AclResourceGroup
+	AclResourceCluster
+	AclResourceTransactionalID
 )
 
-type AclResourcePatternType int
-
 // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java
-
 const (
 	AclPatternUnknown AclResourcePatternType = iota
 	AclPatternAny
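
The constant blocks above now rely on iota, which assigns 0, 1, 2, ... in declaration order, so the wire values are unchanged as long as no entry is reordered or removed. A quick illustration:

```go
package main

import "fmt"

type AclOperation int

// With iota, each constant in the block is one greater than the last,
// so reordering the declarations would silently change wire values.
const (
	AclOperationUnknown AclOperation = iota // 0
	AclOperationAny                         // 1
	AclOperationAll                         // 2
)

func main() {
	fmt.Println(AclOperationUnknown, AclOperationAny, AclOperationAll) // 0 1 2
}
```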
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
index 6da166c..fc227ab 100644
--- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
+++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
@@ -1,5 +1,6 @@
 package sarama
 
+//AddOffsetsToTxnRequest adds offsets to a transaction request
 type AddOffsetsToTxnRequest struct {
 	TransactionalID string
 	ProducerID      int64
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
index 3a46151..c88c1f8 100644
--- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
+++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
@@ -4,6 +4,7 @@
 	"time"
 )
 
+//AddOffsetsToTxnResponse is a response type for adding offsets to txns
 type AddOffsetsToTxnResponse struct {
 	ThrottleTime time.Duration
 	Err          KError
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
index a8a5922..8d4b42e 100644
--- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
+++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
@@ -1,5 +1,6 @@
 package sarama
 
+//AddPartitionsToTxnRequest is an add partitions request
 type AddPartitionsToTxnRequest struct {
 	TransactionalID string
 	ProducerID      int64
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
index 581c556..eb4f23e 100644
--- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
+++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
@@ -4,6 +4,7 @@
 	"time"
 )
 
+//AddPartitionsToTxnResponse is a response type for adding partitions to a transaction
 type AddPartitionsToTxnResponse struct {
 	ThrottleTime time.Duration
 	Errors       map[string][]*PartitionError
@@ -82,6 +83,7 @@
 	return V0_11_0_0
 }
 
+//PartitionError is a partition error type
 type PartitionError struct {
 	Partition int32
 	Err       KError
diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/Shopify/sarama/admin.go
index 18b055a..1db6a0e 100644
--- a/vendor/github.com/Shopify/sarama/admin.go
+++ b/vendor/github.com/Shopify/sarama/admin.go
@@ -20,7 +20,7 @@
 	// List the topics available in the cluster with the default options.
 	ListTopics() (map[string]TopicDetail, error)
 
-	// Describe some topics in the cluster
+	// Describe some topics in the cluster.
 	DescribeTopics(topics []string) (metadata []*TopicMetadata, err error)
 
 	// Delete a topic. It may take several seconds after DeleteTopic returns success
@@ -78,12 +78,15 @@
 	// List the consumer groups available in the cluster.
 	ListConsumerGroups() (map[string]string, error)
 
-	// Describe the given consumer group
+	// Describe the given consumer groups.
 	DescribeConsumerGroups(groups []string) ([]*GroupDescription, error)
 
 	// List the consumer group offsets available in the cluster.
 	ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error)
 
+	// Delete a consumer group.
+	DeleteConsumerGroup(group string) error
+
 	// Get information about the nodes in the cluster
 	DescribeCluster() (brokers []*Broker, controllerID int32, err error)
 
@@ -131,7 +134,7 @@
 	}
 
 	if detail == nil {
-		return errors.New("You must specify topic details")
+		return errors.New("you must specify topic details")
 	}
 
 	topicDetails := make(map[string]*TopicDetail)
@@ -166,7 +169,7 @@
 	}
 
 	if topicErr.Err != ErrNoError {
-		return topicErr.Err
+		return topicErr
 	}
 
 	return nil
@@ -183,7 +186,9 @@
 		AllowAutoTopicCreation: false,
 	}
 
-	if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+	if ca.conf.Version.IsAtLeast(V1_0_0_0) {
+		request.Version = 5
+	} else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
 		request.Version = 4
 	}
 
@@ -358,7 +363,7 @@
 	}
 
 	if topicErr.Err != ErrNoError {
-		return topicErr.Err
+		return topicErr
 	}
 
 	return nil
@@ -369,29 +374,50 @@
 	if topic == "" {
 		return ErrInvalidTopic
 	}
-
-	topics := make(map[string]*DeleteRecordsRequestTopic)
-	topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: partitionOffsets}
-	request := &DeleteRecordsRequest{
-		Topics:  topics,
-		Timeout: ca.conf.Admin.Timeout,
+	partitionPerBroker := make(map[*Broker][]int32)
+	for partition := range partitionOffsets {
+		broker, err := ca.client.Leader(topic, partition)
+		if err != nil {
+			return err
+		}
+		if _, ok := partitionPerBroker[broker]; ok {
+			partitionPerBroker[broker] = append(partitionPerBroker[broker], partition)
+		} else {
+			partitionPerBroker[broker] = []int32{partition}
+		}
 	}
+	errs := make([]error, 0)
+	for broker, partitions := range partitionPerBroker {
+		topics := make(map[string]*DeleteRecordsRequestTopic)
+		recordsToDelete := make(map[int32]int64)
+		for _, p := range partitions {
+			recordsToDelete[p] = partitionOffsets[p]
+		}
+		topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: recordsToDelete}
+		request := &DeleteRecordsRequest{
+			Topics:  topics,
+			Timeout: ca.conf.Admin.Timeout,
+		}
 
-	b, err := ca.Controller()
-	if err != nil {
-		return err
+		rsp, err := broker.DeleteRecords(request)
+		if err != nil {
+			errs = append(errs, err)
+		} else {
+			deleteRecordsResponseTopic, ok := rsp.Topics[topic]
+			if !ok {
+				errs = append(errs, ErrIncompleteResponse)
+			} else {
+				for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions {
+					if deleteRecordsResponsePartition.Err != ErrNoError {
+						errs = append(errs, errors.New(deleteRecordsResponsePartition.Err.Error()))
+					}
+				}
+			}
+		}
 	}
-
-	rsp, err := b.DeleteRecords(request)
-	if err != nil {
-		return err
+	if len(errs) > 0 {
+		return ErrDeleteRecords{MultiError{&errs}}
 	}
-
-	_, ok := rsp.Topics[topic]
-	if !ok {
-		return ErrIncompleteResponse
-	}
-
 	//todo since we are dealing with a couple of partitions it would be good if we returned a slice of errors
 	//for each partition instead of one error
 	return nil
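
The DeleteRecords rewrite above fans the request out to each partition's leader instead of sending everything to the controller. The grouping pattern in isolation, with a hypothetical leader() lookup standing in for client.Leader(topic, partition):

```go
package main

import "fmt"

type broker string

// leader is an assumed stand-in for a real metadata lookup.
func leader(topic string, partition int32) broker {
	return broker(fmt.Sprintf("broker-%d", partition%2))
}

func main() {
	partitionOffsets := map[int32]int64{0: 100, 1: 200, 2: 300}

	// Group partitions by the broker that leads them, so each broker
	// receives a request covering only its own partitions.
	perBroker := make(map[broker][]int32)
	for p := range partitionOffsets {
		b := leader("my-topic", p)
		perBroker[b] = append(perBroker[b], p) // append handles the missing-key case
	}
	fmt.Println(perBroker)
}
```

Note that append on a nil slice already covers the first-partition-per-broker case, which is why the explicit `if _, ok :=` check in the diff is optional in Go.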
@@ -606,9 +632,38 @@
 		partitions:    topicPartitions,
 	}
 
-	if ca.conf.Version.IsAtLeast(V0_8_2_2) {
+	if ca.conf.Version.IsAtLeast(V0_10_2_0) {
+		request.Version = 2
+	} else if ca.conf.Version.IsAtLeast(V0_8_2_2) {
 		request.Version = 1
 	}
 
 	return coordinator.FetchOffset(request)
 }
+
+func (ca *clusterAdmin) DeleteConsumerGroup(group string) error {
+	coordinator, err := ca.client.Coordinator(group)
+	if err != nil {
+		return err
+	}
+
+	request := &DeleteGroupsRequest{
+		Groups: []string{group},
+	}
+
+	resp, err := coordinator.DeleteGroups(request)
+	if err != nil {
+		return err
+	}
+
+	groupErr, ok := resp.GroupErrorCodes[group]
+	if !ok {
+		return ErrIncompleteResponse
+	}
+
+	if groupErr != ErrNoError {
+		return groupErr
+	}
+
+	return nil
+}
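
A minimal caller of the new DeleteConsumerGroup might look like this; the broker address and group name are placeholders, and this sketch is not part of the diff itself:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V1_1_0_0 // DeleteGroups requires Kafka >= 1.1.0

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatalf("create admin: %v", err)
	}
	defer admin.Close()

	// The group must have no active members, or the broker rejects the call.
	if err := admin.DeleteConsumerGroup("my-group"); err != nil {
		log.Fatalf("delete group: %v", err)
	}
}
```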
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go
index 48c44ea..26c275b 100644
--- a/vendor/github.com/Shopify/sarama/alter_configs_request.go
+++ b/vendor/github.com/Shopify/sarama/alter_configs_request.go
@@ -1,45 +1,47 @@
 package sarama
 
+//AlterConfigsRequest is an alter config request type
 type AlterConfigsRequest struct {
 	Resources    []*AlterConfigsResource
 	ValidateOnly bool
 }
 
+//AlterConfigsResource is an alter config resource type
 type AlterConfigsResource struct {
 	Type          ConfigResourceType
 	Name          string
 	ConfigEntries map[string]*string
 }
 
-func (acr *AlterConfigsRequest) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(acr.Resources)); err != nil {
+func (a *AlterConfigsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(a.Resources)); err != nil {
 		return err
 	}
 
-	for _, r := range acr.Resources {
+	for _, r := range a.Resources {
 		if err := r.encode(pe); err != nil {
 			return err
 		}
 	}
 
-	pe.putBool(acr.ValidateOnly)
+	pe.putBool(a.ValidateOnly)
 	return nil
 }
 
-func (acr *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
+func (a *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
 	resourceCount, err := pd.getArrayLength()
 	if err != nil {
 		return err
 	}
 
-	acr.Resources = make([]*AlterConfigsResource, resourceCount)
-	for i := range acr.Resources {
+	a.Resources = make([]*AlterConfigsResource, resourceCount)
+	for i := range a.Resources {
 		r := &AlterConfigsResource{}
 		err = r.decode(pd, version)
 		if err != nil {
 			return err
 		}
-		acr.Resources[i] = r
+		a.Resources[i] = r
 	}
 
 	validateOnly, err := pd.getBool()
@@ -47,22 +49,22 @@
 		return err
 	}
 
-	acr.ValidateOnly = validateOnly
+	a.ValidateOnly = validateOnly
 
 	return nil
 }
 
-func (ac *AlterConfigsResource) encode(pe packetEncoder) error {
-	pe.putInt8(int8(ac.Type))
+func (a *AlterConfigsResource) encode(pe packetEncoder) error {
+	pe.putInt8(int8(a.Type))
 
-	if err := pe.putString(ac.Name); err != nil {
+	if err := pe.putString(a.Name); err != nil {
 		return err
 	}
 
-	if err := pe.putArrayLength(len(ac.ConfigEntries)); err != nil {
+	if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
 		return err
 	}
-	for configKey, configValue := range ac.ConfigEntries {
+	for configKey, configValue := range a.ConfigEntries {
 		if err := pe.putString(configKey); err != nil {
 			return err
 		}
@@ -74,18 +76,18 @@
 	return nil
 }
 
-func (ac *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
+func (a *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
 	t, err := pd.getInt8()
 	if err != nil {
 		return err
 	}
-	ac.Type = ConfigResourceType(t)
+	a.Type = ConfigResourceType(t)
 
 	name, err := pd.getString()
 	if err != nil {
 		return err
 	}
-	ac.Name = name
+	a.Name = name
 
 	n, err := pd.getArrayLength()
 	if err != nil {
@@ -93,13 +95,13 @@
 	}
 
 	if n > 0 {
-		ac.ConfigEntries = make(map[string]*string, n)
+		a.ConfigEntries = make(map[string]*string, n)
 		for i := 0; i < n; i++ {
 			configKey, err := pd.getString()
 			if err != nil {
 				return err
 			}
-			if ac.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
+			if a.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
 				return err
 			}
 		}
@@ -107,14 +109,14 @@
 	return err
 }
 
-func (acr *AlterConfigsRequest) key() int16 {
+func (a *AlterConfigsRequest) key() int16 {
 	return 33
 }
 
-func (acr *AlterConfigsRequest) version() int16 {
+func (a *AlterConfigsRequest) version() int16 {
 	return 0
 }
 
-func (acr *AlterConfigsRequest) requiredVersion() KafkaVersion {
+func (a *AlterConfigsRequest) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go
index 29b09e1..3893663 100644
--- a/vendor/github.com/Shopify/sarama/alter_configs_response.go
+++ b/vendor/github.com/Shopify/sarama/alter_configs_response.go
@@ -2,11 +2,13 @@
 
 import "time"
 
+//AlterConfigsResponse is a response type for alter config
 type AlterConfigsResponse struct {
 	ThrottleTime time.Duration
 	Resources    []*AlterConfigsResourceResponse
 }
 
+//AlterConfigsResourceResponse is a response type for alter config resource
 type AlterConfigsResourceResponse struct {
 	ErrorCode int16
 	ErrorMsg  string
@@ -14,21 +16,21 @@
 	Name      string
 }
 
-func (ct *AlterConfigsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(ct.ThrottleTime / time.Millisecond))
+func (a *AlterConfigsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
 
-	if err := pe.putArrayLength(len(ct.Resources)); err != nil {
+	if err := pe.putArrayLength(len(a.Resources)); err != nil {
 		return err
 	}
 
-	for i := range ct.Resources {
-		pe.putInt16(ct.Resources[i].ErrorCode)
-		err := pe.putString(ct.Resources[i].ErrorMsg)
+	for i := range a.Resources {
+		pe.putInt16(a.Resources[i].ErrorCode)
+		err := pe.putString(a.Resources[i].ErrorMsg)
 		if err != nil {
 			return err
 		}
-		pe.putInt8(int8(ct.Resources[i].Type))
-		err = pe.putString(ct.Resources[i].Name)
+		pe.putInt8(int8(a.Resources[i].Type))
+		err = pe.putString(a.Resources[i].Name)
 		if err != nil {
 			return nil
 		}
@@ -37,59 +39,59 @@
 	return nil
 }
 
-func (acr *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
+func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
 	throttleTime, err := pd.getInt32()
 	if err != nil {
 		return err
 	}
-	acr.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
 
 	responseCount, err := pd.getArrayLength()
 	if err != nil {
 		return err
 	}
 
-	acr.Resources = make([]*AlterConfigsResourceResponse, responseCount)
+	a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
 
-	for i := range acr.Resources {
-		acr.Resources[i] = new(AlterConfigsResourceResponse)
+	for i := range a.Resources {
+		a.Resources[i] = new(AlterConfigsResourceResponse)
 
 		errCode, err := pd.getInt16()
 		if err != nil {
 			return err
 		}
-		acr.Resources[i].ErrorCode = errCode
+		a.Resources[i].ErrorCode = errCode
 
 		e, err := pd.getString()
 		if err != nil {
 			return err
 		}
-		acr.Resources[i].ErrorMsg = e
+		a.Resources[i].ErrorMsg = e
 
 		t, err := pd.getInt8()
 		if err != nil {
 			return err
 		}
-		acr.Resources[i].Type = ConfigResourceType(t)
+		a.Resources[i].Type = ConfigResourceType(t)
 
 		name, err := pd.getString()
 		if err != nil {
 			return err
 		}
-		acr.Resources[i].Name = name
+		a.Resources[i].Name = name
 	}
 
 	return nil
 }
 
-func (r *AlterConfigsResponse) key() int16 {
+func (a *AlterConfigsResponse) key() int16 {
 	return 32
 }
 
-func (r *AlterConfigsResponse) version() int16 {
+func (a *AlterConfigsResponse) version() int16 {
 	return 0
 }
 
-func (r *AlterConfigsResponse) requiredVersion() KafkaVersion {
+func (a *AlterConfigsResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
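
The renamed receivers above leave the wire format untouched. A minimal sketch of driving the alter-configs round trip through Broker.AlterConfigs (which gains a doc comment later in this diff); the topic name and retention value are placeholders, and the request's Resources field is assumed per upstream sarama:

```go
import (
	"fmt"
	"log"

	sarama "github.com/Shopify/sarama"
)

// alterRetention is a hedged sketch; broker must already be connected.
func alterRetention(broker *sarama.Broker) {
	retention := "86400000" // one day, as a Kafka config string (placeholder)
	req := &sarama.AlterConfigsRequest{
		Resources: []*sarama.AlterConfigsResource{{
			Type:          sarama.TopicResource,
			Name:          "events", // hypothetical topic
			ConfigEntries: map[string]*string{"retention.ms": &retention},
		}},
	}
	res, err := broker.AlterConfigs(req)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range res.Resources {
		fmt.Println(r.Name, r.ErrorCode, r.ErrorMsg)
	}
}
```
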
diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go
index ab65f01..b33167c 100644
--- a/vendor/github.com/Shopify/sarama/api_versions_request.go
+++ b/vendor/github.com/Shopify/sarama/api_versions_request.go
@@ -1,24 +1,25 @@
 package sarama
 
+//ApiVersionsRequest is an api versions request type
 type ApiVersionsRequest struct {
 }
 
-func (r *ApiVersionsRequest) encode(pe packetEncoder) error {
+func (a *ApiVersionsRequest) encode(pe packetEncoder) error {
 	return nil
 }
 
-func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
+func (a *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
 	return nil
 }
 
-func (r *ApiVersionsRequest) key() int16 {
+func (a *ApiVersionsRequest) key() int16 {
 	return 18
 }
 
-func (r *ApiVersionsRequest) version() int16 {
+func (a *ApiVersionsRequest) version() int16 {
 	return 0
 }
 
-func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
+func (a *ApiVersionsRequest) requiredVersion() KafkaVersion {
 	return V0_10_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go
index 23bc326..bb1f0b3 100644
--- a/vendor/github.com/Shopify/sarama/api_versions_response.go
+++ b/vendor/github.com/Shopify/sarama/api_versions_response.go
@@ -1,5 +1,6 @@
 package sarama
 
+//ApiVersionsResponseBlock is an api versions response block type
 type ApiVersionsResponseBlock struct {
 	ApiKey     int16
 	MinVersion int16
@@ -31,6 +32,7 @@
 	return nil
 }
 
+//ApiVersionsResponse is an api version response type
 type ApiVersionsResponse struct {
 	Err         KError
 	ApiVersions []*ApiVersionsResponseBlock
diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go
index 5174a35..11e0849 100644
--- a/vendor/github.com/Shopify/sarama/async_producer.go
+++ b/vendor/github.com/Shopify/sarama/async_producer.go
@@ -92,9 +92,8 @@
 }
 
 type asyncProducer struct {
-	client    Client
-	conf      *Config
-	ownClient bool
+	client Client
+	conf   *Config
 
 	errors                    chan *ProducerError
 	input, successes, retries chan *ProducerMessage
@@ -113,18 +112,19 @@
 	if err != nil {
 		return nil, err
 	}
-
-	p, err := NewAsyncProducerFromClient(client)
-	if err != nil {
-		return nil, err
-	}
-	p.(*asyncProducer).ownClient = true
-	return p, nil
+	return newAsyncProducer(client)
 }
 
 // NewAsyncProducerFromClient creates a new Producer using the given client. It is still
 // necessary to call Close() on the underlying client when shutting down this producer.
 func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
+	// For a client passed in by the caller, ensure we don't
+	// call Close() on it.
+	cli := &nopCloserClient{client}
+	return newAsyncProducer(cli)
+}
+
+func newAsyncProducer(client Client) (AsyncProducer, error) {
 	// Check that we are not dealing with a closed Client before processing any other arguments
 	if client.Closed() {
 		return nil, ErrClosedClient
@@ -191,10 +191,17 @@
 	// Partition is the partition that the message was sent to. This is only
 	// guaranteed to be defined if the message was successfully delivered.
 	Partition int32
-	// Timestamp is the timestamp assigned to the message by the broker. This
-	// is only guaranteed to be defined if the message was successfully
-	// delivered, RequiredAcks is not NoResponse, and the Kafka broker is at
-	// least version 0.10.0.
+	// Timestamp can vary in behaviour depending on broker configuration, being
+	// in either one of the CreateTime or LogAppendTime modes (default CreateTime),
+	// and requires a Kafka version of at least 0.10.0.
+	//
+	// When configured to CreateTime, the timestamp is specified by the producer
+	// either by explicitly setting this field, or when the message is added
+	// to a produce set.
+	//
+	// When configured to LogAppendTime, the timestamp is assigned to the message
+	// by the broker. This is only guaranteed to be defined if the message was
+	// successfully delivered and RequiredAcks is not NoResponse.
 	Timestamp time.Time
 
 	retries        int
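
The CreateTime behaviour documented above can be exercised directly. A minimal sketch, assuming an existing AsyncProducer and a hypothetical topic:

```go
import (
	"time"

	sarama "github.com/Shopify/sarama"
)

// sendWithTimestamp is a hedged sketch: with CreateTime (the broker default)
// the producer owns the timestamp; if left zero, it is filled in when the
// message joins a produce set.
func sendWithTimestamp(producer sarama.AsyncProducer) {
	producer.Input() <- &sarama.ProducerMessage{
		Topic:     "events", // hypothetical topic
		Value:     sarama.StringEncoder("payload"),
		Timestamp: time.Now(), // explicit CreateTime timestamp
	}
}
```
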
@@ -999,11 +1006,9 @@
 
 	p.inFlight.Wait()
 
-	if p.ownClient {
-		err := p.client.Close()
-		if err != nil {
-			Logger.Println("producer/shutdown failed to close the embedded client:", err)
-		}
+	err := p.client.Close()
+	if err != nil {
+		Logger.Println("producer/shutdown failed to close the embedded client:", err)
 	}
 
 	close(p.input)
diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/Shopify/sarama/balance_strategy.go
index e78988d..2fce17f 100644
--- a/vendor/github.com/Shopify/sarama/balance_strategy.go
+++ b/vendor/github.com/Shopify/sarama/balance_strategy.go
@@ -24,7 +24,7 @@
 // --------------------------------------------------------------------
 
 // BalanceStrategy is used to balance topics and partitions
-// across memebers of a consumer group
+// across members of a consumer group
 type BalanceStrategy interface {
 	// Name uniquely identifies the strategy.
 	Name() string
@@ -78,7 +78,7 @@
 // Name implements BalanceStrategy.
 func (s *balanceStrategy) Name() string { return s.name }
 
-// Balance implements BalanceStrategy.
+// Plan implements BalanceStrategy.
 func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
 	// Build members by topic map
 	mbt := make(map[string][]string)
diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go
index 9129089..9c3e5a0 100644
--- a/vendor/github.com/Shopify/sarama/broker.go
+++ b/vendor/github.com/Shopify/sarama/broker.go
@@ -13,24 +13,25 @@
 	"sync/atomic"
 	"time"
 
-	"github.com/rcrowley/go-metrics"
+	metrics "github.com/rcrowley/go-metrics"
 )
 
 // Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
 type Broker struct {
-	id   int32
-	addr string
+	conf *Config
 	rack *string
 
-	conf          *Config
+	id            int32
+	addr          string
 	correlationID int32
 	conn          net.Conn
 	connErr       error
 	lock          sync.Mutex
 	opened        int32
+	responses     chan responsePromise
+	done          chan bool
 
-	responses chan responsePromise
-	done      chan bool
+	registeredMetrics []string
 
 	incomingByteRate       metrics.Meter
 	requestRate            metrics.Meter
@@ -46,6 +47,8 @@
 	brokerOutgoingByteRate metrics.Meter
 	brokerResponseRate     metrics.Meter
 	brokerResponseSize     metrics.Histogram
+
+	kerberosAuthenticator GSSAPIKerberosAuth
 }
 
 // SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker
@@ -56,6 +59,11 @@
 	SASLTypeOAuth = "OAUTHBEARER"
 	// SASLTypePlaintext represents the SASL/PLAIN mechanism
 	SASLTypePlaintext = "PLAIN"
+	// SASLTypeSCRAMSHA256 represents the SCRAM-SHA-256 mechanism.
+	SASLTypeSCRAMSHA256 = "SCRAM-SHA-256"
+	// SASLTypeSCRAMSHA512 represents the SCRAM-SHA-512 mechanism.
+	SASLTypeSCRAMSHA512 = "SCRAM-SHA-512"
+	// SASLTypeGSSAPI represents the GSSAPI mechanism.
+	SASLTypeGSSAPI      = "GSSAPI"
 	// SASLHandshakeV0 is v0 of the Kafka SASL handshake protocol. Client and
 	// server negotiate SASL auth using opaque packets.
 	SASLHandshakeV0 = int16(0)
@@ -92,6 +100,20 @@
 	Token() (*AccessToken, error)
 }
 
+// SCRAMClient is an interface to a SCRAM
+// client implementation.
+type SCRAMClient interface {
+	// Begin prepares the client for the SCRAM exchange
+	// with the server with a user name and a password
+	Begin(userName, password, authzID string) error
+	// Step steps client through the SCRAM exchange. It is
+	// called repeatedly until it errors or `Done` returns true.
+	Step(challenge string) (response string, err error)
+	// Done should return true when the SCRAM conversation
+	// is over.
+	Done() bool
+}
+
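
An implementation plugs in through the SCRAMClientGeneratorFunc config field added later in this diff. A minimal sketch, where mySCRAMClient is a hypothetical type satisfying the SCRAMClient interface above:

```go
import sarama "github.com/Shopify/sarama"

// newSCRAMConfig is a hedged sketch; credentials are placeholders.
func newSCRAMConfig(user, pass string) *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512
	cfg.Net.SASL.User = user
	cfg.Net.SASL.Password = pass
	cfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient {
		return &mySCRAMClient{} // hypothetical SCRAMClient implementation
	}
	return cfg
}
```
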
 type responsePromise struct {
 	requestTime   time.Time
 	correlationID int32
@@ -137,6 +159,8 @@
 
 		if conf.Net.TLS.Enable {
 			b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
+		} else if conf.Net.Proxy.Enable {
+			b.conn, b.connErr = conf.Net.Proxy.Dialer.Dial("tcp", b.addr)
 		} else {
 			b.conn, b.connErr = dialer.Dial("tcp", b.addr)
 		}
@@ -161,13 +185,7 @@
 		// Do not gather metrics for seeded broker (only used during bootstrap) because they share
 		// the same id (-1) and are already exposed through the global metrics above
 		if b.id >= 0 {
-			b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry)
-			b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry)
-			b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry)
-			b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry)
-			b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry)
-			b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry)
-			b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry)
+			b.registerMetrics()
 		}
 
 		if conf.Net.SASL.Enable {
@@ -210,6 +228,7 @@
 	return b.conn != nil, b.connErr
 }
 
+//Close closes the broker resources
 func (b *Broker) Close() error {
 	b.lock.Lock()
 	defer b.lock.Unlock()
@@ -228,12 +247,7 @@
 	b.done = nil
 	b.responses = nil
 
-	if b.id >= 0 {
-		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("incoming-byte-rate", b))
-		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("request-rate", b))
-		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("outgoing-byte-rate", b))
-		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("response-rate", b))
-	}
+	b.unregisterMetrics()
 
 	if err == nil {
 		Logger.Printf("Closed connection to broker %s\n", b.addr)
@@ -267,6 +281,7 @@
 	return *b.rack
 }
 
+//GetMetadata sends a metadata request and returns a metadata response or error
 func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
 	response := new(MetadataResponse)
 
@@ -279,6 +294,7 @@
 	return response, nil
 }
 
+//GetConsumerMetadata sends a consumer metadata request and returns a consumer metadata response or error
 func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
 	response := new(ConsumerMetadataResponse)
 
@@ -291,6 +307,7 @@
 	return response, nil
 }
 
+//FindCoordinator sends a find coordinator request and returns a response or error
 func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
 	response := new(FindCoordinatorResponse)
 
@@ -303,6 +320,7 @@
 	return response, nil
 }
 
+//GetAvailableOffsets returns an offset response or error
 func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
 	response := new(OffsetResponse)
 
@@ -315,9 +333,12 @@
 	return response, nil
 }
 
+//Produce returns a produce response or error
 func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
-	var response *ProduceResponse
-	var err error
+	var (
+		response *ProduceResponse
+		err      error
+	)
 
 	if request.RequiredAcks == NoResponse {
 		err = b.sendAndReceive(request, nil)
@@ -333,11 +354,11 @@
 	return response, nil
 }
 
+//Fetch returns a FetchResponse or error
 func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
 	response := new(FetchResponse)
 
 	err := b.sendAndReceive(request, response)
-
 	if err != nil {
 		return nil, err
 	}
@@ -345,11 +366,11 @@
 	return response, nil
 }
 
+//CommitOffset returns an offset commit response or error
 func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
 	response := new(OffsetCommitResponse)
 
 	err := b.sendAndReceive(request, response)
-
 	if err != nil {
 		return nil, err
 	}
@@ -357,11 +378,11 @@
 	return response, nil
 }
 
+//FetchOffset returns an offset fetch response or error
 func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
 	response := new(OffsetFetchResponse)
 
 	err := b.sendAndReceive(request, response)
-
 	if err != nil {
 		return nil, err
 	}
@@ -369,6 +390,7 @@
 	return response, nil
 }
 
+//JoinGroup returns a join group response or error
 func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
 	response := new(JoinGroupResponse)
 
@@ -380,6 +402,7 @@
 	return response, nil
 }
 
+//SyncGroup returns a sync group response or error
 func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
 	response := new(SyncGroupResponse)
 
@@ -391,6 +414,7 @@
 	return response, nil
 }
 
+//LeaveGroup returns a leave group response or error
 func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
 	response := new(LeaveGroupResponse)
 
@@ -402,6 +426,7 @@
 	return response, nil
 }
 
+//Heartbeat returns a heartbeat response or error
 func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
 	response := new(HeartbeatResponse)
 
@@ -413,6 +438,7 @@
 	return response, nil
 }
 
+//ListGroups returns a list groups response or error
 func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
 	response := new(ListGroupsResponse)
 
@@ -424,6 +450,7 @@
 	return response, nil
 }
 
+//DescribeGroups returns a describe groups response or error
 func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
 	response := new(DescribeGroupsResponse)
 
@@ -435,6 +462,7 @@
 	return response, nil
 }
 
+//ApiVersions returns an api versions response or error
 func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
 	response := new(ApiVersionsResponse)
 
@@ -446,6 +474,7 @@
 	return response, nil
 }
 
+//CreateTopics sends a create topics request and returns a create topics response or error
 func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
 	response := new(CreateTopicsResponse)
 
@@ -457,6 +486,7 @@
 	return response, nil
 }
 
+//DeleteTopics sends a delete topics request and returns a delete topics response or error
 func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
 	response := new(DeleteTopicsResponse)
 
@@ -468,6 +498,8 @@
 	return response, nil
 }
 
+//CreatePartitions sends a create partitions request and returns a create
+//partitions response or error
 func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
 	response := new(CreatePartitionsResponse)
 
@@ -479,6 +511,8 @@
 	return response, nil
 }
 
+//DeleteRecords sends a request to delete records and returns a delete records
+//response or error
 func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) {
 	response := new(DeleteRecordsResponse)
 
@@ -490,6 +524,7 @@
 	return response, nil
 }
 
+//DescribeAcls sends a describe acl request and returns a response or error
 func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
 	response := new(DescribeAclsResponse)
 
@@ -501,6 +536,7 @@
 	return response, nil
 }
 
+//CreateAcls sends a create acl request and returns a response or error
 func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
 	response := new(CreateAclsResponse)
 
@@ -512,6 +548,7 @@
 	return response, nil
 }
 
+//DeleteAcls sends a delete acl request and returns a response or error
 func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
 	response := new(DeleteAclsResponse)
 
@@ -523,6 +560,7 @@
 	return response, nil
 }
 
+//InitProducerID sends an init producer request and returns a response or error
 func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
 	response := new(InitProducerIDResponse)
 
@@ -534,6 +572,8 @@
 	return response, nil
 }
 
+//AddPartitionsToTxn sends a request to add partitions to a txn and returns
+//a response or error
 func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
 	response := new(AddPartitionsToTxnResponse)
 
@@ -545,6 +585,8 @@
 	return response, nil
 }
 
+//AddOffsetsToTxn sends a request to add offsets to txn and returns a response
+//or error
 func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
 	response := new(AddOffsetsToTxnResponse)
 
@@ -556,6 +598,7 @@
 	return response, nil
 }
 
+//EndTxn sends a request to end txn and returns a response or error
 func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
 	response := new(EndTxnResponse)
 
@@ -567,6 +610,8 @@
 	return response, nil
 }
 
+//TxnOffsetCommit sends a request to commit transaction offsets and returns
+//a response or error
 func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
 	response := new(TxnOffsetCommitResponse)
 
@@ -578,6 +623,8 @@
 	return response, nil
 }
 
+//DescribeConfigs sends a request to describe config and returns a response or
+//error
 func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
 	response := new(DescribeConfigsResponse)
 
@@ -589,6 +636,7 @@
 	return response, nil
 }
 
+//AlterConfigs sends a request to alter config and returns a response or error
 func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
 	response := new(AlterConfigsResponse)
 
@@ -600,6 +648,7 @@
 	return response, nil
 }
 
+//DeleteGroups sends a request to delete groups and returns a response or error
 func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) {
 	response := new(DeleteGroupsResponse)
 
@@ -638,7 +687,7 @@
 
 	requestTime := time.Now()
 	bytes, err := b.conn.Write(buf)
-	b.updateOutgoingCommunicationMetrics(bytes)
+	b.updateOutgoingCommunicationMetrics(bytes) //TODO: should this come after the error check?
 	if err != nil {
 		return nil, err
 	}
@@ -658,7 +707,6 @@
 
 func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
 	promise, err := b.send(req, res != nil)
-
 	if err != nil {
 		return err
 	}
@@ -707,11 +755,11 @@
 }
 
 func (b *Broker) encode(pe packetEncoder, version int16) (err error) {
-
 	host, portstr, err := net.SplitHostPort(b.addr)
 	if err != nil {
 		return err
 	}
+
 	port, err := strconv.Atoi(portstr)
 	if err != nil {
 		return err
@@ -739,6 +787,7 @@
 func (b *Broker) responseReceiver() {
 	var dead error
 	header := make([]byte, 8)
+
 	for response := range b.responses {
 		if dead != nil {
 			response.errors <- dead
@@ -793,14 +842,28 @@
 }
 
 func (b *Broker) authenticateViaSASL() error {
-	if b.conf.Net.SASL.Mechanism == SASLTypeOAuth {
+	switch b.conf.Net.SASL.Mechanism {
+	case SASLTypeOAuth:
 		return b.sendAndReceiveSASLOAuth(b.conf.Net.SASL.TokenProvider)
+	case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
+		return b.sendAndReceiveSASLSCRAMv1()
+	case SASLTypeGSSAPI:
+		return b.sendAndReceiveKerberos()
+	default:
+		return b.sendAndReceiveSASLPlainAuth()
 	}
-	return b.sendAndReceiveSASLPlainAuth()
 }
 
-func (b *Broker) sendAndReceiveSASLHandshake(saslType string, version int16) error {
-	rb := &SaslHandshakeRequest{Mechanism: saslType, Version: version}
+func (b *Broker) sendAndReceiveKerberos() error {
+	b.kerberosAuthenticator.Config = &b.conf.Net.SASL.GSSAPI
+	if b.kerberosAuthenticator.NewKerberosClientFunc == nil {
+		b.kerberosAuthenticator.NewKerberosClientFunc = NewKerberosClient
+	}
+	return b.kerberosAuthenticator.Authorize(b)
+}
+
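
The Kerberos path is driven entirely by the GSSAPIConfig fields validated in config.go further down this diff. A hedged sketch of a password-based setup, every value a placeholder:

```go
import sarama "github.com/Shopify/sarama"

// newKerberosConfig is a hedged sketch of the KRB5_USER_AUTH auth type.
func newKerberosConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.Mechanism = sarama.SASLTypeGSSAPI
	cfg.Net.SASL.GSSAPI.ServiceName = "kafka"
	cfg.Net.SASL.GSSAPI.AuthType = sarama.KRB5_USER_AUTH
	cfg.Net.SASL.GSSAPI.Username = "client"
	cfg.Net.SASL.GSSAPI.Password = "secret"
	cfg.Net.SASL.GSSAPI.Realm = "EXAMPLE.COM"
	cfg.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf"
	return cfg
}
```
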
+func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int16) error {
+	rb := &SaslHandshakeRequest{Mechanism: string(saslType), Version: version}
 
 	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
 	buf, err := encode(req, b.conf.MetricRegistry)
@@ -828,6 +891,7 @@
 		Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
 		return err
 	}
+
 	length := binary.BigEndian.Uint32(header[:4])
 	payload := make([]byte, length-4)
 	n, err := io.ReadFull(b.conn, payload)
@@ -835,23 +899,29 @@
 		Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
 		return err
 	}
+
 	b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
 	res := &SaslHandshakeResponse{}
+
 	err = versionedDecode(payload, res, 0)
 	if err != nil {
 		Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
 		return err
 	}
+
 	if res.Err != ErrNoError {
 		Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
 		return res.Err
 	}
-	Logger.Print("Successful SASL handshake")
+
+	Logger.Print("Successful SASL handshake. Available mechanisms: ", res.EnabledMechanisms)
 	return nil
 }
 
-// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149)
-// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9
+// Kafka 0.10.x supported SASL PLAIN/Kerberos via KAFKA-3149 (KIP-43).
+// Kafka 1.x.x onward added a SaslAuthenticate request/response message which
+// wraps the SASL flow in the Kafka protocol, allowing meaningful errors to be
+// returned on authentication failure.
 //
 // In SASL Plain, Kafka expects the auth header to be in the following format
 // Message format (from https://tools.ietf.org/html/rfc4616):
@@ -865,17 +935,34 @@
 //   SAFE      = UTF1 / UTF2 / UTF3 / UTF4
 //                  ;; any UTF-8 encoded Unicode character except NUL
 //
+// With the SASL v0 handshake and auth:
 // When credentials are valid, Kafka returns a 4 byte array of null characters.
-// When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way
-// of responding to bad credentials but thats how its being done today.
+// When credentials are invalid, Kafka closes the connection.
+//
+// With the SASL v1 handshake and auth:
+// When credentials are invalid, Kafka replies with a SaslAuthenticate response
+// containing an error code and message detailing the authentication failure.
 func (b *Broker) sendAndReceiveSASLPlainAuth() error {
+	// default to V0 to allow for backward compatibility when SASL is enabled
+	// but not the handshake
 	if b.conf.Net.SASL.Handshake {
-		handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, SASLHandshakeV0)
+		handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version)
 		if handshakeErr != nil {
 			Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
 			return handshakeErr
 		}
 	}
+
+	if b.conf.Net.SASL.Version == SASLHandshakeV1 {
+		return b.sendAndReceiveV1SASLPlainAuth()
+	}
+	return b.sendAndReceiveV0SASLPlainAuth()
+}
+
+// sendAndReceiveV0SASLPlainAuth flows the v0 sasl auth NOT wrapped in the kafka protocol
+func (b *Broker) sendAndReceiveV0SASLPlainAuth() error {
 	length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
 	authBytes := make([]byte, length+4) //4 byte length header + auth data
 	binary.BigEndian.PutUint32(authBytes, uint32(length))
@@ -909,55 +996,197 @@
 	return nil
 }
 
+// sendAndReceiveV1SASLPlainAuth flows the v1 sasl authentication using the kafka protocol
+func (b *Broker) sendAndReceiveV1SASLPlainAuth() error {
+	correlationID := b.correlationID
+
+	requestTime := time.Now()
+
+	bytesWritten, err := b.sendSASLPlainAuthClientResponse(correlationID)
+
+	b.updateOutgoingCommunicationMetrics(bytesWritten)
+
+	if err != nil {
+		Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	b.correlationID++
+
+	bytesRead, err := b.receiveSASLServerResponse(&SaslAuthenticateResponse{}, correlationID)
+	b.updateIncomingCommunicationMetrics(bytesRead, time.Since(requestTime))
+
+	// With v1 SASL we get an error message set in the response, which we can return
+	if err != nil {
+		Logger.Printf("Error returned from broker during SASL flow %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	return nil
+}
+
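
Which of the two paths runs is controlled by the Net.SASL.Version knob added in config.go. A minimal sketch of opting into the v1 flow on brokers that support SaslAuthenticate; credentials are placeholders:

```go
import sarama "github.com/Shopify/sarama"

// newV1PlainConfig is a hedged sketch of v1 SASL/PLAIN configuration.
func newV1PlainConfig(user, pass string) *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.Mechanism = sarama.SASLTypePlaintext
	cfg.Net.SASL.User = user
	cfg.Net.SASL.Password = pass
	cfg.Net.SASL.Version = sarama.SASLHandshakeV1 // default is SASLHandshakeV0
	return cfg
}
```
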
 // sendAndReceiveSASLOAuth performs the authentication flow as described by KIP-255
 // https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=75968876
 func (b *Broker) sendAndReceiveSASLOAuth(provider AccessTokenProvider) error {
-
 	if err := b.sendAndReceiveSASLHandshake(SASLTypeOAuth, SASLHandshakeV1); err != nil {
 		return err
 	}
 
 	token, err := provider.Token()
-
 	if err != nil {
 		return err
 	}
 
-	requestTime := time.Now()
-
-	correlationID := b.correlationID
-
-	bytesWritten, err := b.sendSASLOAuthBearerClientResponse(token, correlationID)
-
+	message, err := buildClientFirstMessage(token)
 	if err != nil {
 		return err
 	}
 
+	challenged, err := b.sendClientMessage(message)
+	if err != nil {
+		return err
+	}
+
+	if challenged {
+		// Abort the token exchange. The broker returns the failure code.
+		_, err = b.sendClientMessage([]byte(`\x01`))
+	}
+
+	return err
+}
+
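
For completeness, a hedged sketch of the AccessTokenProvider side that feeds this flow, wired through the Net.SASL.TokenProvider field shown in config.go; the static token is an assumption, and a real provider would refresh it from an OAuth endpoint:

```go
import sarama "github.com/Shopify/sarama"

// staticTokenProvider is a hypothetical AccessTokenProvider returning a
// fixed token.
type staticTokenProvider struct{ token string }

func (s *staticTokenProvider) Token() (*sarama.AccessToken, error) {
	// The Token field name is assumed per upstream sarama's AccessToken type.
	return &sarama.AccessToken{Token: s.token}, nil
}

// Wiring (sketch):
//   cfg.Net.SASL.Enable = true
//   cfg.Net.SASL.Mechanism = sarama.SASLTypeOAuth
//   cfg.Net.SASL.TokenProvider = &staticTokenProvider{token: "..."}
```
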
+// sendClientMessage sends a SASL/OAUTHBEARER client message and returns true
+// if the broker responds with a challenge, in which case the token is
+// rejected.
+func (b *Broker) sendClientMessage(message []byte) (bool, error) {
+	requestTime := time.Now()
+	correlationID := b.correlationID
+
+	bytesWritten, err := b.sendSASLOAuthBearerClientMessage(message, correlationID)
+	if err != nil {
+		return false, err
+	}
+
 	b.updateOutgoingCommunicationMetrics(bytesWritten)
-
 	b.correlationID++
 
-	bytesRead, err := b.receiveSASLOAuthBearerServerResponse(correlationID)
-
-	if err != nil {
-		return err
-	}
+	res := &SaslAuthenticateResponse{}
+	bytesRead, err := b.receiveSASLServerResponse(res, correlationID)
 
 	requestLatency := time.Since(requestTime)
 	b.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
 
+	isChallenge := len(res.SaslAuthBytes) > 0
+
+	if isChallenge && err != nil {
+		Logger.Printf("Broker rejected authentication token: %s", res.SaslAuthBytes)
+	}
+
+	return isChallenge, err
+}
+
+func (b *Broker) sendAndReceiveSASLSCRAMv1() error {
+	if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV1); err != nil {
+		return err
+	}
+
+	scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc()
+	if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil {
+		return fmt.Errorf("failed to start SCRAM exchange with the server: %s", err.Error())
+	}
+
+	msg, err := scramClient.Step("")
+	if err != nil {
+		return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error())
+	}
+
+	for !scramClient.Done() {
+		requestTime := time.Now()
+		correlationID := b.correlationID
+		bytesWritten, err := b.sendSaslAuthenticateRequest(correlationID, []byte(msg))
+		if err != nil {
+			Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+			return err
+		}
+
+		b.updateOutgoingCommunicationMetrics(bytesWritten)
+		b.correlationID++
+		challenge, err := b.receiveSaslAuthenticateResponse(correlationID)
+		if err != nil {
+			Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+			return err
+		}
+
+		b.updateIncomingCommunicationMetrics(len(challenge), time.Since(requestTime))
+		msg, err = scramClient.Step(string(challenge))
+		if err != nil {
+			Logger.Println("SASL authentication failed", err)
+			return err
+		}
+	}
+
+	Logger.Println("SASL authentication succeeded")
 	return nil
 }
 
+func (b *Broker) sendSaslAuthenticateRequest(correlationID int32, msg []byte) (int, error) {
+	rb := &SaslAuthenticateRequest{msg}
+	req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
+	buf, err := encode(req, b.conf.MetricRegistry)
+	if err != nil {
+		return 0, err
+	}
+
+	if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil {
+		return 0, err
+	}
+
+	return b.conn.Write(buf)
+}
+
+func (b *Broker) receiveSaslAuthenticateResponse(correlationID int32) ([]byte, error) {
+	buf := make([]byte, responseLengthSize+correlationIDSize)
+	_, err := io.ReadFull(b.conn, buf)
+	if err != nil {
+		return nil, err
+	}
+
+	header := responseHeader{}
+	err = decode(buf, &header)
+	if err != nil {
+		return nil, err
+	}
+
+	if header.correlationID != correlationID {
+		return nil, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID)
+	}
+
+	buf = make([]byte, header.length-correlationIDSize)
+	_, err = io.ReadFull(b.conn, buf)
+	if err != nil {
+		return nil, err
+	}
+
+	res := &SaslAuthenticateResponse{}
+	if err := versionedDecode(buf, res, 0); err != nil {
+		return nil, err
+	}
+	if res.Err != ErrNoError {
+		return nil, res.Err
+	}
+	return res.SaslAuthBytes, nil
+}
+
 // Build SASL/OAUTHBEARER initial client response as described by RFC-7628
 // https://tools.ietf.org/html/rfc7628
-func buildClientInitialResponse(token *AccessToken) ([]byte, error) {
-
+func buildClientFirstMessage(token *AccessToken) ([]byte, error) {
 	var ext string
 
 	if token.Extensions != nil && len(token.Extensions) > 0 {
 		if _, ok := token.Extensions[SASLExtKeyAuth]; ok {
-			return []byte{}, fmt.Errorf("The extension `%s` is invalid", SASLExtKeyAuth)
+			return []byte{}, fmt.Errorf("the extension `%s` is invalid", SASLExtKeyAuth)
 		}
 		ext = "\x01" + mapToString(token.Extensions, "=", "\x01")
 	}
@@ -970,7 +1199,6 @@
 // mapToString returns a list of key-value pairs ordered by key.
 // keyValSep separates the key from the value. elemSep separates each pair.
 func mapToString(extensions map[string]string, keyValSep string, elemSep string) string {
-
 	buf := make([]string, 0, len(extensions))
 
 	for k, v := range extensions {
@@ -982,20 +1210,30 @@
 	return strings.Join(buf, elemSep)
 }
 
-func (b *Broker) sendSASLOAuthBearerClientResponse(token *AccessToken, correlationID int32) (int, error) {
-
-	initialResp, err := buildClientInitialResponse(token)
-
+func (b *Broker) sendSASLPlainAuthClientResponse(correlationID int32) (int, error) {
+	authBytes := []byte("\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password)
+	rb := &SaslAuthenticateRequest{authBytes}
+	req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
+	buf, err := encode(req, b.conf.MetricRegistry)
 	if err != nil {
 		return 0, err
 	}
 
+	err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
+	if err != nil {
+		Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
+		return 0, err
+	}
+	return b.conn.Write(buf)
+}
+
+func (b *Broker) sendSASLOAuthBearerClientMessage(initialResp []byte, correlationID int32) (int, error) {
 	rb := &SaslAuthenticateRequest{initialResp}
 
 	req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
 
 	buf, err := encode(req, b.conf.MetricRegistry)
-
 	if err != nil {
 		return 0, err
 	}
@@ -1007,12 +1245,11 @@
 	return b.conn.Write(buf)
 }
 
-func (b *Broker) receiveSASLOAuthBearerServerResponse(correlationID int32) (int, error) {
+func (b *Broker) receiveSASLServerResponse(res *SaslAuthenticateResponse, correlationID int32) (int, error) {
 
-	buf := make([]byte, 8)
+	buf := make([]byte, responseLengthSize+correlationIDSize)
 
 	bytesRead, err := io.ReadFull(b.conn, buf)
-
 	if err != nil {
 		return bytesRead, err
 	}
@@ -1020,7 +1257,6 @@
 	header := responseHeader{}
 
 	err = decode(buf, &header)
-
 	if err != nil {
 		return bytesRead, err
 	}
@@ -1029,48 +1265,39 @@
 		return bytesRead, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID)
 	}
 
-	buf = make([]byte, header.length-4)
+	buf = make([]byte, header.length-correlationIDSize)
 
 	c, err := io.ReadFull(b.conn, buf)
-
 	bytesRead += c
-
 	if err != nil {
 		return bytesRead, err
 	}
 
-	res := &SaslAuthenticateResponse{}
-
 	if err := versionedDecode(buf, res, 0); err != nil {
 		return bytesRead, err
 	}
 
-	if err != nil {
-		return bytesRead, err
-	}
-
 	if res.Err != ErrNoError {
 		return bytesRead, res.Err
 	}
 
-	if len(res.SaslAuthBytes) > 0 {
-		Logger.Printf("Received SASL auth response: %s", res.SaslAuthBytes)
-	}
-
 	return bytesRead, nil
 }
 
 func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
 	b.updateRequestLatencyMetrics(requestLatency)
 	b.responseRate.Mark(1)
+
 	if b.brokerResponseRate != nil {
 		b.brokerResponseRate.Mark(1)
 	}
+
 	responseSize := int64(bytes)
 	b.incomingByteRate.Mark(responseSize)
 	if b.brokerIncomingByteRate != nil {
 		b.brokerIncomingByteRate.Mark(responseSize)
 	}
+
 	b.responseSize.Update(responseSize)
 	if b.brokerResponseSize != nil {
 		b.brokerResponseSize.Update(responseSize)
@@ -1080,9 +1307,11 @@
 func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
 	requestLatencyInMs := int64(requestLatency / time.Millisecond)
 	b.requestLatency.Update(requestLatencyInMs)
+
 	if b.brokerRequestLatency != nil {
 		b.brokerRequestLatency.Update(requestLatencyInMs)
 	}
+
 }
 
 func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
@@ -1090,13 +1319,44 @@
 	if b.brokerRequestRate != nil {
 		b.brokerRequestRate.Mark(1)
 	}
+
 	requestSize := int64(bytes)
 	b.outgoingByteRate.Mark(requestSize)
 	if b.brokerOutgoingByteRate != nil {
 		b.brokerOutgoingByteRate.Mark(requestSize)
 	}
+
 	b.requestSize.Update(requestSize)
 	if b.brokerRequestSize != nil {
 		b.brokerRequestSize.Update(requestSize)
 	}
+
+}
+
+func (b *Broker) registerMetrics() {
+	b.brokerIncomingByteRate = b.registerMeter("incoming-byte-rate")
+	b.brokerRequestRate = b.registerMeter("request-rate")
+	b.brokerRequestSize = b.registerHistogram("request-size")
+	b.brokerRequestLatency = b.registerHistogram("request-latency-in-ms")
+	b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate")
+	b.brokerResponseRate = b.registerMeter("response-rate")
+	b.brokerResponseSize = b.registerHistogram("response-size")
+}
+
+func (b *Broker) unregisterMetrics() {
+	for _, name := range b.registeredMetrics {
+		b.conf.MetricRegistry.Unregister(name)
+	}
+}
+
+func (b *Broker) registerMeter(name string) metrics.Meter {
+	nameForBroker := getMetricNameForBroker(name, b)
+	b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
+	return metrics.GetOrRegisterMeter(nameForBroker, b.conf.MetricRegistry)
+}
+
+func (b *Broker) registerHistogram(name string) metrics.Histogram {
+	nameForBroker := getMetricNameForBroker(name, b)
+	b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
+	return getOrRegisterHistogram(nameForBroker, b.conf.MetricRegistry)
 }
diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go
index 0016f8f..c4c54b2 100644
--- a/vendor/github.com/Shopify/sarama/client.go
+++ b/vendor/github.com/Shopify/sarama/client.go
@@ -46,6 +46,10 @@
 	// the partition leader.
 	InSyncReplicas(topic string, partitionID int32) ([]int32, error)
 
+	// OfflineReplicas returns the set of all offline replica IDs for the given
+	// partition. Offline replicas are replicas which are not currently
+	// available, for example because their broker is down.
+
 	// RefreshMetadata takes a list of topics and queries the cluster to refresh the
 	// available metadata for those topics. If no topics are provided, it will refresh
 	// metadata for all topics.
@@ -288,7 +292,8 @@
 		partitions = client.cachedPartitions(topic, allPartitions)
 	}
 
-	if partitions == nil {
+	// no partitions found after refreshing metadata
+	if len(partitions) == 0 {
 		return nil, ErrUnknownTopicOrPartition
 	}
 
@@ -373,6 +378,31 @@
 	return dupInt32Slice(metadata.Isr), nil
 }
 
+func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	metadata := client.cachedMetadata(topic, partitionID)
+
+	if metadata == nil {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		metadata = client.cachedMetadata(topic, partitionID)
+	}
+
+	if metadata == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	if metadata.Err == ErrReplicaNotAvailable {
+		return dupInt32Slice(metadata.OfflineReplicas), metadata.Err
+	}
+	return dupInt32Slice(metadata.OfflineReplicas), nil
+}
+
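
A short usage sketch of the new accessor, assuming an existing Client; topic and partition are placeholders. Note that when the partition metadata carries ErrReplicaNotAvailable, the offline replica list is still returned alongside the error:

```go
import (
	"fmt"
	"log"

	sarama "github.com/Shopify/sarama"
)

// printOffline is a hedged sketch of inspecting offline replicas.
func printOffline(client sarama.Client) {
	offline, err := client.OfflineReplicas("events", 0)
	if err != nil && err != sarama.ErrReplicaNotAvailable {
		log.Fatal(err)
	}
	fmt.Println("offline replicas:", offline)
}
```
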
 func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
 	if client.Closed() {
 		return nil, ErrClosedClient
@@ -405,7 +435,11 @@
 		}
 	}
 
-	return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
+	deadline := time.Time{}
+	if client.conf.Metadata.Timeout > 0 {
+		deadline = time.Now().Add(client.conf.Metadata.Timeout)
+	}
+	return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline)
 }
 
 func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
@@ -707,32 +741,47 @@
 	return nil
 }
 
-func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
+func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, deadline time.Time) error {
+	pastDeadline := func(backoff time.Duration) bool {
+		if !deadline.IsZero() && time.Now().Add(backoff).After(deadline) {
+			// we are past the deadline
+			return true
+		}
+		return false
+	}
 	retry := func(err error) error {
 		if attemptsRemaining > 0 {
 			backoff := client.computeBackoff(attemptsRemaining)
+			if pastDeadline(backoff) {
+				Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout")
+				return err
+			}
 			Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
 			if backoff > 0 {
 				time.Sleep(backoff)
 			}
-			return client.tryRefreshMetadata(topics, attemptsRemaining-1)
+			return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline)
 		}
 		return err
 	}
 
-	for broker := client.any(); broker != nil; broker = client.any() {
+	broker := client.any()
+	for ; broker != nil && !pastDeadline(0); broker = client.any() {
+		allowAutoTopicCreation := true
 		if len(topics) > 0 {
 			Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
 		} else {
+			allowAutoTopicCreation = false
 			Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
 		}
 
-		req := &MetadataRequest{Topics: topics}
-		if client.conf.Version.IsAtLeast(V0_10_0_0) {
+		req := &MetadataRequest{Topics: topics, AllowAutoTopicCreation: allowAutoTopicCreation}
+		if client.conf.Version.IsAtLeast(V1_0_0_0) {
+			req.Version = 5
+		} else if client.conf.Version.IsAtLeast(V0_10_0_0) {
 			req.Version = 1
 		}
 		response, err := broker.GetMetadata(req)
-
 		switch err.(type) {
 		case nil:
 			allKnownMetaData := len(topics) == 0
@@ -747,6 +796,18 @@
 		case PacketEncodingError:
 			// didn't even send, return the error
 			return err
+
+		case KError:
+			// if this is a SASL auth error, return it, as it _should_ be non-retryable for all brokers
+			if err.(KError) == ErrSASLAuthenticationFailed {
+				Logger.Println("client/metadata failed SASL authentication")
+				return err
+			}
+			// else remove that broker and try again
+			Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
+			_ = broker.Close()
+			client.deregisterBroker(broker)
+
 		default:
 			// some other error, remove that broker and try again
 			Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
@@ -755,6 +816,11 @@
 		}
 	}
 
+	if broker != nil {
+		Logger.Println("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
+		return retry(ErrOutOfBrokers)
+	}
+
 	Logger.Println("client/metadata no available broker to send metadata request to")
 	client.resurrectDeadBrokers()
 	return retry(ErrOutOfBrokers)
@@ -792,7 +858,7 @@
 
 		switch topic.Err {
 		case ErrNoError:
-			break
+			// no-op
 		case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
 			err = topic.Err
 			continue
@@ -802,7 +868,6 @@
 			continue
 		case ErrLeaderNotAvailable: // retry, but store partial partition results
 			retry = true
-			break
 		default: // don't retry, don't store partial results
 			Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
 			err = topic.Err
@@ -847,9 +912,8 @@
 		maxRetries := client.conf.Metadata.Retry.Max
 		retries := maxRetries - attemptsRemaining
 		return client.conf.Metadata.Retry.BackoffFunc(retries, maxRetries)
-	} else {
-		return client.conf.Metadata.Retry.Backoff
 	}
+	return client.conf.Metadata.Retry.Backoff
 }
 
 func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) {
@@ -911,3 +975,18 @@
 	client.resurrectDeadBrokers()
 	return retry(ErrOutOfBrokers)
 }
+
+// nopCloserClient embeds an existing Client, but disables
+// the Close method (yet all other methods pass
+// through unchanged). This is for use in larger structs
+// where it is undesirable to close the client that was
+// passed in by the caller.
+type nopCloserClient struct {
+	Client
+}
+
+// Close intercepts and purposely does not call the underlying
+// client's Close() method.
+func (ncc *nopCloserClient) Close() error {
+	return nil
+}
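
The same embed-and-shadow pattern works for any Go interface. A minimal sketch outside sarama, mirroring nopCloserClient with io.ReadCloser:

```go
import "io"

// nopCloserReader inherits every io.ReadCloser method via embedding; the
// shadowing Close neutralizes just that one method.
type nopCloserReader struct {
	io.ReadCloser
}

// Close intercepts and purposely does not call the underlying Close.
func (nopCloserReader) Close() error { return nil }
```
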
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
index 9495b7f..e2e6513 100644
--- a/vendor/github.com/Shopify/sarama/config.go
+++ b/vendor/github.com/Shopify/sarama/config.go
@@ -10,6 +10,7 @@
 	"time"
 
 	"github.com/rcrowley/go-metrics"
+	"golang.org/x/net/proxy"
 )
 
 const defaultClientID = "sarama"
@@ -57,18 +58,28 @@
 			// SASLMechanism is the name of the enabled SASL mechanism.
 			// Possible values: OAUTHBEARER, PLAIN (defaults to PLAIN).
 			Mechanism SASLMechanism
+			// Version is the SASL Protocol Version to use
+			// Kafka > 1.x should use V1, except on Azure EventHub which uses V0
+			Version int16
 			// Whether or not to send the Kafka SASL handshake first if enabled
 			// (defaults to true). You should only set this to false if you're using
 			// a non-Kafka SASL proxy.
 			Handshake bool
-			//username and password for SASL/PLAIN authentication
+			//username and password for SASL/PLAIN or SASL/SCRAM authentication
 			User     string
 			Password string
+			// authz id used for SASL/SCRAM authentication
+			SCRAMAuthzID string
+			// SCRAMClientGeneratorFunc is a generator of a user provided implementation of a SCRAM
+			// client used to perform the SCRAM exchange with the server.
+			SCRAMClientGeneratorFunc func() SCRAMClient
 			// TokenProvider is a user-defined callback for generating
 			// access tokens for SASL/OAUTHBEARER auth. See the
 			// AccessTokenProvider interface docs for proper implementation
 			// guidelines.
 			TokenProvider AccessTokenProvider
+
+			GSSAPI GSSAPIConfig
 		}
 
 		// KeepAlive specifies the keep-alive period for an active network connection.
@@ -80,6 +91,14 @@
 		// network being dialed.
 		// If nil, a local address is automatically chosen.
 		LocalAddr net.Addr
+
+		Proxy struct {
+			// Whether or not to use proxy when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// The proxy dialer to use when Enable is true (defaults to nil).
+			Dialer proxy.Dialer
+		}
 	}
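
A hedged sketch of routing broker connections through a SOCKS5 proxy with the new Net.Proxy options, using golang.org/x/net/proxy (already imported by config.go in this diff); the proxy address is a placeholder:

```go
import (
	"log"

	sarama "github.com/Shopify/sarama"
	"golang.org/x/net/proxy"
)

// newProxiedConfig is a hedged sketch of proxied broker dialing.
func newProxiedConfig() *sarama.Config {
	dialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}
	cfg := sarama.NewConfig()
	cfg.Net.Proxy.Enable = true
	cfg.Net.Proxy.Dialer = dialer
	return cfg
}
```
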
 
 	// Metadata is the namespace for metadata management properties used by the
@@ -107,6 +126,13 @@
 		// and usually more convenient, but can take up a substantial amount of
 		// memory if you have many topics and partitions. Defaults to true.
 		Full bool
+
+		// How long to wait for a successful metadata response.
+		// Disabled by default which means a metadata request against an unreachable
+		// cluster (all brokers are unreachable or unresponsive) can take up to
+		// `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max`
+		// to fail.
+		Timeout time.Duration
 	}
 
 	// Producer is the namespace for configuration related to producing messages,
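
The new Metadata.Timeout caps the worst case described above; tryRefreshMetadata skips remaining retries once the deadline would be exceeded. A minimal sketch of bounding total refresh time:

```go
import (
	"time"

	sarama "github.com/Shopify/sarama"
)

// newBoundedMetadataConfig is a hedged sketch; the 10s bound is arbitrary.
func newBoundedMetadataConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Metadata.Retry.Max = 3
	cfg.Metadata.Retry.Backoff = 250 * time.Millisecond
	cfg.Metadata.Timeout = 10 * time.Second // give up refreshing past this deadline
	return cfg
}
```
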
@@ -333,6 +359,11 @@
 				Max int
 			}
 		}
+
+		// IsolationLevel supports 2 modes:
+		// 	- use `ReadUncommitted` (default) to consume and return all messages in message channel
+		//	- use `ReadCommitted` to hide messages that are part of an aborted transaction
+		IsolationLevel IsolationLevel
 	}
 
 	// A user-provided string sent with every request to the brokers for logging,
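
A minimal sketch of enabling the ReadCommitted mode introduced above; per the validation added later in this diff, it requires Version >= V0_11_0_0:

```go
import sarama "github.com/Shopify/sarama"

// newReadCommittedConfig is a hedged sketch of transactional reads.
func newReadCommittedConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_11_0_0
	cfg.Consumer.IsolationLevel = sarama.ReadCommitted // hide aborted-txn messages
	return cfg
}
```
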
@@ -370,6 +401,7 @@
 	c.Net.ReadTimeout = 30 * time.Second
 	c.Net.WriteTimeout = 30 * time.Second
 	c.Net.SASL.Handshake = true
+	c.Net.SASL.Version = SASLHandshakeV0
 
 	c.Metadata.Retry.Max = 3
 	c.Metadata.Retry.Backoff = 250 * time.Millisecond
@@ -414,10 +446,10 @@
 // ConfigurationError if the specified values don't make sense.
 func (c *Config) Validate() error {
 	// some configuration values should be warned on but not fail completely, do those first
-	if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil {
+	if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
 		Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
 	}
-	if c.Net.SASL.Enable == false {
+	if !c.Net.SASL.Enable {
 		if c.Net.SASL.User != "" {
 			Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
 		}
@@ -475,22 +507,62 @@
 	case c.Net.KeepAlive < 0:
 		return ConfigurationError("Net.KeepAlive must be >= 0")
 	case c.Net.SASL.Enable:
-		// For backwards compatibility, empty mechanism value defaults to PLAIN
-		isSASLPlain := len(c.Net.SASL.Mechanism) == 0 || c.Net.SASL.Mechanism == SASLTypePlaintext
-		if isSASLPlain {
+		if c.Net.SASL.Mechanism == "" {
+			c.Net.SASL.Mechanism = SASLTypePlaintext
+		}
+
+		switch c.Net.SASL.Mechanism {
+		case SASLTypePlaintext:
 			if c.Net.SASL.User == "" {
 				return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
 			}
 			if c.Net.SASL.Password == "" {
 				return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
 			}
-		} else if c.Net.SASL.Mechanism == SASLTypeOAuth {
+		case SASLTypeOAuth:
 			if c.Net.SASL.TokenProvider == nil {
-				return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.User.TokenProvider")
+				return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider")
 			}
-		} else {
-			msg := fmt.Sprintf("The SASL mechanism configuration is invalid. Possible values are `%s` and `%s`",
-				SASLTypeOAuth, SASLTypePlaintext)
+		case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
+			if c.Net.SASL.User == "" {
+				return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
+			}
+			if c.Net.SASL.Password == "" {
+				return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
+			}
+			if c.Net.SASL.SCRAMClientGeneratorFunc == nil {
+				return ConfigurationError("A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc")
+			}
+		case SASLTypeGSSAPI:
+			if c.Net.SASL.GSSAPI.ServiceName == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used")
+			}
+
+			if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH {
+				if c.Net.SASL.GSSAPI.Password == "" {
+					return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " +
+						"mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH")
+				}
+			} else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH {
+				if c.Net.SASL.GSSAPI.KeyTabPath == "" {
+					return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" +
+						" and  Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH")
+				}
+			} else {
+				return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH")
+			}
+			if c.Net.SASL.GSSAPI.KerberosConfigPath == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used")
+			}
+			if c.Net.SASL.GSSAPI.Username == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used")
+			}
+			if c.Net.SASL.GSSAPI.Realm == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used")
+			}
+		default:
+			msg := fmt.Sprintf("The SASL mechanism configuration is invalid. Possible values are `%s`, `%s`, `%s`, `%s` and `%s`",
+				SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512, SASLTypeGSSAPI)
 			return ConfigurationError(msg)
 		}
 	}
@@ -584,6 +656,13 @@
 		return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
 	case c.Consumer.Offsets.Retry.Max < 0:
 		return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0")
+	case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted:
+		return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted")
+	}
+
+	// validate that the IsolationLevel is compatible with the configured Kafka version
+	if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) {
+		return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0")
 	}
 
 	// validate the Consumer Group values
diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/Shopify/sarama/config_resource_type.go
index 848cc9c..5399d75 100644
--- a/vendor/github.com/Shopify/sarama/config_resource_type.go
+++ b/vendor/github.com/Shopify/sarama/config_resource_type.go
@@ -1,15 +1,22 @@
 package sarama
 
+//ConfigResourceType is a type for config resource
 type ConfigResourceType int8
 
 // Taken from :
 // https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs#KIP-133:DescribeandAlterConfigsAdminAPIs-WireFormattypes
 
 const (
-	UnknownResource ConfigResourceType = 0
-	AnyResource     ConfigResourceType = 1
-	TopicResource   ConfigResourceType = 2
-	GroupResource   ConfigResourceType = 3
-	ClusterResource ConfigResourceType = 4
-	BrokerResource  ConfigResourceType = 5
+	//UnknownResource constant type
+	UnknownResource ConfigResourceType = iota
+	//AnyResource constant type
+	AnyResource
+	//TopicResource constant type
+	TopicResource
+	//GroupResource constant type
+	GroupResource
+	//ClusterResource constant type
+	ClusterResource
+	//BrokerResource constant type
+	BrokerResource
 )
diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go
index ce72ff1..72c4d7c 100644
--- a/vendor/github.com/Shopify/sarama/consumer.go
+++ b/vendor/github.com/Shopify/sarama/consumer.go
@@ -3,20 +3,24 @@
 import (
 	"errors"
 	"fmt"
+	"math"
 	"sync"
 	"sync/atomic"
 	"time"
+
+	"github.com/rcrowley/go-metrics"
 )
 
 // ConsumerMessage encapsulates a Kafka message returned by the consumer.
 type ConsumerMessage struct {
-	Key, Value     []byte
-	Topic          string
-	Partition      int32
-	Offset         int64
+	Headers        []*RecordHeader // only set if kafka is version 0.11+
 	Timestamp      time.Time       // only set if kafka is version 0.10+, inner message timestamp
 	BlockTimestamp time.Time       // only set if kafka is version 0.10+, outer (compressed) block timestamp
-	Headers        []*RecordHeader // only set if kafka is version 0.11+
+
+	Key, Value []byte
+	Topic      string
+	Partition  int32
+	Offset     int64
 }
 
 // ConsumerError is what is provided to the user when an error occurs.
@@ -43,13 +47,7 @@
 // Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
 // on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of
 // scope.
-//
-// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking.
-// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library
-// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the
-// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
 type Consumer interface {
-
 	// Topics returns the set of available topics as retrieved from the cluster
 	// metadata. This method is the same as Client.Topics(), and is provided for
 	// convenience.
@@ -75,13 +73,11 @@
 }
 
 type consumer struct {
-	client    Client
-	conf      *Config
-	ownClient bool
-
-	lock            sync.Mutex
+	conf            *Config
 	children        map[string]map[int32]*partitionConsumer
 	brokerConsumers map[*Broker]*brokerConsumer
+	client          Client
+	lock            sync.Mutex
 }
 
 // NewConsumer creates a new consumer using the given broker addresses and configuration.
@@ -90,18 +86,19 @@
 	if err != nil {
 		return nil, err
 	}
-
-	c, err := NewConsumerFromClient(client)
-	if err != nil {
-		return nil, err
-	}
-	c.(*consumer).ownClient = true
-	return c, nil
+	return newConsumer(client)
 }
 
 // NewConsumerFromClient creates a new consumer using the given client. It is still
 // necessary to call Close() on the underlying client when shutting down this consumer.
 func NewConsumerFromClient(client Client) (Consumer, error) {
+	// For a client passed in by the caller, ensure we don't
+	// call Close() on it.
+	cli := &nopCloserClient{client}
+	return newConsumer(cli)
+}
+
+func newConsumer(client Client) (Consumer, error) {
 	// Check that we are not dealing with a closed Client before processing any other arguments
 	if client.Closed() {
 		return nil, ErrClosedClient
@@ -118,10 +115,7 @@
 }
 
 func (c *consumer) Close() error {
-	if c.ownClient {
-		return c.client.Close()
-	}
-	return nil
+	return c.client.Close()
 }
 
 func (c *consumer) Topics() ([]string, error) {
@@ -261,12 +255,11 @@
 // or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
 //
 // To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
-// consumer tear-down & return imediately. Continue to loop, servicing the Messages channel until the teardown process
+// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
 // AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
 // Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
 // also drain the Messages channel, harvest all errors & return them once cleanup has completed.
 type PartitionConsumer interface {
-
 	// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
 	// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
 	// function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call
@@ -298,24 +291,22 @@
 
 type partitionConsumer struct {
 	highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
-	consumer            *consumer
-	conf                *Config
-	topic               string
-	partition           int32
 
+	consumer *consumer
+	conf     *Config
 	broker   *brokerConsumer
 	messages chan *ConsumerMessage
 	errors   chan *ConsumerError
 	feeder   chan *FetchResponse
 
 	trigger, dying chan none
-	responseResult error
 	closeOnce      sync.Once
-
-	fetchSize int32
-	offset    int64
-
-	retries int32
+	topic          string
+	partition      int32
+	responseResult error
+	fetchSize      int32
+	offset         int64
+	retries        int32
 }
 
 var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
@@ -338,9 +329,8 @@
 	if child.conf.Consumer.Retry.BackoffFunc != nil {
 		retries := atomic.AddInt32(&child.retries, 1)
 		return child.conf.Consumer.Retry.BackoffFunc(int(retries))
-	} else {
-		return child.conf.Consumer.Retry.Backoff
 	}
+	return child.conf.Consumer.Retry.Backoff
 }
 
 func (child *partitionConsumer) dispatcher() {
@@ -432,12 +422,6 @@
 func (child *partitionConsumer) Close() error {
 	child.AsyncClose()
 
-	go withRecover(func() {
-		for range child.messages {
-			// drain
-		}
-	})
-
 	var errors ConsumerErrors
 	for err := range child.errors {
 		errors = append(errors, err)
@@ -469,14 +453,22 @@
 		for i, msg := range msgs {
 		messageSelect:
 			select {
+			case <-child.dying:
+				child.broker.acks.Done()
+				continue feederLoop
 			case child.messages <- msg:
 				firstAttempt = true
 			case <-expiryTicker.C:
 				if !firstAttempt {
 					child.responseResult = errTimedOut
 					child.broker.acks.Done()
+				remainingLoop:
 					for _, msg = range msgs[i:] {
-						child.messages <- msg
+						select {
+						case child.messages <- msg:
+						case <-child.dying:
+							break remainingLoop
+						}
 					}
 					child.broker.input <- child
 					continue feederLoop
@@ -532,7 +524,8 @@
 }
 
 func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
-	var messages []*ConsumerMessage
+	messages := make([]*ConsumerMessage, 0, len(batch.Records))
+
 	for _, rec := range batch.Records {
 		offset := batch.FirstOffset + rec.OffsetDelta
 		if offset < child.offset {
@@ -560,6 +553,23 @@
 }
 
 func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
+	var (
+		metricRegistry          = child.conf.MetricRegistry
+		consumerBatchSizeMetric metrics.Histogram
+	)
+
+	if metricRegistry != nil {
+		consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", metricRegistry)
+	}
+
+	// If request was throttled and empty we log and return without error
+	if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 {
+		Logger.Printf(
+			"consumer/broker/%d FetchResponse throttled %v\n",
+			child.broker.broker.ID(), response.ThrottleTime)
+		return nil, nil
+	}
+
 	block := response.GetBlock(child.topic, child.partition)
 	if block == nil {
 		return nil, ErrIncompleteResponse
@@ -573,6 +583,9 @@
 	if err != nil {
 		return nil, err
 	}
+
+	consumerBatchSizeMetric.Update(int64(nRecs))
+
 	if nRecs == 0 {
 		partialTrailingMessage, err := block.isPartial()
 		if err != nil {
@@ -587,6 +600,10 @@
 				child.offset++ // skip this one so we can keep processing future messages
 			} else {
 				child.fetchSize *= 2
+				// check int32 overflow
+				if child.fetchSize < 0 {
+					child.fetchSize = math.MaxInt32
+				}
 				if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
 					child.fetchSize = child.conf.Consumer.Fetch.Max
 				}
@@ -600,6 +617,12 @@
 	child.fetchSize = child.conf.Consumer.Fetch.Default
 	atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
 
+	// abortedProducerIDs contains the producer IDs whose messages should be ignored as uncommitted
+	// - a producer ID is added when the partitionConsumer reaches the offset at which an aborted transaction begins (abortedTransaction.FirstOffset)
+	// - a producer ID is removed when the partitionConsumer reaches an abort control record, meaning the aborted transaction for this producer is over
+	abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions))
+	abortedTransactions := block.getAbortedTransactions()
+
 	messages := []*ConsumerMessage{}
 	for _, records := range block.RecordsSet {
 		switch records.recordsType {
@@ -611,13 +634,55 @@
 
 			messages = append(messages, messageSetMessages...)
 		case defaultRecords:
+			// Consume remaining abortedTransaction up to last offset of current batch
+			for _, txn := range abortedTransactions {
+				if txn.FirstOffset > records.RecordBatch.LastOffset() {
+					break
+				}
+				abortedProducerIDs[txn.ProducerID] = struct{}{}
+				// Pop abortedTransactions so that we never add the same transaction again
+				abortedTransactions = abortedTransactions[1:]
+			}
+
 			recordBatchMessages, err := child.parseRecords(records.RecordBatch)
 			if err != nil {
 				return nil, err
 			}
-			if control, err := records.isControl(); err != nil || control {
+
+			// Parse and commit offset but do not expose messages that are:
+			// - control records
+			// - part of an aborted transaction when the isolation level is `ReadCommitted`
+
+			// control record
+			isControl, err := records.isControl()
+			if err != nil {
+				// It is unclear why the original code continued on error here to begin with.
+				// The safe bet is to ignore control messages on error under ReadUncommitted,
+				// and to surface the error under ReadCommitted.
+				if child.conf.Consumer.IsolationLevel == ReadCommitted {
+					return nil, err
+				}
 				continue
 			}
+			if isControl {
+				controlRecord, err := records.getControlRecord()
+				if err != nil {
+					return nil, err
+				}
+
+				if controlRecord.Type == ControlRecordAbort {
+					delete(abortedProducerIDs, records.RecordBatch.ProducerID)
+				}
+				continue
+			}
+
+			// filter aborted transactions
+			if child.conf.Consumer.IsolationLevel == ReadCommitted {
+				_, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID]
+				if records.RecordBatch.IsTransactional && isAborted {
+					continue
+				}
+			}
 
 			messages = append(messages, recordBatchMessages...)
 		default:
@@ -628,15 +693,13 @@
 	return messages, nil
 }
 
-// brokerConsumer
-
 type brokerConsumer struct {
 	consumer         *consumer
 	broker           *Broker
 	input            chan *partitionConsumer
 	newSubscriptions chan []*partitionConsumer
-	wait             chan none
 	subscriptions    map[*partitionConsumer]none
+	wait             chan none
 	acks             sync.WaitGroup
 	refs             int
 }
@@ -658,14 +721,14 @@
 	return bc
 }
 
+// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
+// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
+// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
+// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
+// so the main goroutine can block waiting for work if it has none.
 func (bc *brokerConsumer) subscriptionManager() {
 	var buffer []*partitionConsumer
 
-	// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
-	// goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks
-	// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
-	// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions is available,
-	// so the main goroutine can block waiting for work if it has none.
 	for {
 		if len(buffer) > 0 {
 			select {
@@ -698,10 +761,10 @@
 	close(bc.newSubscriptions)
 }
 
+// subscriptionConsumer ensures we will get nil right away if no new subscriptions are available
 func (bc *brokerConsumer) subscriptionConsumer() {
 	<-bc.wait // wait for our first piece of work
 
-	// the subscriptionConsumer ensures we will get nil right away if no new subscriptions is available
 	for newSubscriptions := range bc.newSubscriptions {
 		bc.updateSubscriptions(newSubscriptions)
 
@@ -742,20 +805,20 @@
 			close(child.trigger)
 			delete(bc.subscriptions, child)
 		default:
-			break
+			// no-op
 		}
 	}
 }
 
+// handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
 func (bc *brokerConsumer) handleResponses() {
-	// handles the response codes left for us by our subscriptions, and abandons ones that have been closed
 	for child := range bc.subscriptions {
 		result := child.responseResult
 		child.responseResult = nil
 
 		switch result {
 		case nil:
-			break
+			// no-op
 		case errTimedOut:
 			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
 				bc.broker.ID(), child.topic, child.partition)
@@ -822,7 +885,7 @@
 	}
 	if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
 		request.Version = 4
-		request.Isolation = ReadUncommitted // We don't support yet transactions.
+		request.Isolation = bc.consumer.conf.Consumer.IsolationLevel
 	}
 
 	for child := range bc.subscriptions {
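
As a minimal sketch of what the isolation-level plumbing above enables (broker address and topic are hypothetical; `Consumer.IsolationLevel` is the config field now copied into the fetch request):

    conf := sarama.NewConfig()
    conf.Version = sarama.V0_11_0_0 // FetchRequest v4 carries the isolation level
    conf.Consumer.IsolationLevel = sarama.ReadCommitted

    consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, conf)
    if err != nil {
        panic(err)
    }
    defer consumer.Close() // the internally created client is now closed exactly once

    pc, err := consumer.ConsumePartition("my-topic", 0, sarama.OffsetOldest)
    if err != nil {
        panic(err)
    }
    defer pc.Close()

    for msg := range pc.Messages() {
        _ = msg // under ReadCommitted, messages from aborted transactions never reach this channel
    }
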
diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go
index 8c8babc..8de9513 100644
--- a/vendor/github.com/Shopify/sarama/consumer_group.go
+++ b/vendor/github.com/Shopify/sarama/consumer_group.go
@@ -52,8 +52,7 @@
 }
 
 type consumerGroup struct {
-	client    Client
-	ownClient bool
+	client Client
 
 	config   *Config
 	consumer Consumer
@@ -73,20 +72,24 @@
 		return nil, err
 	}
 
-	c, err := NewConsumerGroupFromClient(groupID, client)
+	c, err := newConsumerGroup(groupID, client)
 	if err != nil {
 		_ = client.Close()
-		return nil, err
 	}
-
-	c.(*consumerGroup).ownClient = true
-	return c, nil
+	return c, err
 }
 
 // NewConsumerGroupFromClient creates a new consumer group using the given client. It is still
 // necessary to call Close() on the underlying client when shutting down this consumer.
 // PLEASE NOTE: consumer groups can only re-use but not share clients.
 func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) {
+	// For a client passed in by the caller, ensure we don't
+	// call Close() on it.
+	cli := &nopCloserClient{client}
+	return newConsumerGroup(groupID, cli)
+}
+
+func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) {
 	config := client.Config()
 	if !config.Version.IsAtLeast(V0_10_2_0) {
 		return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0")
@@ -131,10 +134,8 @@
 			err = e
 		}
 
-		if c.ownClient {
-			if e := c.client.Close(); e != nil {
-				err = e
-			}
+		if e := c.client.Close(); e != nil {
+			err = e
 		}
 	})
 	return
@@ -162,14 +163,8 @@
 		return err
 	}
 
-	// Get coordinator
-	coordinator, err := c.client.Coordinator(c.groupID)
-	if err != nil {
-		return err
-	}
-
 	// Init session
-	sess, err := c.newSession(ctx, coordinator, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max)
+	sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max)
 	if err == ErrClosedClient {
 		return ErrClosedConsumerGroup
 	} else if err != nil {
@@ -183,7 +178,33 @@
 	return sess.release(true)
 }
 
-func (c *consumerGroup) newSession(ctx context.Context, coordinator *Broker, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) {
+func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) {
+	select {
+	case <-c.closed:
+		return nil, ErrClosedConsumerGroup
+	case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
+	}
+
+	if refreshCoordinator {
+		err := c.client.RefreshCoordinator(c.groupID)
+		if err != nil {
+			return c.retryNewSession(ctx, topics, handler, retries, true)
+		}
+	}
+
+	return c.newSession(ctx, topics, handler, retries-1)
+}
+
+func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) {
+	coordinator, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		if retries <= 0 {
+			return nil, err
+		}
+
+		return c.retryNewSession(ctx, topics, handler, retries, true)
+	}
+
 	// Join consumer group
 	join, err := c.joinGroupRequest(coordinator, topics)
 	if err != nil {
@@ -195,19 +216,19 @@
 		c.memberID = join.MemberId
 	case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
 		c.memberID = ""
-		return c.newSession(ctx, coordinator, topics, handler, retries)
+		return c.newSession(ctx, topics, handler, retries)
+	case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
+		if retries <= 0 {
+			return nil, join.Err
+		}
+
+		return c.retryNewSession(ctx, topics, handler, retries, true)
 	case ErrRebalanceInProgress: // retry after backoff
 		if retries <= 0 {
 			return nil, join.Err
 		}
 
-		select {
-		case <-c.closed:
-			return nil, ErrClosedConsumerGroup
-		case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
-		}
-
-		return c.newSession(ctx, coordinator, topics, handler, retries-1)
+		return c.retryNewSession(ctx, topics, handler, retries, false)
 	default:
 		return nil, join.Err
 	}
@@ -236,19 +257,19 @@
 	case ErrNoError:
 	case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
 		c.memberID = ""
-		return c.newSession(ctx, coordinator, topics, handler, retries)
+		return c.newSession(ctx, topics, handler, retries)
+	case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
+		if retries <= 0 {
+			return nil, sync.Err
+		}
+
+		return c.retryNewSession(ctx, topics, handler, retries, true)
 	case ErrRebalanceInProgress: // retry after backoff
 		if retries <= 0 {
 			return nil, sync.Err
 		}
 
-		select {
-		case <-c.closed:
-			return nil, ErrClosedConsumerGroup
-		case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
-		}
-
-		return c.newSession(ctx, coordinator, topics, handler, retries-1)
+		return c.retryNewSession(ctx, topics, handler, retries, false)
 	default:
 		return nil, sync.Err
 	}
@@ -613,7 +634,7 @@
 	s.releaseOnce.Do(func() {
 		if withCleanup {
 			if e := s.handler.Cleanup(s); e != nil {
-				s.parent.handleError(err, "", -1)
+				s.parent.handleError(e, "", -1)
 				err = e
 			}
 		}
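
A sketch of how the new session-retry path is exercised from the public API (group ID, topic, and addresses are hypothetical; the no-op handler just marks messages):

    type noopHandler struct{}

    func (noopHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
    func (noopHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
    func (noopHandler) ConsumeClaim(s sarama.ConsumerGroupSession, c sarama.ConsumerGroupClaim) error {
        for msg := range c.Messages() {
            s.MarkMessage(msg, "")
        }
        return nil
    }

    func consume(ctx context.Context) error {
        conf := sarama.NewConfig()
        conf.Version = sarama.V0_10_2_0 // consumer groups require >= 0.10.2
        group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "my-group", conf)
        if err != nil {
            return err
        }
        defer group.Close()
        for {
            // Each Consume call builds a session; on ErrNotCoordinatorForConsumer it
            // now refreshes the coordinator and backs off before retrying.
            if err := group.Consume(ctx, []string{"my-topic"}, noopHandler{}); err != nil {
                return err
            }
        }
    }
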
diff --git a/vendor/github.com/Shopify/sarama/control_record.go b/vendor/github.com/Shopify/sarama/control_record.go
new file mode 100644
index 0000000..9b75ab5
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/control_record.go
@@ -0,0 +1,72 @@
+package sarama
+
+// ControlRecordType indicates the type of a control record.
+type ControlRecordType int
+
+const (
+	// ControlRecordAbort is a control record for abort
+	ControlRecordAbort ControlRecordType = iota
+	// ControlRecordCommit is a control record for commit
+	ControlRecordCommit
+	// ControlRecordUnknown is a control record of unknown type
+	ControlRecordUnknown
+)
+
+// Control records are returned as records by a fetch request.
+// However, unlike "normal" records, they carry no application-level meaning;
+// they only serve internal logic for supporting transactions.
+type ControlRecord struct {
+	Version          int16
+	CoordinatorEpoch int32
+	Type             ControlRecordType
+}
+
+func (cr *ControlRecord) decode(key, value packetDecoder) error {
+	var err error
+	cr.Version, err = value.getInt16()
+	if err != nil {
+		return err
+	}
+
+	cr.CoordinatorEpoch, err = value.getInt32()
+	if err != nil {
+		return err
+	}
+
+	// There is a version for the value part AND the key part, and it is unclear whether they are supposed to match.
+	// Either way, all these versions can only be 0 for now.
+	cr.Version, err = key.getInt16()
+	if err != nil {
+		return err
+	}
+
+	recordType, err := key.getInt16()
+	if err != nil {
+		return err
+	}
+
+	switch recordType {
+	case 0:
+		cr.Type = ControlRecordAbort
+	case 1:
+		cr.Type = ControlRecordCommit
+	default:
+		// from JAVA implementation:
+		// UNKNOWN is used to indicate a control type which the client is not aware of and should be ignored
+		cr.Type = ControlRecordUnknown
+	}
+	return nil
+}
+
+func (cr *ControlRecord) encode(key, value packetEncoder) {
+	value.putInt16(cr.Version)
+	value.putInt32(cr.CoordinatorEpoch)
+	key.putInt16(cr.Version)
+
+	switch cr.Type {
+	case ControlRecordAbort:
+		key.putInt16(0)
+	case ControlRecordCommit:
+		key.putInt16(1)
+	}
+}
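
For reference, the key layout decoded above is two big-endian int16s, version then type; a standalone sketch of the same mapping (the helper name is hypothetical):

    func controlTypeFromKey(key []byte) ControlRecordType {
        if len(key) < 4 {
            return ControlRecordUnknown
        }
        // key[0:2] is the version, key[2:4] the record type
        switch binary.BigEndian.Uint16(key[2:4]) {
        case 0:
            return ControlRecordAbort
        case 1:
            return ControlRecordCommit
        default:
            return ControlRecordUnknown
        }
    }
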
diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go
index 1f14443..38189a3 100644
--- a/vendor/github.com/Shopify/sarama/crc32_field.go
+++ b/vendor/github.com/Shopify/sarama/crc32_field.go
@@ -4,6 +4,7 @@
 	"encoding/binary"
 	"fmt"
 	"hash/crc32"
+	"sync"
 )
 
 type crcPolynomial int8
@@ -13,6 +14,22 @@
 	crcCastagnoli
 )
 
+var crc32FieldPool = sync.Pool{}
+
+func acquireCrc32Field(polynomial crcPolynomial) *crc32Field {
+	val := crc32FieldPool.Get()
+	if val != nil {
+		c := val.(*crc32Field)
+		c.polynomial = polynomial
+		return c
+	}
+	return newCRC32Field(polynomial)
+}
+
+func releaseCrc32Field(c *crc32Field) {
+	crc32FieldPool.Put(c)
+}
+
 var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
 
 // crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
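
The acquire/release helpers follow the standard sync.Pool pattern; the same shape on a generic type, for reference (names hypothetical):

    var bufPool = sync.Pool{}

    func acquireBuf() *bytes.Buffer {
        if v := bufPool.Get(); v != nil {
            b := v.(*bytes.Buffer)
            b.Reset() // re-initialize recycled values, just as the polynomial is reset above
            return b
        }
        return &bytes.Buffer{} // pool empty: allocate fresh
    }

    func releaseBuf(b *bytes.Buffer) {
        bufPool.Put(b)
    }
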
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go
index abd621c..bb18204 100644
--- a/vendor/github.com/Shopify/sarama/create_partitions_response.go
+++ b/vendor/github.com/Shopify/sarama/create_partitions_response.go
@@ -1,6 +1,9 @@
 package sarama
 
-import "time"
+import (
+	"fmt"
+	"time"
+)
 
 type CreatePartitionsResponse struct {
 	ThrottleTime         time.Duration
@@ -69,6 +72,14 @@
 	ErrMsg *string
 }
 
+func (t *TopicPartitionError) Error() string {
+	text := t.Err.Error()
+	if t.ErrMsg != nil {
+		text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
+	}
+	return text
+}
+
 func (t *TopicPartitionError) encode(pe packetEncoder) error {
 	pe.putInt16(int16(t.Err))
 
diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go
index 66207e0..a493e02 100644
--- a/vendor/github.com/Shopify/sarama/create_topics_response.go
+++ b/vendor/github.com/Shopify/sarama/create_topics_response.go
@@ -1,6 +1,9 @@
 package sarama
 
-import "time"
+import (
+	"fmt"
+	"time"
+)
 
 type CreateTopicsResponse struct {
 	Version      int16
@@ -83,6 +86,14 @@
 	ErrMsg *string
 }
 
+func (t *TopicError) Error() string {
+	text := t.Err.Error()
+	if t.ErrMsg != nil {
+		text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
+	}
+	return text
+}
+
 func (t *TopicError) encode(pe packetEncoder, version int16) error {
 	pe.putInt16(int16(t.Err))
 
diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go
index 63fb6ea..5737232 100644
--- a/vendor/github.com/Shopify/sarama/describe_configs_response.go
+++ b/vendor/github.com/Shopify/sarama/describe_configs_response.go
@@ -26,12 +26,12 @@
 }
 
 const (
-	SourceUnknown              ConfigSource = 0
-	SourceTopic                ConfigSource = 1
-	SourceDynamicBroker        ConfigSource = 2
-	SourceDynamicDefaultBroker ConfigSource = 3
-	SourceStaticBroker         ConfigSource = 4
-	SourceDefault              ConfigSource = 5
+	SourceUnknown ConfigSource = iota
+	SourceTopic
+	SourceDynamicBroker
+	SourceDynamicDefaultBroker
+	SourceStaticBroker
+	SourceDefault
 )
 
 type DescribeConfigsResponse struct {
diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml
index 97eed3a..3f4d569 100644
--- a/vendor/github.com/Shopify/sarama/dev.yml
+++ b/vendor/github.com/Shopify/sarama/dev.yml
@@ -2,7 +2,7 @@
 
 up:
   - go:
-      version: '1.11'
+      version: '1.12'
 
 commands:
   test:
diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go
index 87a4c61..c6a8be7 100644
--- a/vendor/github.com/Shopify/sarama/errors.go
+++ b/vendor/github.com/Shopify/sarama/errors.go
@@ -81,6 +81,28 @@
 // See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
 type KError int16
 
+// MultiError is used to contain multiple errors.
+type MultiError struct {
+	Errors *[]error
+}
+
+func (mErr MultiError) Error() string {
+	var errString = ""
+	for _, err := range *mErr.Errors {
+		errString += err.Error() + ","
+	}
+	return errString
+}
+
+// ErrDeleteRecords is the type of error returned when we fail to delete the required records
+type ErrDeleteRecords struct {
+	MultiError
+}
+
+func (err ErrDeleteRecords) Error() string {
+	return "kafka server: failed to delete records " + err.MultiError.Error()
+}
+
 // Numeric error codes returned by the Kafka server.
 const (
 	ErrNoError                            KError = 0
@@ -161,6 +183,11 @@
 	ErrFencedLeaderEpoch                  KError = 74
 	ErrUnknownLeaderEpoch                 KError = 75
 	ErrUnsupportedCompressionType         KError = 76
+	ErrStaleBrokerEpoch                   KError = 77
+	ErrOffsetNotAvailable                 KError = 78
+	ErrMemberIdRequired                   KError = 79
+	ErrPreferredLeaderNotAvailable        KError = 80
+	ErrGroupMaxSizeReached                KError = 81
 )
 
 func (err KError) Error() string {
@@ -323,6 +350,16 @@
 		return "kafka server: The leader epoch in the request is newer than the epoch on the broker."
 	case ErrUnsupportedCompressionType:
 		return "kafka server: The requesting client does not support the compression type of given partition."
+	case ErrStaleBrokerEpoch:
+		return "kafka server: Broker epoch has changed"
+	case ErrOffsetNotAvailable:
+		return "kafka server: The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing"
+	case ErrMemberIdRequired:
+		return "kafka server: The group member needs to have a valid member id before actually entering a consumer group"
+	case ErrPreferredLeaderNotAvailable:
+		return "kafka server: The preferred leader was not available"
+	case ErrGroupMaxSizeReached:
+		return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members."
 	}
 
 	return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
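
A short sketch of how the new error types compose (the underlying errors are hypothetical); note the trailing comma produced by MultiError.Error:

    errs := []error{
        errors.New("partition 0: offset out of range"),
        errors.New("partition 1: not leader"),
    }
    err := sarama.ErrDeleteRecords{MultiError: sarama.MultiError{Errors: &errs}}
    fmt.Println(err)
    // kafka server: failed to delete records partition 0: offset out of range,partition 1: not leader,
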
diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go
index 462ab8a..4db9ddd 100644
--- a/vendor/github.com/Shopify/sarama/fetch_request.go
+++ b/vendor/github.com/Shopify/sarama/fetch_request.go
@@ -36,8 +36,8 @@
 type IsolationLevel int8
 
 const (
-	ReadUncommitted IsolationLevel = 0
-	ReadCommitted   IsolationLevel = 1
+	ReadUncommitted IsolationLevel = iota
+	ReadCommitted
 )
 
 func (r *FetchRequest) encode(pe packetEncoder) (err error) {
diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go
index 9df99c1..3afc187 100644
--- a/vendor/github.com/Shopify/sarama/fetch_response.go
+++ b/vendor/github.com/Shopify/sarama/fetch_response.go
@@ -1,6 +1,7 @@
 package sarama
 
 import (
+	"sort"
 	"time"
 )
 
@@ -185,6 +186,17 @@
 	return pe.pop()
 }
 
+func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction {
+	// No documentation seems to guarantee that the field `fetchResponse.AbortedTransactions` is ordered,
+	// and the Java implementation uses a PriorityQueue based on `FirstOffset`, so we have to order it ourselves.
+	at := b.AbortedTransactions
+	sort.Slice(
+		at,
+		func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset },
+	)
+	return at
+}
+
 type FetchResponse struct {
 	Blocks        map[string]map[int32]*FetchResponseBlock
 	ThrottleTime  time.Duration
@@ -385,6 +397,65 @@
 	batch.addRecord(rec)
 }
 
+// AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp,
+// but instead of appending 1 record to a batch, it appends a new batch containing 1 record to the fetchResponse.
+// Since transactions are handled at the batch level (the whole batch is either committed or aborted), use this to test transactions.
+func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) {
+	frb := r.getOrCreateBlock(topic, partition)
+	kb, vb := encodeKV(key, value)
+
+	records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
+	batch := &RecordBatch{
+		Version:         2,
+		LogAppendTime:   r.LogAppendTime,
+		FirstTimestamp:  timestamp,
+		MaxTimestamp:    r.Timestamp,
+		FirstOffset:     offset,
+		LastOffsetDelta: 0,
+		ProducerID:      producerID,
+		IsTransactional: isTransactional,
+	}
+	rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
+	batch.addRecord(rec)
+	records.RecordBatch = batch
+
+	frb.RecordsSet = append(frb.RecordsSet, &records)
+}
+
+func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) {
+	frb := r.getOrCreateBlock(topic, partition)
+
+	// batch
+	batch := &RecordBatch{
+		Version:         2,
+		LogAppendTime:   r.LogAppendTime,
+		FirstTimestamp:  timestamp,
+		MaxTimestamp:    r.Timestamp,
+		FirstOffset:     offset,
+		LastOffsetDelta: 0,
+		ProducerID:      producerID,
+		IsTransactional: true,
+		Control:         true,
+	}
+
+	// records
+	records := newDefaultRecords(nil)
+	records.RecordBatch = batch
+
+	// record
+	crAbort := ControlRecord{
+		Version: 0,
+		Type:    recordType,
+	}
+	crKey := &realEncoder{raw: make([]byte, 4)}
+	crValue := &realEncoder{raw: make([]byte, 6)}
+	crAbort.encode(crKey, crValue)
+	rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
+	batch.addRecord(rec)
+
+	frb.RecordsSet = append(frb.RecordsSet, &records)
+}
+
 func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
 	r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0)
 }
@@ -393,6 +464,15 @@
 	r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{})
 }
 
+func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) {
+	r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{})
+}
+
+func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) {
+	// define controlRecord key and value
+	r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{})
+}
+
 func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
 	frb := r.getOrCreateBlock(topic, partition)
 	if len(frb.RecordsSet) == 0 {
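
A sketch of driving the transaction filter from a test with the helpers added above (topic, offsets, and producer ID are hypothetical): the batch at offset 1 is transactional, the abort marker at offset 2 ends it, and the block's AbortedTransactions entry is what seeds abortedProducerIDs in parseResponse:

    fr := &sarama.FetchResponse{Version: 4}
    fr.AddRecordBatch("my-topic", 0, nil, sarama.StringEncoder("v"), 1, 7, true)
    fr.AddControlRecord("my-topic", 0, 2, 7, sarama.ControlRecordAbort)
    fr.Blocks["my-topic"][0].AbortedTransactions = []*sarama.AbortedTransaction{
        {ProducerID: 7, FirstOffset: 1},
    }
    // a ReadCommitted consumer parsing this response drops the batch at offset 1
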
diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/Shopify/sarama/find_coordinator_request.go
index 0ab5cb5..ff2ad20 100644
--- a/vendor/github.com/Shopify/sarama/find_coordinator_request.go
+++ b/vendor/github.com/Shopify/sarama/find_coordinator_request.go
@@ -3,8 +3,8 @@
 type CoordinatorType int8
 
 const (
-	CoordinatorGroup       CoordinatorType = 0
-	CoordinatorTransaction CoordinatorType = 1
+	CoordinatorGroup CoordinatorType = iota
+	CoordinatorTransaction
 )
 
 type FindCoordinatorRequest struct {
diff --git a/vendor/github.com/Shopify/sarama/go.mod b/vendor/github.com/Shopify/sarama/go.mod
index 3715129..8c45155 100644
--- a/vendor/github.com/Shopify/sarama/go.mod
+++ b/vendor/github.com/Shopify/sarama/go.mod
@@ -1,13 +1,24 @@
 module github.com/Shopify/sarama
 
 require (
-	github.com/DataDog/zstd v1.3.5
+	github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798
 	github.com/Shopify/toxiproxy v2.1.4+incompatible
 	github.com/davecgh/go-spew v1.1.1
 	github.com/eapache/go-resiliency v1.1.0
 	github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21
 	github.com/eapache/queue v1.1.0
-	github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
-	github.com/pierrec/lz4 v2.0.5+incompatible
+	github.com/golang/snappy v0.0.1 // indirect
+	github.com/hashicorp/go-uuid v1.0.1 // indirect
+	github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03
+	github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41
 	github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a
+	github.com/stretchr/testify v1.3.0
+	github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c
+	github.com/xdg/stringprep v1.0.0 // indirect
+	golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 // indirect
+	golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3
+	gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect
+	gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect
+	gopkg.in/jcmturner/gokrb5.v7 v7.2.3
+	gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect
 )
diff --git a/vendor/github.com/Shopify/sarama/go.sum b/vendor/github.com/Shopify/sarama/go.sum
index 58e2e91..4dbc6d2 100644
--- a/vendor/github.com/Shopify/sarama/go.sum
+++ b/vendor/github.com/Shopify/sarama/go.sum
@@ -1,7 +1,8 @@
-github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14=
-github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 h1:2T/jmrHeTezcCM58lvEQXs0UpQJCo5SoGAcg+mbSTIg=
+github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
 github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
@@ -10,9 +11,41 @@
 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
 github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
-github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
-github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM=
+github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41 h1:GeinFsrjWz97fAxVUEd748aV0cYL+I6k44gFJTCVvpU=
+github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
+github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
+github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 h1:bselrhR0Or1vomJZC8ZIjWtbDmn9OYFLX5Ik9alpJpE=
+golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
+gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
+gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=
+gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
+gopkg.in/jcmturner/gokrb5.v7 v7.2.3 h1:hHMV/yKPwMnJhPuPx7pH2Uw/3Qyf+thJYlisUc44010=
+gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
+gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
+gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
diff --git a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go
new file mode 100644
index 0000000..49b632d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go
@@ -0,0 +1,257 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"fmt"
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"gopkg.in/jcmturner/gokrb5.v7/asn1tools"
+	"gopkg.in/jcmturner/gokrb5.v7/gssapi"
+	"gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype"
+	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
+	"gopkg.in/jcmturner/gokrb5.v7/messages"
+	"gopkg.in/jcmturner/gokrb5.v7/types"
+	"io"
+	"strings"
+	"time"
+)
+
+const (
+	TOK_ID_KRB_AP_REQ   = 256
+	GSS_API_GENERIC_TAG = 0x60
+	KRB5_USER_AUTH      = 1
+	KRB5_KEYTAB_AUTH    = 2
+	GSS_API_INITIAL     = 1
+	GSS_API_VERIFY      = 2
+	GSS_API_FINISH      = 3
+)
+
+type GSSAPIConfig struct {
+	AuthType           int
+	KeyTabPath         string
+	KerberosConfigPath string
+	ServiceName        string
+	Username           string
+	Password           string
+	Realm              string
+}
+
+type GSSAPIKerberosAuth struct {
+	Config                *GSSAPIConfig
+	ticket                messages.Ticket
+	encKey                types.EncryptionKey
+	NewKerberosClientFunc func(config *GSSAPIConfig) (KerberosClient, error)
+	step                  int
+}
+
+type KerberosClient interface {
+	Login() error
+	GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error)
+	Domain() string
+	CName() types.PrincipalName
+	Destroy()
+}
+
+/*
+*
+* Appends the length in big endian before the payload, and sends it to Kafka
+*
+ */
+
+func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) {
+	length := len(payload)
+	finalPackage := make([]byte, length+4) // 4-byte length header + payload
+	copy(finalPackage[4:], payload)
+	binary.BigEndian.PutUint32(finalPackage, uint32(length))
+	bytes, err := broker.conn.Write(finalPackage)
+	if err != nil {
+		return bytes, err
+	}
+	return bytes, nil
+}
+
+/*
+*
+* Reads the length (4 bytes) and then reads the payload
+*
+ */
+
+func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) {
+	bytesRead := 0
+	lengthInBytes := make([]byte, 4)
+	bytes, err := io.ReadFull(broker.conn, lengthInBytes)
+	if err != nil {
+		return nil, bytesRead, err
+	}
+	bytesRead += bytes
+	payloadLength := binary.BigEndian.Uint32(lengthInBytes)
+	payloadBytes := make([]byte, payloadLength)         // buffer for the payload
+	bytes, err = io.ReadFull(broker.conn, payloadBytes) // read the payload
+	if err != nil {
+		return payloadBytes, bytesRead, err
+	}
+	bytesRead += bytes
+	return payloadBytes, bytesRead, nil
+}
+
+func (krbAuth *GSSAPIKerberosAuth) newAuthenticatorChecksum() []byte {
+	a := make([]byte, 24)
+	flags := []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf}
+	binary.LittleEndian.PutUint32(a[:4], 16)
+	for _, i := range flags {
+		f := binary.LittleEndian.Uint32(a[20:24])
+		f |= uint32(i)
+		binary.LittleEndian.PutUint32(a[20:24], f)
+	}
+	return a
+}
+
+/*
+*
+* Construct Kerberos AP_REQ package, conforming to RFC-4120
+* https://tools.ietf.org/html/rfc4120#page-84
+*
+ */
+func (krbAuth *GSSAPIKerberosAuth) createKrb5Token(
+	domain string, cname types.PrincipalName,
+	ticket messages.Ticket,
+	sessionKey types.EncryptionKey) ([]byte, error) {
+	auth, err := types.NewAuthenticator(domain, cname)
+	if err != nil {
+		return nil, err
+	}
+	auth.Cksum = types.Checksum{
+		CksumType: chksumtype.GSSAPI,
+		Checksum:  krbAuth.newAuthenticatorChecksum(),
+	}
+	APReq, err := messages.NewAPReq(
+		ticket,
+		sessionKey,
+		auth,
+	)
+	if err != nil {
+		return nil, err
+	}
+	aprBytes := make([]byte, 2)
+	binary.BigEndian.PutUint16(aprBytes, TOK_ID_KRB_AP_REQ)
+	tb, err := APReq.Marshal()
+	if err != nil {
+		return nil, err
+	}
+	aprBytes = append(aprBytes, tb...)
+	return aprBytes, nil
+}
+
+/*
+*
+*	Append the GSS-API header to the payload, conforming to RFC-2743
+*	Section 3.1, Mechanism-Independent Token Format
+*
+*	https://tools.ietf.org/html/rfc2743#page-81
+*
+*	GSSAPIHeader + <specific mechanism payload>
+*
+ */
+func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) {
+	oidBytes, err := asn1.Marshal(gssapi.OID(gssapi.OIDKRB5))
+	if err != nil {
+		return nil, err
+	}
+	tkoLengthBytes := asn1tools.MarshalLengthBytes(len(oidBytes) + len(payload))
+	GSSHeader := append([]byte{GSS_API_GENERIC_TAG}, tkoLengthBytes...)
+	GSSHeader = append(GSSHeader, oidBytes...)
+	GSSPackage := append(GSSHeader, payload...)
+	return GSSPackage, nil
+}
+
+func (krbAuth *GSSAPIKerberosAuth) initSecContext(bytes []byte, kerberosClient KerberosClient) ([]byte, error) {
+	switch krbAuth.step {
+	case GSS_API_INITIAL:
+		aprBytes, err := krbAuth.createKrb5Token(
+			kerberosClient.Domain(),
+			kerberosClient.CName(),
+			krbAuth.ticket,
+			krbAuth.encKey)
+		if err != nil {
+			return nil, err
+		}
+		krbAuth.step = GSS_API_VERIFY
+		return krbAuth.appendGSSAPIHeader(aprBytes)
+	case GSS_API_VERIFY:
+		wrapTokenReq := gssapi.WrapToken{}
+		if err := wrapTokenReq.Unmarshal(bytes, true); err != nil {
+			return nil, err
+		}
+		// Validate response.
+		isValid, err := wrapTokenReq.Verify(krbAuth.encKey, keyusage.GSSAPI_ACCEPTOR_SEAL)
+		if !isValid {
+			return nil, err
+		}
+
+		wrapTokenResponse, err := gssapi.NewInitiatorWrapToken(wrapTokenReq.Payload, krbAuth.encKey)
+		if err != nil {
+			return nil, err
+		}
+		krbAuth.step = GSS_API_FINISH
+		return wrapTokenResponse.Marshal()
+	}
+	return nil, nil
+}
+
+/* This does the handshake for authorization */
+func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error {
+
+	kerberosClient, err := krbAuth.NewKerberosClientFunc(krbAuth.Config)
+	if err != nil {
+		Logger.Printf("Kerberos client error: %s", err)
+		return err
+	}
+
+	err = kerberosClient.Login()
+	if err != nil {
+		Logger.Printf("Kerberos client error: %s", err)
+		return err
+	}
+	// Construct SPN using serviceName and host
+	// SPN format: <SERVICE>/<FQDN>
+
+	host := strings.SplitN(broker.addr, ":", 2)[0] // Strip port part
+	spn := fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host)
+
+	ticket, encKey, err := kerberosClient.GetServiceTicket(spn)
+
+	if err != nil {
+		Logger.Printf("Error getting Kerberos service ticket : %s", err)
+		return err
+	}
+	krbAuth.ticket = ticket
+	krbAuth.encKey = encKey
+	krbAuth.step = GSS_API_INITIAL
+	var receivedBytes []byte
+	defer kerberosClient.Destroy()
+	for {
+		packBytes, err := krbAuth.initSecContext(receivedBytes, kerberosClient)
+		if err != nil {
+			Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
+			return err
+		}
+		requestTime := time.Now()
+		bytesWritten, err := krbAuth.writePackage(broker, packBytes)
+		if err != nil {
+			Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
+			return err
+		}
+		broker.updateOutgoingCommunicationMetrics(bytesWritten)
+		if krbAuth.step == GSS_API_VERIFY {
+			var bytesRead = 0
+			receivedBytes, bytesRead, err = krbAuth.readPackage(broker)
+			requestLatency := time.Since(requestTime)
+			broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
+			if err != nil {
+				Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
+				return err
+			}
+		} else if krbAuth.step == GSS_API_FINISH {
+			return nil
+		}
+	}
+}
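
The wire framing used by writePackage and readPackage, in isolation: a 4-byte big-endian length prefix followed by the payload (helper names are hypothetical):

    func frame(payload []byte) []byte {
        out := make([]byte, 4+len(payload))
        binary.BigEndian.PutUint32(out, uint32(len(payload)))
        copy(out[4:], payload)
        return out
    }

    func unframe(r io.Reader) ([]byte, error) {
        head := make([]byte, 4)
        if _, err := io.ReadFull(r, head); err != nil {
            return nil, err
        }
        payload := make([]byte, binary.BigEndian.Uint32(head))
        _, err := io.ReadFull(r, payload)
        return payload, err
    }
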
diff --git a/vendor/github.com/Shopify/sarama/kerberos_client.go b/vendor/github.com/Shopify/sarama/kerberos_client.go
new file mode 100644
index 0000000..91b998f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/kerberos_client.go
@@ -0,0 +1,51 @@
+package sarama
+
+import (
+	krb5client "gopkg.in/jcmturner/gokrb5.v7/client"
+	krb5config "gopkg.in/jcmturner/gokrb5.v7/config"
+	"gopkg.in/jcmturner/gokrb5.v7/keytab"
+	"gopkg.in/jcmturner/gokrb5.v7/types"
+)
+
+type KerberosGoKrb5Client struct {
+	krb5client.Client
+}
+
+func (c *KerberosGoKrb5Client) Domain() string {
+	return c.Credentials.Domain()
+}
+
+func (c *KerberosGoKrb5Client) CName() types.PrincipalName {
+	return c.Credentials.CName()
+}
+
+/*
+*
+* Creates a Kerberos client used to obtain TGT and TGS tokens.
+* It uses the gokrb5 library, which is a pure Go Kerberos client with
+* some GSS-API capabilities and SPNEGO support. Kafka does not use SPNEGO;
+* it uses a pure Kerberos 5 solution (RFC-4121 and RFC-4120).
+*
+ */
+func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) {
+	cfg, err := krb5config.Load(config.KerberosConfigPath)
+	if err != nil {
+		return nil, err
+	}
+	return createClient(config, cfg)
+}
+
+func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) {
+	var client *krb5client.Client
+	if config.AuthType == KRB5_KEYTAB_AUTH {
+		kt, err := keytab.Load(config.KeyTabPath)
+		if err != nil {
+			return nil, err
+		}
+		client = krb5client.NewClientWithKeytab(config.Username, config.Realm, kt, cfg)
+	} else {
+		client = krb5client.NewClientWithPassword(config.Username,
+			config.Realm, config.Password, cfg)
+	}
+	return &KerberosGoKrb5Client{*client}, nil
+}
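
Hypothetical wiring of the new Kerberos support into a client config; all values are placeholders, and `SASLTypeGSSAPI` is assumed to be the matching mechanism constant in this sarama version:

    conf := sarama.NewConfig()
    conf.Net.SASL.Enable = true
    conf.Net.SASL.Mechanism = sarama.SASLTypeGSSAPI // assumed constant
    conf.Net.SASL.GSSAPI = sarama.GSSAPIConfig{
        AuthType:           sarama.KRB5_KEYTAB_AUTH,
        KeyTabPath:         "/etc/krb5.keytab",
        KerberosConfigPath: "/etc/krb5.conf",
        ServiceName:        "kafka",
        Username:           "client",
        Realm:              "EXAMPLE.COM",
    }
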
diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go
index da199a7..7d864f6 100644
--- a/vendor/github.com/Shopify/sarama/length_field.go
+++ b/vendor/github.com/Shopify/sarama/length_field.go
@@ -1,6 +1,9 @@
 package sarama
 
-import "encoding/binary"
+import (
+	"encoding/binary"
+	"sync"
+)
 
 // LengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths.
 type lengthField struct {
@@ -8,6 +11,20 @@
 	length      int32
 }
 
+var lengthFieldPool = sync.Pool{}
+
+func acquireLengthField() *lengthField {
+	val := lengthFieldPool.Get()
+	if val != nil {
+		return val.(*lengthField)
+	}
+	return &lengthField{}
+}
+
+func releaseLengthField(m *lengthField) {
+	lengthFieldPool.Put(m)
+}
+
 func (l *lengthField) decode(pd packetDecoder) error {
 	var err error
 	l.length, err = pd.getInt32()
diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go
index f64c79b..7c54748 100644
--- a/vendor/github.com/Shopify/sarama/message.go
+++ b/vendor/github.com/Shopify/sarama/message.go
@@ -5,37 +5,44 @@
 	"time"
 )
 
-// The lowest 3 bits contain the compression codec used for the message
-const compressionCodecMask int8 = 0x07
+const (
+	// CompressionNone no compression
+	CompressionNone CompressionCodec = iota
+	// CompressionGZIP compression using GZIP
+	CompressionGZIP
+	// CompressionSnappy compression using snappy
+	CompressionSnappy
+	// CompressionLZ4 compression using LZ4
+	CompressionLZ4
+	// CompressionZSTD compression using ZSTD
+	CompressionZSTD
 
-// Bit 3 set for "LogAppend" timestamps
-const timestampTypeMask = 0x08
+	// The lowest 3 bits contain the compression codec used for the message
+	compressionCodecMask int8 = 0x07
+
+	// Bit 3 set for "LogAppend" timestamps
+	timestampTypeMask = 0x08
+
+	// CompressionLevelDefault is the constant to use in CompressionLevel
+	// to have the default compression level for any codec. The value is picked
+	// so that it does not collide with any existing compression level.
+	CompressionLevelDefault = -1000
+)
 
 // CompressionCodec represents the various compression codecs recognized by Kafka in messages.
 type CompressionCodec int8
 
-const (
-	CompressionNone   CompressionCodec = 0
-	CompressionGZIP   CompressionCodec = 1
-	CompressionSnappy CompressionCodec = 2
-	CompressionLZ4    CompressionCodec = 3
-	CompressionZSTD   CompressionCodec = 4
-)
-
 func (cc CompressionCodec) String() string {
 	return []string{
 		"none",
 		"gzip",
 		"snappy",
 		"lz4",
+		"zstd",
 	}[int(cc)]
 }
 
-// CompressionLevelDefault is the constant to use in CompressionLevel
-// to have the default compression level for any codec. The value is picked
-// that we don't use any existing compression levels.
-const CompressionLevelDefault = -1000
-
+// Message is a Kafka message type
 type Message struct {
 	Codec            CompressionCodec // codec used to compress the message contents
 	CompressionLevel int              // compression level
@@ -96,7 +103,10 @@
 }
 
 func (m *Message) decode(pd packetDecoder) (err error) {
-	err = pd.push(newCRC32Field(crcIEEE))
+	crc32Decoder := acquireCrc32Field(crcIEEE)
+	defer releaseCrc32Field(crc32Decoder)
+
+	err = pd.push(crc32Decoder)
 	if err != nil {
 		return err
 	}
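
For reference, the attribute masks consolidated above unpack a message's attribute byte like this (a standalone in-package sketch mirroring Message.decode, since the masks are unexported):

    func unpackAttributes(attribute int8) (codec CompressionCodec, logAppendTime bool) {
        codec = CompressionCodec(attribute & compressionCodecMask) // lowest 3 bits
        logAppendTime = attribute&timestampTypeMask == timestampTypeMask
        return
    }
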
diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go
index 600c7c4..6523ec2 100644
--- a/vendor/github.com/Shopify/sarama/message_set.go
+++ b/vendor/github.com/Shopify/sarama/message_set.go
@@ -29,7 +29,10 @@
 		return err
 	}
 
-	if err = pd.push(&lengthField{}); err != nil {
+	lengthDecoder := acquireLengthField()
+	defer releaseLengthField(lengthDecoder)
+
+	if err = pd.push(lengthDecoder); err != nil {
 		return err
 	}
 
diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go
index 17dc428..1b590d3 100644
--- a/vendor/github.com/Shopify/sarama/metadata_request.go
+++ b/vendor/github.com/Shopify/sarama/metadata_request.go
@@ -37,15 +37,8 @@
 	if err != nil {
 		return err
 	}
-	if size < 0 {
-		return nil
-	} else {
-		topicCount := size
-		if topicCount == 0 {
-			return nil
-		}
-
-		r.Topics = make([]string, topicCount)
+	if size > 0 {
+		r.Topics = make([]string, size)
 		for i := range r.Topics {
 			topic, err := pd.getString()
 			if err != nil {
diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go
index c402d05..b2d532e 100644
--- a/vendor/github.com/Shopify/sarama/metadata_response.go
+++ b/vendor/github.com/Shopify/sarama/metadata_response.go
@@ -296,7 +296,7 @@
 	return tmatch
 }
 
-func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) {
+func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) {
 	tmatch := r.AddTopic(topic, ErrNoError)
 	var pmatch *PartitionMetadata
 
@@ -316,6 +316,7 @@
 	pmatch.Leader = brokerID
 	pmatch.Replicas = replicas
 	pmatch.Isr = isr
+	pmatch.OfflineReplicas = offline
 	pmatch.Err = err
 
 }
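
The extended signature in a mock-metadata sketch (broker ID, topic, and replica sets are hypothetical); the new trailing slice carries the partition's offline replicas:

    mr := new(sarama.MetadataResponse)
    mr.AddBroker("localhost:9092", 0)
    mr.AddTopicPartition("my-topic", 0, 0,
        []int32{0, 1}, // replicas
        []int32{0, 1}, // isr
        []int32{},     // offline replicas (new parameter)
        sarama.ErrNoError)
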
diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go
index 4869708..90e5a87 100644
--- a/vendor/github.com/Shopify/sarama/metrics.go
+++ b/vendor/github.com/Shopify/sarama/metrics.go
@@ -28,14 +28,6 @@
 	return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
 }
 
-func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter {
-	return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r)
-}
-
-func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram {
-	return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r)
-}
-
 func getMetricNameForTopic(name string, topic string) string {
 	// Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
 	// cf. KAFKA-1902 and KAFKA-2337
diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go
index 55ef1e2..4ed46a6 100644
--- a/vendor/github.com/Shopify/sarama/mockbroker.go
+++ b/vendor/github.com/Shopify/sarama/mockbroker.go
@@ -18,6 +18,8 @@
 	expectationTimeout = 500 * time.Millisecond
 )
 
+type GSSApiHandlerFunc func([]byte) []byte
+
 type requestHandlerFunc func(req *request) (res encoder)
 
 // RequestNotifierFunc is invoked when a mock broker processes a request successfully
@@ -49,18 +51,19 @@
 // It is not necessary to prefix message length or correlation ID to your
 // response bytes, the server does that automatically as a convenience.
 type MockBroker struct {
-	brokerID     int32
-	port         int32
-	closing      chan none
-	stopper      chan none
-	expectations chan encoder
-	listener     net.Listener
-	t            TestReporter
-	latency      time.Duration
-	handler      requestHandlerFunc
-	notifier     RequestNotifierFunc
-	history      []RequestResponse
-	lock         sync.Mutex
+	brokerID      int32
+	port          int32
+	closing       chan none
+	stopper       chan none
+	expectations  chan encoder
+	listener      net.Listener
+	t             TestReporter
+	latency       time.Duration
+	handler       requestHandlerFunc
+	notifier      RequestNotifierFunc
+	history       []RequestResponse
+	lock          sync.Mutex
+	gssApiHandler GSSApiHandlerFunc
 }
 
 // RequestResponse represents a Request/Response pair processed by MockBroker.
@@ -173,6 +176,43 @@
 	Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
 }
 
+func (b *MockBroker) SetGSSAPIHandler(handler GSSApiHandlerFunc) {
+	b.gssApiHandler = handler
+}
+
+func (b *MockBroker) readToBytes(r io.Reader) ([]byte, error) {
+	var (
+		bytesRead   int
+		lengthBytes = make([]byte, 4)
+	)
+
+	if _, err := io.ReadFull(r, lengthBytes); err != nil {
+		return nil, err
+	}
+
+	bytesRead += len(lengthBytes)
+	length := int32(binary.BigEndian.Uint32(lengthBytes))
+
+	if length <= 4 || length > MaxRequestSize {
+		return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+	}
+
+	encodedReq := make([]byte, length)
+	if _, err := io.ReadFull(r, encodedReq); err != nil {
+		return nil, err
+	}
+
+	bytesRead += len(encodedReq)
+
+	fullBytes := append(lengthBytes, encodedReq...)
+
+	return fullBytes, nil
+}
+
+func (b *MockBroker) isGSSAPI(buffer []byte) bool {
+	return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04})
+}
+
 func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
 	defer wg.Done()
 	defer func() {
@@ -192,59 +232,92 @@
 	}()
 
 	resHeader := make([]byte, 8)
+	var bytesWritten int
+	var bytesRead int
 	for {
-		req, bytesRead, err := decodeRequest(conn)
+
+		buffer, err := b.readToBytes(conn)
 		if err != nil {
-			Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
+			Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer))
 			b.serverError(err)
 			break
 		}
 
-		if b.latency > 0 {
-			time.Sleep(b.latency)
-		}
+		bytesWritten = 0
+		if !b.isGSSAPI(buffer) {
 
-		b.lock.Lock()
-		res := b.handler(req)
-		b.history = append(b.history, RequestResponse{req.body, res})
-		b.lock.Unlock()
-
-		if res == nil {
-			Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
-			continue
-		}
-		Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
-
-		encodedRes, err := encode(res, nil)
-		if err != nil {
-			b.serverError(err)
-			break
-		}
-		if len(encodedRes) == 0 {
-			b.lock.Lock()
-			if b.notifier != nil {
-				b.notifier(bytesRead, 0)
+			req, br, err := decodeRequest(bytes.NewReader(buffer))
+			bytesRead = br
+			if err != nil {
+				Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
+				b.serverError(err)
+				break
 			}
-			b.lock.Unlock()
-			continue
-		}
 
-		binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
-		binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
-		if _, err = conn.Write(resHeader); err != nil {
-			b.serverError(err)
-			break
-		}
-		if _, err = conn.Write(encodedRes); err != nil {
-			b.serverError(err)
-			break
+			if b.latency > 0 {
+				time.Sleep(b.latency)
+			}
+
+			b.lock.Lock()
+			res := b.handler(req)
+			b.history = append(b.history, RequestResponse{req.body, res})
+			b.lock.Unlock()
+
+			if res == nil {
+				Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
+				continue
+			}
+			Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
+
+			encodedRes, err := encode(res, nil)
+			if err != nil {
+				b.serverError(err)
+				break
+			}
+			if len(encodedRes) == 0 {
+				b.lock.Lock()
+				if b.notifier != nil {
+					b.notifier(bytesRead, 0)
+				}
+				b.lock.Unlock()
+				continue
+			}
+
+			binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
+			binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
+			if _, err = conn.Write(resHeader); err != nil {
+				b.serverError(err)
+				break
+			}
+			if _, err = conn.Write(encodedRes); err != nil {
+				b.serverError(err)
+				break
+			}
+			bytesWritten = len(resHeader) + len(encodedRes)
+
+		} else {
+			// GSSAPI is not part of the Kafka protocol, but is supported for authentication purposes.
+			// History is not supported for this kind of request, as it is only used to test the GSSAPI authentication mechanism.
+			b.lock.Lock()
+			res := b.gssApiHandler(buffer)
+			b.lock.Unlock()
+			if res == nil {
+				Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(buffer))
+				continue
+			}
+			if _, err = conn.Write(res); err != nil {
+				b.serverError(err)
+				break
+			}
+			bytesWritten = len(res)
 		}
 
 		b.lock.Lock()
 		if b.notifier != nil {
-			b.notifier(bytesRead, len(resHeader)+len(encodedRes))
+			b.notifier(bytesRead, bytesWritten)
 		}
 		b.lock.Unlock()
+
 	}
 	Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
 }
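
An in-package test sketch for the new hook; KafkaGSSAPIHandler (defined in mockkerberos.go below) has unexported fields, so this only compiles inside the sarama package, with `t` a TestReporter:

    broker := NewMockBroker(t, 1)
    defer broker.Close()
    broker.SetGSSAPIHandler((&KafkaGSSAPIHandler{
        client: &MockKerberosClient{},
    }).MockKafkaGSSAPI)
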
diff --git a/vendor/github.com/Shopify/sarama/mockkerberos.go b/vendor/github.com/Shopify/sarama/mockkerberos.go
new file mode 100644
index 0000000..affeb2d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockkerberos.go
@@ -0,0 +1,123 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"gopkg.in/jcmturner/gokrb5.v7/credentials"
+	"gopkg.in/jcmturner/gokrb5.v7/gssapi"
+	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
+	"gopkg.in/jcmturner/gokrb5.v7/messages"
+	"gopkg.in/jcmturner/gokrb5.v7/types"
+)
+
+type KafkaGSSAPIHandler struct {
+	client         *MockKerberosClient
+	badResponse    bool
+	badKeyChecksum bool
+}
+
+func (h *KafkaGSSAPIHandler) MockKafkaGSSAPI(buffer []byte) []byte {
+	// Default payload used for verification
+	err := h.client.Login() // the mock client constructs its keys on login
+	if err != nil {
+		return nil
+	}
+	if h.badResponse { // Returns trash
+		return []byte{0x00, 0x00, 0x00, 0x01, 0xAD}
+	}
+
+	var pack = gssapi.WrapToken{
+		Flags:     KRB5_USER_AUTH,
+		EC:        12,
+		RRC:       0,
+		SndSeqNum: 3398292281,
+		Payload:   []byte{0x11, 0x00}, // 1100
+	}
+	// Compute checksum
+	if h.badKeyChecksum {
+		pack.CheckSum = []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
+	} else {
+		err = pack.SetCheckSum(h.client.ASRep.DecryptedEncPart.Key, keyusage.GSSAPI_ACCEPTOR_SEAL)
+		if err != nil {
+			return nil
+		}
+	}
+
+	packBytes, err := pack.Marshal()
+	if err != nil {
+		return nil
+	}
+	lenBytes := len(packBytes)
+	response := make([]byte, lenBytes+4)
+	copy(response[4:], packBytes)
+	binary.BigEndian.PutUint32(response, uint32(lenBytes))
+	return response
+}
+
+type MockKerberosClient struct {
+	asReqBytes  string
+	asRepBytes  string
+	ASRep       messages.ASRep
+	credentials *credentials.Credentials
+	mockError   error
+	errorStage  string
+}
+
+func (c *MockKerberosClient) Login() error {
+	if c.errorStage == "login" && c.mockError != nil {
+		return c.mockError
+	}
+	c.asRepBytes = "6b8202e9308202e5a003020105a10302010ba22b30293027a103020113a220041e301c301aa003020112a1131b114" +
+		"558414d504c452e434f4d636c69656e74a30d1b0b4558414d504c452e434f4da4133011a003020101a10a30081b06636c69656e7" +
+		"4a5820156618201523082014ea003020105a10d1b0b4558414d504c452e434f4da220301ea003020102a11730151b066b7262746" +
+		"7741b0b4558414d504c452e434f4da382011430820110a003020112a103020101a28201020481ffdb9891175d106818e61008c51" +
+		"d0b3462bca92f3bf9d4cfa82de4c4d7aff9994ec87c573e3a3d54dcb2bb79618c76f2bf4a3d006f90d5bdbd049bc18f48be39203" +
+		"549ca02acaf63f292b12404f9b74c34b83687119d8f56552ccc0c50ebee2a53bb114c1b4619bb1d5d31f0f49b4d40a08a9b4c046" +
+		"2e1398d0b648be1c0e50c552ad16e1d8d8e74263dd0bf0ec591e4797dfd40a9a1be4ae830d03a306e053fd7586fef84ffc5e4a83" +
+		"7c3122bf3e6a40fe87e84019f6283634461b955712b44a5f7386c278bff94ec2c2dc0403247e29c2450e853471ceababf9b8911f" +
+		"997f2e3010b046d2c49eb438afb0f4c210821e80d4ffa4c9521eb895dcd68610b3feaa682012c30820128a003020112a282011f0" +
+		"482011bce73cbce3f1dd17661c412005f0f2257c756fe8e98ff97e6ec24b7bab66e5fd3a3827aeeae4757af0c6e892948122d8b2" +
+		"03c8df48df0ef5d142d0e416d688f11daa0fcd63d96bdd431d02b8e951c664eeff286a2be62383d274a04016d5f0e141da58cb86" +
+		"331de64063062f4f885e8e9ce5b181ca2fdc67897c5995e0ae1ae0c171a64493ff7bd91bc6d89cd4fce1e2b3ea0a10e34b0d5eda" +
+		"aa38ee727b50c5632ed1d2f2b457908e616178d0d80b72af209fb8ac9dbaa1768fa45931392b36b6d8c12400f8ded2efaa0654d0" +
+		"da1db966e8b5aab4706c800f95d559664646041fdb38b411c62fc0fbe0d25083a28562b0e1c8df16e62e9d5626b0addee489835f" +
+		"eedb0f26c05baa596b69b17f47920aa64b29dc77cfcc97ba47885"
+	apRepBytes, err := hex.DecodeString(c.asRepBytes)
+	if err != nil {
+		return err
+	}
+	err = c.ASRep.Unmarshal(apRepBytes)
+	if err != nil {
+		return err
+	}
+	c.credentials = credentials.New("client", "EXAMPLE.COM").WithPassword("qwerty")
+	_, err = c.ASRep.DecryptEncPart(c.credentials)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *MockKerberosClient) GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) {
+	if c.errorStage == "service_ticket" && c.mockError != nil {
+		return messages.Ticket{}, types.EncryptionKey{}, c.mockError
+	}
+	return c.ASRep.Ticket, c.ASRep.DecryptedEncPart.Key, nil
+}
+
+func (c *MockKerberosClient) Domain() string {
+	return "EXAMPLE.COM"
+}
+func (c *MockKerberosClient) CName() types.PrincipalName {
+	var p = types.PrincipalName{
+		NameType: KRB5_USER_AUTH,
+		NameString: []string{
+			"kafka",
+			"kafka",
+		},
+	}
+	return p
+}
+func (c *MockKerberosClient) Destroy() {
+	// Do nothing.
+}
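
A GSSAPI test consuming this mock strips the 4-byte length prefix and unmarshals the payload back into a gssapi.WrapToken. A hedged sketch, assuming gokrb5 v7's WrapToken.Unmarshal(b, expectFromAcceptor) signature; decodeMockToken is a hypothetical helper, not part of sarama:

	package mockkrb

	import (
		"encoding/binary"
		"fmt"

		"gopkg.in/jcmturner/gokrb5.v7/gssapi"
	)

	// decodeMockToken undoes the framing produced by MockKafkaGSSAPI.
	func decodeMockToken(resp []byte) (*gssapi.WrapToken, error) {
		if len(resp) < 4 {
			return nil, fmt.Errorf("short response: %d bytes", len(resp))
		}
		if n := binary.BigEndian.Uint32(resp[:4]); int(n) != len(resp)-4 {
			return nil, fmt.Errorf("length prefix %d != payload %d", n, len(resp)-4)
		}
		var token gssapi.WrapToken
		// true: the token is expected to come from the acceptor (the mocked broker).
		if err := token.Unmarshal(resp[4:], true); err != nil {
			return nil, err
		}
		return &token, nil
	}
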
diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go
index 348c223..c78f0ac 100644
--- a/vendor/github.com/Shopify/sarama/mockresponses.go
+++ b/vendor/github.com/Shopify/sarama/mockresponses.go
@@ -2,6 +2,7 @@
 
 import (
 	"fmt"
+	"strings"
 )
 
 // TestReporter has methods matching go's testing.T to avoid importing
@@ -177,7 +178,7 @@
 
 	// Generate set of replicas
 	replicas := []int32{}
-
+	offlineReplicas := []int32{}
 	for _, brokerID := range mmr.brokers {
 		replicas = append(replicas, brokerID)
 	}
@@ -185,14 +186,14 @@
 	if len(metadataRequest.Topics) == 0 {
 		for topic, partitions := range mmr.leaders {
 			for partition, brokerID := range partitions {
-				metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, ErrNoError)
+				metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
 			}
 		}
 		return metadataResponse
 	}
 	for _, topic := range metadataRequest.Topics {
 		for partition, brokerID := range mmr.leaders[topic] {
-			metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, ErrNoError)
+			metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
 		}
 	}
 	return metadataResponse
@@ -573,6 +574,7 @@
 // MockOffsetFetchResponse is a `OffsetFetchResponse` builder.
 type MockOffsetFetchResponse struct {
 	offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
+	error   KError
 	t       TestReporter
 }
 
@@ -598,15 +600,25 @@
 	return mr
 }
 
+func (mr *MockOffsetFetchResponse) SetError(kerror KError) *MockOffsetFetchResponse {
+	mr.error = kerror
+	return mr
+}
+
 func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
 	req := reqBody.(*OffsetFetchRequest)
 	group := req.ConsumerGroup
-	res := &OffsetFetchResponse{}
+	res := &OffsetFetchResponse{Version: req.Version}
+
 	for topic, partitions := range mr.offsets[group] {
 		for partition, block := range partitions {
 			res.AddBlock(topic, partition, block)
 		}
 	}
+
+	if res.Version >= 2 {
+		res.Err = mr.error
+	}
 	return res
 }
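
The top-level Err only exists from OffsetFetch v2 onward, hence the version gate above. In a test it would be wired up roughly like this (a sketch; broker and t come from the surrounding mock-broker test):

	// SetError is effectively a no-op for requests below version 2,
	// which have no top-level error field to carry it.
	broker.SetHandlerByMap(map[string]MockResponse{
		"OffsetFetchRequest": NewMockOffsetFetchResponse(t).
			SetOffset("my-group", "my-topic", 0, 42, "", ErrNoError).
			SetError(ErrNotCoordinatorForConsumer),
	})
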
 
@@ -620,10 +632,20 @@
 
 func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoder {
 	req := reqBody.(*CreateTopicsRequest)
-	res := &CreateTopicsResponse{}
+	res := &CreateTopicsResponse{
+		Version: req.Version,
+	}
 	res.TopicErrors = make(map[string]*TopicError)
 
-	for topic, _ := range req.TopicDetails {
+	for topic := range req.TopicDetails {
+		if res.Version >= 1 && strings.HasPrefix(topic, "_") {
+			msg := "insufficient permissions to create topic with reserved prefix"
+			res.TopicErrors[topic] = &TopicError{
+				Err:    ErrTopicAuthorizationFailed,
+				ErrMsg: &msg,
+			}
+			continue
+		}
 		res.TopicErrors[topic] = &TopicError{Err: ErrNoError}
 	}
 	return res
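
The reserved-prefix branch gives tests a deterministic authorization failure; exercised directly it looks like this (a sketch inside the sarama package, with t a TestReporter):

	req := &CreateTopicsRequest{
		Version: 1, // the reserved-prefix check only applies to v1+
		TopicDetails: map[string]*TopicDetail{
			"_internal": {NumPartitions: 1, ReplicationFactor: 1},
		},
	}
	res := NewMockCreateTopicsResponse(t).For(req).(*CreateTopicsResponse)
	if res.TopicErrors["_internal"].Err != ErrTopicAuthorizationFailed {
		t.Errorf("expected ErrTopicAuthorizationFailed for reserved prefix")
	}
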
@@ -661,7 +683,15 @@
 	res := &CreatePartitionsResponse{}
 	res.TopicPartitionErrors = make(map[string]*TopicPartitionError)
 
-	for topic, _ := range req.TopicPartitions {
+	for topic := range req.TopicPartitions {
+		if strings.HasPrefix(topic, "_") {
+			msg := "insufficient permissions to create partition on topic with reserved prefix"
+			res.TopicPartitionErrors[topic] = &TopicPartitionError{
+				Err:    ErrTopicAuthorizationFailed,
+				ErrMsg: &msg,
+			}
+			continue
+		}
 		res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError}
 	}
 	return res
@@ -682,7 +712,7 @@
 
 	for topic, deleteRecordRequestTopic := range req.Topics {
 		partitions := make(map[int32]*DeleteRecordsResponsePartition)
-		for partition, _ := range deleteRecordRequestTopic.PartitionOffsets {
+		for partition := range deleteRecordRequestTopic.PartitionOffsets {
 			partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError}
 		}
 		res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions}
@@ -866,3 +896,26 @@
 	}
 	return res
 }
+
+type MockDeleteGroupsResponse struct {
+	deletedGroups []string
+}
+
+func NewMockDeleteGroupsRequest(t TestReporter) *MockDeleteGroupsResponse {
+	return &MockDeleteGroupsResponse{}
+}
+
+func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDeleteGroupsResponse {
+	m.deletedGroups = groups
+	return m
+}
+
+func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoder {
+	resp := &DeleteGroupsResponse{
+		GroupErrorCodes: map[string]KError{},
+	}
+	for _, group := range m.deletedGroups {
+		resp.GroupErrorCodes[group] = ErrNoError
+	}
+	return resp
+}
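
Usage follows the same builder pattern as the other mocks (note the constructor keeps the NewMockDeleteGroupsRequest name from this diff even though it returns a response builder); a sketch:

	broker.SetHandlerByMap(map[string]MockResponse{
		"DeleteGroupsRequest": NewMockDeleteGroupsRequest(t).
			SetDeletedGroups([]string{"group-a", "group-b"}),
	})
	// Every listed group comes back with ErrNoError in GroupErrorCodes.
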
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go
index 1ec583e..5732ed9 100644
--- a/vendor/github.com/Shopify/sarama/offset_commit_request.go
+++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go
@@ -200,11 +200,11 @@
 func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) {
 	partitions := r.blocks[topic]
 	if partitions == nil {
-		return 0, "", errors.New("No such offset")
+		return 0, "", errors.New("no such offset")
 	}
 	block := partitions[partitionID]
 	if block == nil {
-		return 0, "", errors.New("No such offset")
+		return 0, "", errors.New("no such offset")
 	}
 	return block.offset, block.metadata, nil
 }
diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go
index 2432f7b..923972f 100644
--- a/vendor/github.com/Shopify/sarama/offset_manager.go
+++ b/vendor/github.com/Shopify/sarama/offset_manager.go
@@ -333,7 +333,6 @@
 				pom.handleError(err)
 			case ErrOffsetsLoadInProgress:
 				// nothing wrong but we didn't commit, we'll get it next time round
-				break
 			case ErrUnknownTopicOrPartition:
 				// let the user know *and* try redispatching - if topic-auto-create is
 				// enabled, redispatching should trigger a metadata req and create the
@@ -576,6 +575,6 @@
 
 func (pom *partitionOffsetManager) release() {
 	pom.releaseOnce.Do(func() {
-		go close(pom.errors)
+		close(pom.errors)
 	})
 }
diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go
index 74805cc..9be854c 100644
--- a/vendor/github.com/Shopify/sarama/packet_decoder.go
+++ b/vendor/github.com/Shopify/sarama/packet_decoder.go
@@ -27,6 +27,7 @@
 	remaining() int
 	getSubset(length int) (packetDecoder, error)
 	peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset
+	peekInt8(offset int) (int8, error)              // similar to peek, but just one byte
 
 	// Stacks, see PushDecoder
 	push(in pushDecoder) error
diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go
index 4b42d9c..bba0f7e 100644
--- a/vendor/github.com/Shopify/sarama/produce_set.go
+++ b/vendor/github.com/Shopify/sarama/produce_set.go
@@ -81,7 +81,7 @@
 
 	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
 		if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence {
-			return errors.New("Assertion failed: Message out of sequence added to a batch")
+			return errors.New("assertion failed: message out of sequence added to a batch")
 		}
 		// We are being conservative here to avoid having to prep encode the record
 		size += maximumRecordOverhead
diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go
index 23045e7..085cbb3 100644
--- a/vendor/github.com/Shopify/sarama/real_decoder.go
+++ b/vendor/github.com/Shopify/sarama/real_decoder.go
@@ -290,6 +290,14 @@
 	return &realDecoder{raw: rd.raw[off : off+length]}, nil
 }
 
+func (rd *realDecoder) peekInt8(offset int) (int8, error) {
+	const byteLen = 1
+	if rd.remaining() < offset+byteLen {
+		return -1, ErrInsufficientData
+	}
+	return int8(rd.raw[rd.off+offset]), nil
+}
+
 // stacks
 
 func (rd *realDecoder) push(in pushDecoder) error {
diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go
index cded308..cdccfe3 100644
--- a/vendor/github.com/Shopify/sarama/record.go
+++ b/vendor/github.com/Shopify/sarama/record.go
@@ -6,10 +6,12 @@
 )
 
 const (
+	isTransactionalMask   = 0x10
 	controlMask           = 0x20
 	maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1
 )
 
+// RecordHeader stores the key and value for a record header
 type RecordHeader struct {
 	Key   []byte
 	Value []byte
@@ -33,15 +35,16 @@
 	return nil
 }
 
+// Record is a Kafka record type
 type Record struct {
+	Headers []*RecordHeader
+
 	Attributes     int8
 	TimestampDelta time.Duration
 	OffsetDelta    int64
 	Key            []byte
 	Value          []byte
-	Headers        []*RecordHeader
-
-	length varintLengthField
+	length         varintLengthField
 }
 
 func (r *Record) encode(pe packetEncoder) error {
diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go
index a36f7e6..c653763 100644
--- a/vendor/github.com/Shopify/sarama/record_batch.go
+++ b/vendor/github.com/Shopify/sarama/record_batch.go
@@ -45,11 +45,16 @@
 	FirstSequence         int32
 	Records               []*Record
 	PartialTrailingRecord bool
+	IsTransactional       bool
 
 	compressedRecords []byte
 	recordsLen        int // uncompressed records size
 }
 
+func (b *RecordBatch) LastOffset() int64 {
+	return b.FirstOffset + int64(b.LastOffsetDelta)
+}
+
 func (b *RecordBatch) encode(pe packetEncoder) error {
 	if b.Version != 2 {
 		return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)}
@@ -111,7 +116,10 @@
 		return err
 	}
 
-	if err = pd.push(&crc32Field{polynomial: crcCastagnoli}); err != nil {
+	crc32Decoder := acquireCrc32Field(crcCastagnoli)
+	defer releaseCrc32Field(crc32Decoder)
+
+	if err = pd.push(crc32Decoder); err != nil {
 		return err
 	}
 
@@ -122,6 +130,7 @@
 	b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask)
 	b.Control = attributes&controlMask == controlMask
 	b.LogAppendTime = attributes&timestampTypeMask == timestampTypeMask
+	b.IsTransactional = attributes&isTransactionalMask == isTransactionalMask
 
 	if b.LastOffsetDelta, err = pd.getInt32(); err != nil {
 		return err
@@ -205,6 +214,9 @@
 	if b.LogAppendTime {
 		attr |= timestampTypeMask
 	}
+	if b.IsTransactional {
+		attr |= isTransactionalMask
+	}
 	return attr
 }
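
computeAttributes now round-trips the transactional bit alongside the codec, control, and timestamp-type bits. A summary sketch of the low byte of the v2 batch attributes; the codec and timestamp masks are quoted from the surrounding sarama source, the transactional mask from this diff:

	package batchattrs

	const (
		compressionCodecMask = 0x07 // bits 0-2: compression codec
		timestampTypeMask    = 0x08 // bit 3: log-append time
		isTransactionalMask  = 0x10 // bit 4: transactional (new in this diff)
		controlMask          = 0x20 // bit 5: control batch
	)

	// isTransactional mirrors the decode path added above.
	func isTransactional(attributes int16) bool {
		return attributes&isTransactionalMask == isTransactionalMask
	}
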
 
diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go
index 192f592..98160c7 100644
--- a/vendor/github.com/Shopify/sarama/records.go
+++ b/vendor/github.com/Shopify/sarama/records.go
@@ -185,10 +185,20 @@
 }
 
 func magicValue(pd packetDecoder) (int8, error) {
-	dec, err := pd.peek(magicOffset, magicLength)
-	if err != nil {
-		return 0, err
+	return pd.peekInt8(magicOffset)
+}
+
+func (r *Records) getControlRecord() (ControlRecord, error) {
+	if r.RecordBatch == nil || len(r.RecordBatch.Records) <= 0 {
+		return ControlRecord{}, fmt.Errorf("cannot get control record, record batch is empty")
 	}
 
-	return dec.getInt8()
+	firstRecord := r.RecordBatch.Records[0]
+	controlRecord := ControlRecord{}
+	err := controlRecord.decode(&realDecoder{raw: firstRecord.Key}, &realDecoder{raw: firstRecord.Value})
+	if err != nil {
+		return ControlRecord{}, err
+	}
+
+	return controlRecord, nil
 }
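
getControlRecord decodes the first record of a control batch, which a consumer fetch path can use to react to transaction markers. A hedged sketch; it has to live in package sarama since the helper is unexported, and it assumes the ControlRecord type and its Type constants from sarama's control_record.go:

	// dispatchBatch sketches how a fetch loop might branch on control batches.
	func dispatchBatch(r *Records) error {
		if r.RecordBatch == nil || !r.RecordBatch.Control {
			return nil // plain user records, handled elsewhere
		}
		cr, err := r.getControlRecord()
		if err != nil {
			return err
		}
		switch cr.Type {
		case ControlRecordCommit: // transaction committed; release its records
		case ControlRecordAbort: // transaction aborted; drop its records
		}
		return nil
	}
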
diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go
index f7eea59..5ed8ca4 100644
--- a/vendor/github.com/Shopify/sarama/request.go
+++ b/vendor/github.com/Shopify/sarama/request.go
@@ -20,51 +20,67 @@
 	body          protocolBody
 }
 
-func (r *request) encode(pe packetEncoder) (err error) {
+func (r *request) encode(pe packetEncoder) error {
 	pe.push(&lengthField{})
 	pe.putInt16(r.body.key())
 	pe.putInt16(r.body.version())
 	pe.putInt32(r.correlationID)
-	err = pe.putString(r.clientID)
+
+	err := pe.putString(r.clientID)
 	if err != nil {
 		return err
 	}
+
 	err = r.body.encode(pe)
 	if err != nil {
 		return err
 	}
+
 	return pe.pop()
 }
 
 func (r *request) decode(pd packetDecoder) (err error) {
-	var key int16
-	if key, err = pd.getInt16(); err != nil {
+	key, err := pd.getInt16()
+	if err != nil {
 		return err
 	}
-	var version int16
-	if version, err = pd.getInt16(); err != nil {
+
+	version, err := pd.getInt16()
+	if err != nil {
 		return err
 	}
-	if r.correlationID, err = pd.getInt32(); err != nil {
+
+	r.correlationID, err = pd.getInt32()
+	if err != nil {
 		return err
 	}
+
 	r.clientID, err = pd.getString()
+	if err != nil {
+		return err
+	}
 
 	r.body = allocateBody(key, version)
 	if r.body == nil {
 		return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
 	}
+
 	return r.body.decode(pd, version)
 }
 
-func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) {
-	lengthBytes := make([]byte, 4)
+func decodeRequest(r io.Reader) (*request, int, error) {
+	var (
+		bytesRead   int
+		lengthBytes = make([]byte, 4)
+	)
+
 	if _, err := io.ReadFull(r, lengthBytes); err != nil {
 		return nil, bytesRead, err
 	}
-	bytesRead += len(lengthBytes)
 
+	bytesRead += len(lengthBytes)
 	length := int32(binary.BigEndian.Uint32(lengthBytes))
+
 	if length <= 4 || length > MaxRequestSize {
 		return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
 	}
@@ -73,12 +89,14 @@
 	if _, err := io.ReadFull(r, encodedReq); err != nil {
 		return nil, bytesRead, err
 	}
+
 	bytesRead += len(encodedReq)
 
-	req = &request{}
+	req := &request{}
 	if err := decode(encodedReq, req); err != nil {
 		return nil, bytesRead, err
 	}
+
 	return req, bytesRead, nil
 }
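
decodeRequest reads the same wire layout the mock broker writes: a 4-byte length, then api key, api version, correlation id, client id, and the body. A hypothetical test helper that frames a request by hand (stdlib only; errors from bytes.Buffer writes are always nil and ignored here):

	package rawreq

	import (
		"bytes"
		"encoding/binary"
	)

	// buildRawRequest frames an already-encoded body the way decodeRequest
	// expects to read it back.
	func buildRawRequest(apiKey, apiVersion int16, correlationID int32, clientID string, body []byte) []byte {
		var p bytes.Buffer
		binary.Write(&p, binary.BigEndian, apiKey)
		binary.Write(&p, binary.BigEndian, apiVersion)
		binary.Write(&p, binary.BigEndian, correlationID)
		binary.Write(&p, binary.BigEndian, int16(len(clientID))) // Kafka strings carry an int16 length
		p.WriteString(clientID)
		p.Write(body)

		var out bytes.Buffer
		binary.Write(&out, binary.BigEndian, int32(p.Len())) // length excludes its own four bytes
		out.Write(p.Bytes())
		return out.Bytes()
	}
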
 
diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go
index f3f4d27..7a75918 100644
--- a/vendor/github.com/Shopify/sarama/response_header.go
+++ b/vendor/github.com/Shopify/sarama/response_header.go
@@ -2,6 +2,9 @@
 
 import "fmt"
 
+const responseLengthSize = 4
+const correlationIDSize = 4
+
 type responseHeader struct {
 	length        int32
 	correlationID int32
diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go
index 7d5dc60..1e0277a 100644
--- a/vendor/github.com/Shopify/sarama/sarama.go
+++ b/vendor/github.com/Shopify/sarama/sarama.go
@@ -10,10 +10,7 @@
 depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
 SyncProducer can still sometimes be lost.
 
-To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic
-consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the
-https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9
-and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
+To consume messages, use Consumer or Consumer-Group API.
 
 For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
 and message sent on the wire; the Client provides higher-level metadata management that is shared between
@@ -61,6 +58,14 @@
 	| compression-ratio-for-topic-<topic>       | histogram  | Distribution of the compression ratio times 100 of record batches for a given topic  |
 	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
 
+Consumer related metrics:
+
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| Name                                      | Type       | Description                                                                          |
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| consumer-batch-size                       | histogram  | Distribution of the number of messages in a batch                                    |
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+
 */
 package sarama
 
@@ -69,10 +74,29 @@
 	"log"
 )
 
-// Logger is the instance of a StdLogger interface that Sarama writes connection
-// management events to. By default it is set to discard all log messages via ioutil.Discard,
-// but you can set it to redirect wherever you want.
-var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
+var (
+	// Logger is the instance of a StdLogger interface that Sarama writes connection
+	// management events to. By default it is set to discard all log messages via ioutil.Discard,
+	// but you can set it to redirect wherever you want.
+	Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
+
+	// PanicHandler is called for recovering from panics spawned internally to the library (and thus
+	// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
+	PanicHandler func(interface{})
+
+	// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
+	// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
+	// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
+	// to process.
+	MaxRequestSize int32 = 100 * 1024 * 1024
+
+	// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
+	// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
+	// protect the client from running out of memory. Please note that brokers do not have any natural limit on
+	// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
+	// (see https://issues.apache.org/jira/browse/KAFKA-2063).
+	MaxResponseSize int32 = 100 * 1024 * 1024
+)
 
 // StdLogger is used to log error messages.
 type StdLogger interface {
@@ -80,20 +104,3 @@
 	Printf(format string, v ...interface{})
 	Println(v ...interface{})
 }
-
-// PanicHandler is called for recovering from panics spawned internally to the library (and thus
-// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
-var PanicHandler func(interface{})
-
-// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
-// to send a request larger than this will result in an PacketEncodingError. The default of 100 MiB is aligned
-// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
-// to process.
-var MaxRequestSize int32 = 100 * 1024 * 1024
-
-// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
-// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
-// protect the client from running out of memory. Please note that brokers do not have any natural limit on
-// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
-// (see https://issues.apache.org/jira/browse/KAFKA-2063).
-var MaxResponseSize int32 = 100 * 1024 * 1024
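
The behavior of these package-level knobs is unchanged; they are only regrouped into one var block. Typical setup code touches them like so (Logger, PanicHandler, and the size limits are real sarama API):

	sarama.Logger = log.New(os.Stdout, "[Sarama] ", log.LstdFlags)
	sarama.PanicHandler = func(v interface{}) {
		log.Printf("recovered sarama panic: %v", v)
	}
	// Both limits default to 100 MiB, matching Kafka's socket.request.max.bytes.
	sarama.MaxRequestSize = 50 * 1024 * 1024
	sarama.MaxResponseSize = 50 * 1024 * 1024
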
diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go
index 7dcbf03..7c815cd 100644
--- a/vendor/github.com/Shopify/sarama/utils.go
+++ b/vendor/github.com/Shopify/sarama/utils.go
@@ -159,6 +159,8 @@
 	V2_0_0_0  = newKafkaVersion(2, 0, 0, 0)
 	V2_0_1_0  = newKafkaVersion(2, 0, 1, 0)
 	V2_1_0_0  = newKafkaVersion(2, 1, 0, 0)
+	V2_2_0_0  = newKafkaVersion(2, 2, 0, 0)
+	V2_3_0_0  = newKafkaVersion(2, 3, 0, 0)
 
 	SupportedVersions = []KafkaVersion{
 		V0_8_2_0,
@@ -181,11 +183,14 @@
 		V2_0_0_0,
 		V2_0_1_0,
 		V2_1_0_0,
+		V2_2_0_0,
+		V2_3_0_0,
 	}
 	MinVersion = V0_8_2_0
-	MaxVersion = V2_1_0_0
+	MaxVersion = V2_3_0_0
 )
 
+// ParseKafkaVersion parses a string and returns the corresponding KafkaVersion, or an error if the string is not a valid version
 func ParseKafkaVersion(s string) (KafkaVersion, error) {
 	if len(s) < 5 {
 		return MinVersion, fmt.Errorf("invalid version `%s`", s)
diff --git a/vendor/github.com/armon/go-metrics/.travis.yml b/vendor/github.com/armon/go-metrics/.travis.yml
new file mode 100644
index 0000000..87d230c
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+  - "1.x"
+
+env:
+  - GO111MODULE=on
+
+install:
+  - go get ./...
+
+script:
+  - go test ./...
diff --git a/vendor/github.com/armon/go-metrics/go.mod b/vendor/github.com/armon/go-metrics/go.mod
new file mode 100644
index 0000000..88e1e98
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/go.mod
@@ -0,0 +1,16 @@
+module github.com/armon/go-metrics
+
+go 1.12
+
+require (
+	github.com/DataDog/datadog-go v2.2.0+incompatible
+	github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible
+	github.com/circonus-labs/circonusllhist v0.1.3 // indirect
+	github.com/hashicorp/go-immutable-radix v1.0.0
+	github.com/hashicorp/go-retryablehttp v0.5.3 // indirect
+	github.com/pascaldekloe/goe v0.1.0
+	github.com/pkg/errors v0.8.1 // indirect
+	github.com/prometheus/client_golang v0.9.2
+	github.com/stretchr/testify v1.3.0 // indirect
+	github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 // indirect
+)
diff --git a/vendor/github.com/armon/go-metrics/go.sum b/vendor/github.com/armon/go-metrics/go.sum
new file mode 100644
index 0000000..5ffd832
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/go.sum
@@ -0,0 +1,46 @@
+github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4=
+github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740=
+github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8=
+github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE=
+github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
diff --git a/vendor/github.com/armon/go-metrics/inmem.go b/vendor/github.com/armon/go-metrics/inmem.go
index 4e2d6a7..93b0e0a 100644
--- a/vendor/github.com/armon/go-metrics/inmem.go
+++ b/vendor/github.com/armon/go-metrics/inmem.go
@@ -255,11 +255,11 @@
 	}
 	copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters))
 	for k, v := range current.Counters {
-		copyCurrent.Counters[k] = v
+		copyCurrent.Counters[k] = v.deepCopy()
 	}
 	copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples))
 	for k, v := range current.Samples {
-		copyCurrent.Samples[k] = v
+		copyCurrent.Samples[k] = v.deepCopy()
 	}
 	current.RUnlock()
 
diff --git a/vendor/github.com/armon/go-metrics/inmem_endpoint.go b/vendor/github.com/armon/go-metrics/inmem_endpoint.go
index 504f1b3..5fac958 100644
--- a/vendor/github.com/armon/go-metrics/inmem_endpoint.go
+++ b/vendor/github.com/armon/go-metrics/inmem_endpoint.go
@@ -41,6 +41,16 @@
 	DisplayLabels map[string]string `json:"Labels"`
 }
 
+// deepCopy returns a copy of the SampledValue, allocating a new AggregateSample when one is present
+func (source *SampledValue) deepCopy() SampledValue {
+	dest := *source
+	if source.AggregateSample != nil {
+		dest.AggregateSample = &AggregateSample{}
+		*dest.AggregateSample = *source.AggregateSample
+	}
+	return dest
+}
+
 // DisplayMetrics returns a summary of the metrics from the most recent finished interval.
 func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
 	data := i.Data()
@@ -52,12 +62,15 @@
 		return nil, fmt.Errorf("no metric intervals have been initialized yet")
 	case n == 1:
 		// Show the current interval if it's all we have
-		interval = i.intervals[0]
+		interval = data[0]
 	default:
 		// Show the most recent finished interval if we have one
-		interval = i.intervals[n-2]
+		interval = data[n-2]
 	}
 
+	interval.RLock()
+	defer interval.RUnlock()
+
 	summary := MetricsSummary{
 		Timestamp: interval.Interval.Round(time.Second).UTC().String(),
 		Gauges:    make([]GaugeValue, 0, len(interval.Gauges)),
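
deepCopy matters because SampledValue holds a *AggregateSample: a plain struct copy would leave the snapshot and the live interval sharing, and racing on, one sample. A minimal stdlib illustration of the aliasing the change removes (hypothetical simplified types):

	package main

	import "fmt"

	type aggregate struct{ Sum float64 }
	type sampled struct{ Agg *aggregate }

	func main() {
		src := sampled{Agg: &aggregate{Sum: 1}}

		shallow := src           // copies only the pointer
		shallow.Agg.Sum = 99     // mutates src's sample too
		fmt.Println(src.Agg.Sum) // 99

		deep := src
		deep.Agg = &aggregate{} // fresh allocation, as deepCopy does
		*deep.Agg = *src.Agg    // value copy breaks the aliasing
		deep.Agg.Sum = 7
		fmt.Println(src.Agg.Sum) // still 99
	}
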
diff --git a/vendor/github.com/armon/go-metrics/metrics.go b/vendor/github.com/armon/go-metrics/metrics.go
index cf9def7..4920d68 100644
--- a/vendor/github.com/armon/go-metrics/metrics.go
+++ b/vendor/github.com/armon/go-metrics/metrics.go
@@ -197,7 +197,7 @@
 	if labels == nil {
 		return nil
 	}
-	toReturn := labels[:0]
+	toReturn := []Label{}
 	for _, label := range labels {
 		if m.labelIsAllowed(&label) {
 			toReturn = append(toReturn, label)
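
The old labels[:0] reslice shared the caller's backing array, so appending the filtered labels overwrote the input in place; allocating a fresh slice removes the hazard. A tiny demonstration:

	in := []string{"a", "b", "c"}
	out := in[:0]          // shares in's backing array
	out = append(out, "b") // clobbers in[0]
	fmt.Println(in[0])     // "b", not "a"

	safe := []string{}       // what filterLabels does now
	safe = append(safe, "b") // leaves in untouched
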
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
index e352808..1e91766 100644
--- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
+++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
@@ -19,7 +19,7 @@
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 var E_GoprotoEnumPrefix = &proto.ExtensionDesc{
 	ExtendedType:  (*descriptor.EnumOptions)(nil),
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go
index 686bd2a..341c6f5 100644
--- a/vendor/github.com/gogo/protobuf/proto/extensions.go
+++ b/vendor/github.com/gogo/protobuf/proto/extensions.go
@@ -527,6 +527,7 @@
 // SetExtension sets the specified extension of pb to the specified value.
 func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
 	if epb, ok := pb.(extensionsBytes); ok {
+		ClearExtension(pb, extension)
 		newb, err := encodeExtension(extension, value)
 		if err != nil {
 			return err
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
index 53ebd8c..6f1ae12 100644
--- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
+++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
@@ -154,6 +154,10 @@
 	return EncodeExtensionMap(m.extensionsWrite(), data)
 }
 
+func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) {
+	return EncodeExtensionMapBackwards(m.extensionsWrite(), data)
+}
+
 func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
 	o := 0
 	for _, e := range m {
@@ -169,6 +173,23 @@
 	return o, nil
 }
 
+func EncodeExtensionMapBackwards(m map[int32]Extension, data []byte) (n int, err error) {
+	o := 0
+	end := len(data)
+	for _, e := range m {
+		if err := e.Encode(); err != nil {
+			return 0, err
+		}
+		n := copy(data[end-len(e.enc):], e.enc)
+		if n != len(e.enc) {
+			return 0, io.ErrShortBuffer
+		}
+		end -= n
+		o += n
+	}
+	return o, nil
+}
+
 func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
 	e := m[id]
 	if err := e.Encode(); err != nil {
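
EncodeExtensionMapBackwards writes each encoded extension at the tail of the buffer and walks the write position toward the front, so the caller can size the buffer first and fill it from the end. A stripped-down sketch of the same pattern (stdlib only, with an explicit bounds check):

	package backfill

	import "io"

	// fillBackwards copies each chunk to the current end of dst,
	// moving the end marker toward index 0 after each copy.
	func fillBackwards(dst []byte, chunks [][]byte) (int, error) {
		end := len(dst)
		total := 0
		for _, c := range chunks {
			if end < len(c) {
				return 0, io.ErrShortBuffer
			}
			copy(dst[end-len(c):end], c)
			end -= len(c)
			total += len(c)
		}
		return total, nil
	}
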
diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go
index d17f802..80db1c1 100644
--- a/vendor/github.com/gogo/protobuf/proto/lib.go
+++ b/vendor/github.com/gogo/protobuf/proto/lib.go
@@ -948,13 +948,19 @@
 	return false
 }
 
-// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const GoGoProtoPackageIsVersion2 = true
+const (
+	// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	GoGoProtoPackageIsVersion3 = true
 
-// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const GoGoProtoPackageIsVersion1 = true
+	// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	GoGoProtoPackageIsVersion2 = true
+
+	// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	GoGoProtoPackageIsVersion1 = true
+)
 
 // InternalMessageInfo is a type used internally by generated .pb.go files.
 // This type is not intended to be used by non-generated code.
diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go
index c9e5fa0..62c5562 100644
--- a/vendor/github.com/gogo/protobuf/proto/properties.go
+++ b/vendor/github.com/gogo/protobuf/proto/properties.go
@@ -400,6 +400,15 @@
 	return sprop
 }
 
+type (
+	oneofFuncsIface interface {
+		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+	}
+	oneofWrappersIface interface {
+		XXX_OneofWrappers() []interface{}
+	}
+)
+
 // getPropertiesLocked requires that propertiesMu is held.
 func getPropertiesLocked(t reflect.Type) *StructProperties {
 	if prop, ok := propertiesMap[t]; ok {
@@ -441,37 +450,40 @@
 	// Re-order prop.order.
 	sort.Sort(prop)
 
-	type oneofMessage interface {
-		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
-	}
-	if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok {
+	if isOneofMessage {
 		var oots []interface{}
-		_, _, _, oots = om.XXX_OneofFuncs()
-
-		// Interpret oneof metadata.
-		prop.OneofTypes = make(map[string]*OneofProperties)
-		for _, oot := range oots {
-			oop := &OneofProperties{
-				Type: reflect.ValueOf(oot).Type(), // *T
-				Prop: new(Properties),
-			}
-			sft := oop.Type.Elem().Field(0)
-			oop.Prop.Name = sft.Name
-			oop.Prop.Parse(sft.Tag.Get("protobuf"))
-			// There will be exactly one interface field that
-			// this new value is assignable to.
-			for i := 0; i < t.NumField(); i++ {
-				f := t.Field(i)
-				if f.Type.Kind() != reflect.Interface {
-					continue
+		switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+		case oneofFuncsIface:
+			_, _, _, oots = m.XXX_OneofFuncs()
+		case oneofWrappersIface:
+			oots = m.XXX_OneofWrappers()
+		}
+		if len(oots) > 0 {
+			// Interpret oneof metadata.
+			prop.OneofTypes = make(map[string]*OneofProperties)
+			for _, oot := range oots {
+				oop := &OneofProperties{
+					Type: reflect.ValueOf(oot).Type(), // *T
+					Prop: new(Properties),
 				}
-				if !oop.Type.AssignableTo(f.Type) {
-					continue
+				sft := oop.Type.Elem().Field(0)
+				oop.Prop.Name = sft.Name
+				oop.Prop.Parse(sft.Tag.Get("protobuf"))
+				// There will be exactly one interface field that
+				// this new value is assignable to.
+				for i := 0; i < t.NumField(); i++ {
+					f := t.Field(i)
+					if f.Type.Kind() != reflect.Interface {
+						continue
+					}
+					if !oop.Type.AssignableTo(f.Type) {
+						continue
+					}
+					oop.Field = i
+					break
 				}
-				oop.Field = i
-				break
+				prop.OneofTypes[oop.Prop.OrigName] = oop
 			}
-			prop.OneofTypes[oop.Prop.OrigName] = oop
 		}
 	}
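
The interface pair declared above lets one code path serve both generator vintages: older gogo/golang protoc output exposes XXX_OneofFuncs, while protoc-gen-go v1.3+ emits XXX_OneofWrappers. Condensed, the probe shared by the properties, marshal, and unmarshal tables is:

	var oneofImplementers []interface{}
	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
	case oneofFuncsIface: // legacy generated code
		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
	case oneofWrappersIface: // protoc-gen-go >= v1.3 generated code
		oneofImplementers = m.XXX_OneofWrappers()
	}
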
 
diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go
index 9b1538d..db9927a 100644
--- a/vendor/github.com/gogo/protobuf/proto/table_marshal.go
+++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go
@@ -389,8 +389,13 @@
 	// get oneof implementers
 	var oneofImplementers []interface{}
 	// gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler
-	if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage {
-		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+	if isOneofMessage {
+		switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+		case oneofFuncsIface:
+			_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+		case oneofWrappersIface:
+			oneofImplementers = m.XXX_OneofWrappers()
+		}
 	}
 
 	// normal fields
@@ -519,10 +524,6 @@
 	}
 }
 
-type oneofMessage interface {
-	XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
-}
-
 // wiretype returns the wire encoding of the type.
 func wiretype(encoding string) uint64 {
 	switch encoding {
diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go
index f520106..60dcf70 100644
--- a/vendor/github.com/gogo/protobuf/proto/table_merge.go
+++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go
@@ -530,6 +530,25 @@
 			}
 		case reflect.Struct:
 			switch {
+			case isSlice && !isPointer: // E.g. []pb.T
+				mergeInfo := getMergeInfo(tf)
+				zero := reflect.Zero(tf)
+				mfi.merge = func(dst, src pointer) {
+					// TODO: Make this faster?
+					dstsp := dst.asPointerTo(f.Type)
+					dsts := dstsp.Elem()
+					srcs := src.asPointerTo(f.Type).Elem()
+					for i := 0; i < srcs.Len(); i++ {
+						dsts = reflect.Append(dsts, zero)
+						srcElement := srcs.Index(i).Addr()
+						dstElement := dsts.Index(dsts.Len() - 1).Addr()
+						mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement))
+					}
+					if dsts.IsNil() {
+						dsts = reflect.MakeSlice(f.Type, 0, 0)
+					}
+					dstsp.Elem().Set(dsts)
+				}
 			case !isPointer:
 				mergeInfo := getMergeInfo(tf)
 				mfi.merge = func(dst, src pointer) {
diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
index bb2622f..9372293 100644
--- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
+++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
@@ -371,15 +371,18 @@
 	}
 
 	// Find any types associated with oneof fields.
-	// TODO: XXX_OneofFuncs returns more info than we need.  Get rid of some of it?
-	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
 	// gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler
-	if fn.IsValid() && len(oneofFields) > 0 {
-		res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
-		for i := res.Len() - 1; i >= 0; i-- {
-			v := res.Index(i)                             // interface{}
-			tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
-			typ := tptr.Elem()                            // Msg_X
+	if len(oneofFields) > 0 {
+		var oneofImplementers []interface{}
+		switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+		case oneofFuncsIface:
+			_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+		case oneofWrappersIface:
+			oneofImplementers = m.XXX_OneofWrappers()
+		}
+		for _, v := range oneofImplementers {
+			tptr := reflect.TypeOf(v) // *Msg_X
+			typ := tptr.Elem()        // Msg_X
 
 			f := typ.Field(0) // oneof implementers have one field
 			baseUnmarshal := fieldUnmarshaler(&f)
@@ -407,11 +410,12 @@
 					u.setTag(fieldNum, of.field, unmarshal, 0, name)
 				}
 			}
+
 		}
 	}
 
 	// Get extension ranges, if any.
-	fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
 	if fn.IsValid() {
 		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() {
 			panic("a message with extensions, but no extensions field in " + t.Name())
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
index cacfa39..d1307d9 100644
--- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
+++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
@@ -18,7 +18,7 @@
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 type FieldDescriptorProto_Type int32
 
diff --git a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
deleted file mode 100644
index ceadde6..0000000
--- a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package sortkeys
-
-import (
-	"sort"
-)
-
-func Strings(l []string) {
-	sort.Strings(l)
-}
-
-func Float64s(l []float64) {
-	sort.Float64s(l)
-}
-
-func Float32s(l []float32) {
-	sort.Sort(Float32Slice(l))
-}
-
-func Int64s(l []int64) {
-	sort.Sort(Int64Slice(l))
-}
-
-func Int32s(l []int32) {
-	sort.Sort(Int32Slice(l))
-}
-
-func Uint64s(l []uint64) {
-	sort.Sort(Uint64Slice(l))
-}
-
-func Uint32s(l []uint32) {
-	sort.Sort(Uint32Slice(l))
-}
-
-func Bools(l []bool) {
-	sort.Sort(BoolSlice(l))
-}
-
-type BoolSlice []bool
-
-func (p BoolSlice) Len() int           { return len(p) }
-func (p BoolSlice) Less(i, j int) bool { return p[j] }
-func (p BoolSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
-
-type Int64Slice []int64
-
-func (p Int64Slice) Len() int           { return len(p) }
-func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p Int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
-
-type Int32Slice []int32
-
-func (p Int32Slice) Len() int           { return len(p) }
-func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p Int32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
-
-type Uint64Slice []uint64
-
-func (p Uint64Slice) Len() int           { return len(p) }
-func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p Uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
-
-type Uint32Slice []uint32
-
-func (p Uint32Slice) Len() int           { return len(p) }
-func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p Uint32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
-
-type Float32Slice []float32
-
-func (p Float32Slice) Len() int           { return len(p) }
-func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p Float32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml
deleted file mode 100644
index 4f2ee4d..0000000
--- a/vendor/github.com/google/btree/.travis.yml
+++ /dev/null
@@ -1 +0,0 @@
-language: go
diff --git a/vendor/github.com/google/btree/LICENSE b/vendor/github.com/google/btree/LICENSE
deleted file mode 100644
index d645695..0000000
--- a/vendor/github.com/google/btree/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md
deleted file mode 100644
index 6062a4d..0000000
--- a/vendor/github.com/google/btree/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# BTree implementation for Go
-
-![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master)
-
-This package provides an in-memory B-Tree implementation for Go, useful as
-an ordered, mutable data structure.
-
-The API is based on the wonderful
-http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to
-act as a drop-in replacement for gollrb trees.
-
-See http://godoc.org/github.com/google/btree for documentation.
diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go
deleted file mode 100644
index 6ff062f..0000000
--- a/vendor/github.com/google/btree/btree.go
+++ /dev/null
@@ -1,890 +0,0 @@
-// Copyright 2014 Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package btree implements in-memory B-Trees of arbitrary degree.
-//
-// btree implements an in-memory B-Tree for use as an ordered data structure.
-// It is not meant for persistent storage solutions.
-//
-// It has a flatter structure than an equivalent red-black or other binary tree,
-// which in some cases yields better memory usage and/or performance.
-// See some discussion on the matter here:
-//   http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
-// Note, though, that this project is in no way related to the C++ B-Tree
-// implementation written about there.
-//
-// Within this tree, each node contains a slice of items and a (possibly nil)
-// slice of children.  For basic numeric values or raw structs, this can cause
-// efficiency differences when compared to equivalent C++ template code that
-// stores values in arrays within the node:
-//   * Due to the overhead of storing values as interfaces (each
-//     value needs to be stored as the value itself, then 2 words for the
-//     interface pointing to that value and its type), resulting in higher
-//     memory use.
-//   * Since interfaces can point to values anywhere in memory, values are
-//     most likely not stored in contiguous blocks, resulting in a higher
-//     number of cache misses.
-// These issues don't tend to matter, though, when working with strings or other
-// heap-allocated structures, since C++-equivalent structures also must store
-// pointers and also distribute their values across the heap.
-//
-// This implementation is designed to be a drop-in replacement for gollrb.LLRB
-// trees (http://github.com/petar/gollrb), an excellent and probably the most
-// widely used ordered tree implementation in the Go ecosystem currently.
-// Its functions, therefore, exactly mirror those of
-// llrb.LLRB where possible.  Unlike gollrb, though, we currently don't
-// support storing multiple equivalent values.
-package btree
-
-import (
-	"fmt"
-	"io"
-	"sort"
-	"strings"
-	"sync"
-)
-
-// Item represents a single object in the tree.
-type Item interface {
-	// Less tests whether the current item is less than the given argument.
-	//
-	// This must provide a strict weak ordering.
-	// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
-	// hold one of either a or b in the tree).
-	Less(than Item) bool
-}
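For concreteness, a minimal sketch of a user-defined Item; the `person` type here is hypothetical and not part of this package. Only the `Less` ordering is required, and two items with `!a.Less(b) && !b.Less(a)` are treated as equal, so a second insert of an equal-aged person would replace the first:

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

// person orders tree entries by age. Ages that compare equal are
// treated as the same item by the tree.
type person struct {
	name string
	age  int
}

// Less implements btree.Item using age as the sort key.
func (p person) Less(than btree.Item) bool {
	return p.age < than.(person).age
}

func main() {
	tr := btree.New(2)
	tr.ReplaceOrInsert(person{name: "ada", age: 36})
	tr.ReplaceOrInsert(person{name: "alan", age: 41})
	fmt.Println(tr.Min()) // {ada 36}
}
```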
-
-const (
-	DefaultFreeListSize = 32
-)
-
-var (
-	nilItems    = make(items, 16)
-	nilChildren = make(children, 16)
-)
-
-// FreeList represents a free list of btree nodes. By default each
-// BTree has its own FreeList, but multiple BTrees can share the same
-// FreeList.
-// Two BTrees using the same freelist are safe for concurrent write access.
-type FreeList struct {
-	mu       sync.Mutex
-	freelist []*node
-}
-
-// NewFreeList creates a new free list.
-// size is the maximum size of the returned free list.
-func NewFreeList(size int) *FreeList {
-	return &FreeList{freelist: make([]*node, 0, size)}
-}
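As the comment above notes, a single FreeList may back several trees; a short sketch (the sizes are chosen arbitrarily):

```go
package main

import "github.com/google/btree"

func main() {
	// One freelist shared by two trees: nodes released by either tree
	// (for example via Clear(true)) can be reused by the other, and the
	// freelist's internal mutex keeps concurrent writers safe.
	fl := btree.NewFreeList(64)
	t1 := btree.NewWithFreeList(8, fl)
	t2 := btree.NewWithFreeList(8, fl)
	t1.ReplaceOrInsert(btree.Int(1))
	t2.ReplaceOrInsert(btree.Int(2))
}
```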
-
-func (f *FreeList) newNode() (n *node) {
-	f.mu.Lock()
-	index := len(f.freelist) - 1
-	if index < 0 {
-		f.mu.Unlock()
-		return new(node)
-	}
-	n = f.freelist[index]
-	f.freelist[index] = nil
-	f.freelist = f.freelist[:index]
-	f.mu.Unlock()
-	return
-}
-
-// freeNode adds the given node to the list, returning true if it was added
-// and false if it was discarded.
-func (f *FreeList) freeNode(n *node) (out bool) {
-	f.mu.Lock()
-	if len(f.freelist) < cap(f.freelist) {
-		f.freelist = append(f.freelist, n)
-		out = true
-	}
-	f.mu.Unlock()
-	return
-}
-
-// ItemIterator allows callers of Ascend* to iterate in-order over portions of
-// the tree.  When this function returns false, iteration will stop and the
-// associated Ascend* function will immediately return.
-type ItemIterator func(i Item) bool
-
-// New creates a new B-Tree with the given degree.
-//
-// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
-// and 2-4 children).
-func New(degree int) *BTree {
-	return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
-}
-
-// NewWithFreeList creates a new B-Tree that uses the given node free list.
-func NewWithFreeList(degree int, f *FreeList) *BTree {
-	if degree <= 1 {
-		panic("bad degree")
-	}
-	return &BTree{
-		degree: degree,
-		cow:    &copyOnWriteContext{freelist: f},
-	}
-}
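Putting New together with the exported operations defined later in this file, a minimal end-to-end sketch:

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	// New(2) builds a 2-3-4 tree: each node holds 1-3 items and 2-4 children.
	tr := btree.New(2)
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(btree.Int(i))
	}
	fmt.Println(tr.Len())              // 10
	fmt.Println(tr.Get(btree.Int(3)))  // 3
	fmt.Println(tr.Has(btree.Int(42))) // false
}
```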
-
-// items stores items in a node.
-type items []Item
-
-// insertAt inserts a value into the given index, pushing all subsequent values
-// forward.
-func (s *items) insertAt(index int, item Item) {
-	*s = append(*s, nil)
-	if index < len(*s) {
-		copy((*s)[index+1:], (*s)[index:])
-	}
-	(*s)[index] = item
-}
-
-// removeAt removes a value at a given index, pulling all subsequent values
-// back.
-func (s *items) removeAt(index int) Item {
-	item := (*s)[index]
-	copy((*s)[index:], (*s)[index+1:])
-	(*s)[len(*s)-1] = nil
-	*s = (*s)[:len(*s)-1]
-	return item
-}
-
-// pop removes and returns the last element in the list.
-func (s *items) pop() (out Item) {
-	index := len(*s) - 1
-	out = (*s)[index]
-	(*s)[index] = nil
-	*s = (*s)[:index]
-	return
-}
-
-// truncate truncates this instance at index so that it contains only the
-// first index items. index must be less than or equal to length.
-func (s *items) truncate(index int) {
-	var toClear items
-	*s, toClear = (*s)[:index], (*s)[index:]
-	for len(toClear) > 0 {
-		toClear = toClear[copy(toClear, nilItems):]
-	}
-}
-
-// find returns the index where the given item should be inserted into this
-// list.  'found' is true if the item already exists in the list at the given
-// index.
-func (s items) find(item Item) (index int, found bool) {
-	i := sort.Search(len(s), func(i int) bool {
-		return item.Less(s[i])
-	})
-	if i > 0 && !s[i-1].Less(item) {
-		return i - 1, true
-	}
-	return i, false
-}
-
-// children stores child nodes in a node.
-type children []*node
-
-// insertAt inserts a value into the given index, pushing all subsequent values
-// forward.
-func (s *children) insertAt(index int, n *node) {
-	*s = append(*s, nil)
-	if index < len(*s) {
-		copy((*s)[index+1:], (*s)[index:])
-	}
-	(*s)[index] = n
-}
-
-// removeAt removes a value at a given index, pulling all subsequent values
-// back.
-func (s *children) removeAt(index int) *node {
-	n := (*s)[index]
-	copy((*s)[index:], (*s)[index+1:])
-	(*s)[len(*s)-1] = nil
-	*s = (*s)[:len(*s)-1]
-	return n
-}
-
-// pop removes and returns the last element in the list.
-func (s *children) pop() (out *node) {
-	index := len(*s) - 1
-	out = (*s)[index]
-	(*s)[index] = nil
-	*s = (*s)[:index]
-	return
-}
-
-// truncate truncates this instance at index so that it contains only the
-// first index children. index must be less than or equal to length.
-func (s *children) truncate(index int) {
-	var toClear children
-	*s, toClear = (*s)[:index], (*s)[index:]
-	for len(toClear) > 0 {
-		toClear = toClear[copy(toClear, nilChildren):]
-	}
-}
-
-// node is an internal node in a tree.
-//
-// It must at all times maintain the invariant that either
-//   * len(children) == 0, len(items) unconstrained
-//   * len(children) == len(items) + 1
-type node struct {
-	items    items
-	children children
-	cow      *copyOnWriteContext
-}
-
-func (n *node) mutableFor(cow *copyOnWriteContext) *node {
-	if n.cow == cow {
-		return n
-	}
-	out := cow.newNode()
-	if cap(out.items) >= len(n.items) {
-		out.items = out.items[:len(n.items)]
-	} else {
-		out.items = make(items, len(n.items), cap(n.items))
-	}
-	copy(out.items, n.items)
-	// Copy children
-	if cap(out.children) >= len(n.children) {
-		out.children = out.children[:len(n.children)]
-	} else {
-		out.children = make(children, len(n.children), cap(n.children))
-	}
-	copy(out.children, n.children)
-	return out
-}
-
-func (n *node) mutableChild(i int) *node {
-	c := n.children[i].mutableFor(n.cow)
-	n.children[i] = c
-	return c
-}
-
-// split splits the given node at the given index.  The current node shrinks,
-// and this function returns the item that existed at that index and a new node
-// containing all items/children after it.
-func (n *node) split(i int) (Item, *node) {
-	item := n.items[i]
-	next := n.cow.newNode()
-	next.items = append(next.items, n.items[i+1:]...)
-	n.items.truncate(i)
-	if len(n.children) > 0 {
-		next.children = append(next.children, n.children[i+1:]...)
-		n.children.truncate(i + 1)
-	}
-	return item, next
-}
-
-// maybeSplitChild checks if a child should be split, and if so splits it.
-// Returns whether or not a split occurred.
-func (n *node) maybeSplitChild(i, maxItems int) bool {
-	if len(n.children[i].items) < maxItems {
-		return false
-	}
-	first := n.mutableChild(i)
-	item, second := first.split(maxItems / 2)
-	n.items.insertAt(i, item)
-	n.children.insertAt(i+1, second)
-	return true
-}
-
-// insert inserts an item into the subtree rooted at this node, making sure
-// no nodes in the subtree exceed maxItems items.  Should an equivalent item
-// be found/replaced by insert, it will be returned.
-func (n *node) insert(item Item, maxItems int) Item {
-	i, found := n.items.find(item)
-	if found {
-		out := n.items[i]
-		n.items[i] = item
-		return out
-	}
-	if len(n.children) == 0 {
-		n.items.insertAt(i, item)
-		return nil
-	}
-	if n.maybeSplitChild(i, maxItems) {
-		inTree := n.items[i]
-		switch {
-		case item.Less(inTree):
-			// no change, we want first split node
-		case inTree.Less(item):
-			i++ // we want second split node
-		default:
-			out := n.items[i]
-			n.items[i] = item
-			return out
-		}
-	}
-	return n.mutableChild(i).insert(item, maxItems)
-}
-
-// get finds the given key in the subtree and returns it.
-func (n *node) get(key Item) Item {
-	i, found := n.items.find(key)
-	if found {
-		return n.items[i]
-	} else if len(n.children) > 0 {
-		return n.children[i].get(key)
-	}
-	return nil
-}
-
-// min returns the first item in the subtree.
-func min(n *node) Item {
-	if n == nil {
-		return nil
-	}
-	for len(n.children) > 0 {
-		n = n.children[0]
-	}
-	if len(n.items) == 0 {
-		return nil
-	}
-	return n.items[0]
-}
-
-// max returns the last item in the subtree.
-func max(n *node) Item {
-	if n == nil {
-		return nil
-	}
-	for len(n.children) > 0 {
-		n = n.children[len(n.children)-1]
-	}
-	if len(n.items) == 0 {
-		return nil
-	}
-	return n.items[len(n.items)-1]
-}
-
-// toRemove details what item to remove in a node.remove call.
-type toRemove int
-
-const (
-	removeItem toRemove = iota // removes the given item
-	removeMin                  // removes smallest item in the subtree
-	removeMax                  // removes largest item in the subtree
-)
-
-// remove removes an item from the subtree rooted at this node.
-func (n *node) remove(item Item, minItems int, typ toRemove) Item {
-	var i int
-	var found bool
-	switch typ {
-	case removeMax:
-		if len(n.children) == 0 {
-			return n.items.pop()
-		}
-		i = len(n.items)
-	case removeMin:
-		if len(n.children) == 0 {
-			return n.items.removeAt(0)
-		}
-		i = 0
-	case removeItem:
-		i, found = n.items.find(item)
-		if len(n.children) == 0 {
-			if found {
-				return n.items.removeAt(i)
-			}
-			return nil
-		}
-	default:
-		panic("invalid type")
-	}
-	// If we get to here, we have children.
-	if len(n.children[i].items) <= minItems {
-		return n.growChildAndRemove(i, item, minItems, typ)
-	}
-	child := n.mutableChild(i)
-	// Either we had enough items to begin with, or we've done some
-	// merging/stealing, because we've got enough now and we're ready to return
-	// stuff.
-	if found {
-		// The item exists at index 'i', and the child we've selected can give us a
-		// predecessor, since if we've gotten here it's got > minItems items in it.
-		out := n.items[i]
-		// We use our special-case 'remove' call with typ=maxItem to pull the
-		// predecessor of item i (the rightmost leaf of our immediate left child)
-		// and set it into where we pulled the item from.
-		n.items[i] = child.remove(nil, minItems, removeMax)
-		return out
-	}
-	// Final recursive call.  Once we're here, we know that the item isn't in this
-	// node and that the child is big enough to remove from.
-	return child.remove(item, minItems, typ)
-}
-
-// growChildAndRemove grows child 'i' to make sure it's possible to remove an
-// item from it while keeping it at minItems, then calls remove to actually
-// remove it.
-//
-// Most documentation says we have to do two sets of special casing:
-//   1) item is in this node
-//   2) item is in child
-// In both cases, we need to handle the two subcases:
-//   A) node has enough values that it can spare one
-//   B) node doesn't have enough values
-// For the latter, we have to check:
-//   a) left sibling has node to spare
-//   b) right sibling has node to spare
-//   c) we must merge
-// To simplify our code here, we handle cases #1 and #2 the same:
-// If a node doesn't have enough items, we make sure it does (using a,b,c).
-// We then simply redo our remove call, and the second time (regardless of
-// whether we're in case 1 or 2), we'll have enough items and can guarantee
-// that we hit case A.
-func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
-	if i > 0 && len(n.children[i-1].items) > minItems {
-		// Steal from left child
-		child := n.mutableChild(i)
-		stealFrom := n.mutableChild(i - 1)
-		stolenItem := stealFrom.items.pop()
-		child.items.insertAt(0, n.items[i-1])
-		n.items[i-1] = stolenItem
-		if len(stealFrom.children) > 0 {
-			child.children.insertAt(0, stealFrom.children.pop())
-		}
-	} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
-		// steal from right child
-		child := n.mutableChild(i)
-		stealFrom := n.mutableChild(i + 1)
-		stolenItem := stealFrom.items.removeAt(0)
-		child.items = append(child.items, n.items[i])
-		n.items[i] = stolenItem
-		if len(stealFrom.children) > 0 {
-			child.children = append(child.children, stealFrom.children.removeAt(0))
-		}
-	} else {
-		if i >= len(n.items) {
-			i--
-		}
-		child := n.mutableChild(i)
-		// merge with right child
-		mergeItem := n.items.removeAt(i)
-		mergeChild := n.children.removeAt(i + 1)
-		child.items = append(child.items, mergeItem)
-		child.items = append(child.items, mergeChild.items...)
-		child.children = append(child.children, mergeChild.children...)
-		n.cow.freeNode(mergeChild)
-	}
-	return n.remove(item, minItems, typ)
-}
-
-type direction int
-
-const (
-	descend = direction(-1)
-	ascend  = direction(+1)
-)
-
-// iterate provides a simple method for iterating over elements in the tree.
-//
-// When ascending, the 'start' should be less than 'stop' and when descending,
-// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
-// will force the iterator to include the first item when it equals 'start',
-// thus creating a "greaterOrEqual" or "lessThanEqual" query rather than just
-// a "greaterThan" or "lessThan" query.
-func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) {
-	var ok, found bool
-	var index int
-	switch dir {
-	case ascend:
-		if start != nil {
-			index, _ = n.items.find(start)
-		}
-		for i := index; i < len(n.items); i++ {
-			if len(n.children) > 0 {
-				if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
-					return hit, false
-				}
-			}
-			if !includeStart && !hit && start != nil && !start.Less(n.items[i]) {
-				hit = true
-				continue
-			}
-			hit = true
-			if stop != nil && !n.items[i].Less(stop) {
-				return hit, false
-			}
-			if !iter(n.items[i]) {
-				return hit, false
-			}
-		}
-		if len(n.children) > 0 {
-			if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
-				return hit, false
-			}
-		}
-	case descend:
-		if start != nil {
-			index, found = n.items.find(start)
-			if !found {
-				index = index - 1
-			}
-		} else {
-			index = len(n.items) - 1
-		}
-		for i := index; i >= 0; i-- {
-			if start != nil && !n.items[i].Less(start) {
-				if !includeStart || hit || start.Less(n.items[i]) {
-					continue
-				}
-			}
-			if len(n.children) > 0 {
-				if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
-					return hit, false
-				}
-			}
-			if stop != nil && !stop.Less(n.items[i]) {
-				return hit, false //	continue
-			}
-			hit = true
-			if !iter(n.items[i]) {
-				return hit, false
-			}
-		}
-		if len(n.children) > 0 {
-			if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
-				return hit, false
-			}
-		}
-	}
-	return hit, true
-}
-
-// Used for testing/debugging purposes.
-func (n *node) print(w io.Writer, level int) {
-	fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat("  ", level), n.items)
-	for _, c := range n.children {
-		c.print(w, level+1)
-	}
-}
-
-// BTree is an implementation of a B-Tree.
-//
-// BTree stores Item instances in an ordered structure, allowing easy insertion,
-// removal, and iteration.
-//
-// Write operations are not safe for concurrent mutation by multiple
-// goroutines, but Read operations are.
-type BTree struct {
-	degree int
-	length int
-	root   *node
-	cow    *copyOnWriteContext
-}
-
-// copyOnWriteContext pointers determine node ownership... a tree with a write
-// context equivalent to a node's write context is allowed to modify that node.
-// A tree whose write context does not match a node's is not allowed to modify
-// it, and must create a new, writable copy (i.e., it is a Clone).
-//
-// When doing any write operation, we maintain the invariant that the current
-// node's context is equal to the context of the tree that requested the write.
-// We do this by, before we descend into any node, creating a copy with the
-// correct context if the contexts don't match.
-//
-// Since the node we're currently visiting on any write has the requesting
-// tree's context, that node is modifiable in place.  Children of that node may
-// not share context, but before we descend into them, we'll make a mutable
-// copy.
-type copyOnWriteContext struct {
-	freelist *FreeList
-}
-
-// Clone clones the btree, lazily.  Clone should not be called concurrently,
-// but the original tree (t) and the new tree (t2) can be used concurrently
-// once the Clone call completes.
-//
-// The internal tree structure of t is marked read-only and shared between t and
-// t2.  Writes to both t and t2 use copy-on-write logic, creating new nodes
-// whenever one of t's original nodes would have been modified.  Read operations
-// should have no performance degradation.  Write operations for both t and t2
-// will initially experience minor slow-downs caused by additional allocs and
-// copies due to the aforementioned copy-on-write logic, but should converge to
-// the performance characteristics of the original tree.
-func (t *BTree) Clone() (t2 *BTree) {
-	// Create two entirely new copy-on-write contexts.
-	// This operation effectively creates three trees:
-	//   the original, shared nodes (old t.cow)
-	//   the new t.cow nodes
-	//   the new out.cow nodes
-	cow1, cow2 := *t.cow, *t.cow
-	out := *t
-	t.cow = &cow1
-	out.cow = &cow2
-	return &out
-}
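A small sketch of the lazy-clone behavior described above; after Clone, each tree evolves independently while unchanged nodes stay shared:

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	t := btree.New(8)
	t.ReplaceOrInsert(btree.Int(1))

	// t2 starts out sharing every node with t; each later write copies
	// only the nodes on the path it modifies.
	t2 := t.Clone()
	t2.ReplaceOrInsert(btree.Int(2))

	fmt.Println(t.Len(), t2.Len()) // 1 2
}
```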
-
-// maxItems returns the max number of items to allow per node.
-func (t *BTree) maxItems() int {
-	return t.degree*2 - 1
-}
-
-// minItems returns the min number of items to allow per node (ignored for the
-// root node).
-func (t *BTree) minItems() int {
-	return t.degree - 1
-}
-
-func (c *copyOnWriteContext) newNode() (n *node) {
-	n = c.freelist.newNode()
-	n.cow = c
-	return
-}
-
-type freeType int
-
-const (
-	ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
-	ftStored                       // node was stored in the freelist for later use
-	ftNotOwned                     // node was ignored by COW, since it's owned by another one
-)
-
-// freeNode frees a node within a given COW context, if it's owned by that
-// context.  It returns what happened to the node (see freeType const
-// documentation).
-func (c *copyOnWriteContext) freeNode(n *node) freeType {
-	if n.cow == c {
-		// clear to allow GC
-		n.items.truncate(0)
-		n.children.truncate(0)
-		n.cow = nil
-		if c.freelist.freeNode(n) {
-			return ftStored
-		} else {
-			return ftFreelistFull
-		}
-	} else {
-		return ftNotOwned
-	}
-}
-
-// ReplaceOrInsert adds the given item to the tree.  If an item in the tree
-// already equals the given one, it is removed from the tree and returned.
-// Otherwise, nil is returned.
-//
-// nil cannot be added to the tree (will panic).
-func (t *BTree) ReplaceOrInsert(item Item) Item {
-	if item == nil {
-		panic("nil item being added to BTree")
-	}
-	if t.root == nil {
-		t.root = t.cow.newNode()
-		t.root.items = append(t.root.items, item)
-		t.length++
-		return nil
-	} else {
-		t.root = t.root.mutableFor(t.cow)
-		if len(t.root.items) >= t.maxItems() {
-			item2, second := t.root.split(t.maxItems() / 2)
-			oldroot := t.root
-			t.root = t.cow.newNode()
-			t.root.items = append(t.root.items, item2)
-			t.root.children = append(t.root.children, oldroot, second)
-		}
-	}
-	out := t.root.insert(item, t.maxItems())
-	if out == nil {
-		t.length++
-	}
-	return out
-}
-
-// Delete removes an item equal to the passed in item from the tree, returning
-// it.  If no such item exists, returns nil.
-func (t *BTree) Delete(item Item) Item {
-	return t.deleteItem(item, removeItem)
-}
-
-// DeleteMin removes the smallest item in the tree and returns it.
-// If no such item exists, returns nil.
-func (t *BTree) DeleteMin() Item {
-	return t.deleteItem(nil, removeMin)
-}
-
-// DeleteMax removes the largest item in the tree and returns it.
-// If no such item exists, returns nil.
-func (t *BTree) DeleteMax() Item {
-	return t.deleteItem(nil, removeMax)
-}
-
-func (t *BTree) deleteItem(item Item, typ toRemove) Item {
-	if t.root == nil || len(t.root.items) == 0 {
-		return nil
-	}
-	t.root = t.root.mutableFor(t.cow)
-	out := t.root.remove(item, t.minItems(), typ)
-	if len(t.root.items) == 0 && len(t.root.children) > 0 {
-		oldroot := t.root
-		t.root = t.root.children[0]
-		t.cow.freeNode(oldroot)
-	}
-	if out != nil {
-		t.length--
-	}
-	return out
-}
-
-// AscendRange calls the iterator for every value in the tree within the range
-// [greaterOrEqual, lessThan), until iterator returns false.
-func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
-	if t.root == nil {
-		return
-	}
-	t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator)
-}
-
-// AscendLessThan calls the iterator for every value in the tree within the range
-// [first, pivot), until iterator returns false.
-func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
-	if t.root == nil {
-		return
-	}
-	t.root.iterate(ascend, nil, pivot, false, false, iterator)
-}
-
-// AscendGreaterOrEqual calls the iterator for every value in the tree within
-// the range [pivot, last], until iterator returns false.
-func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
-	if t.root == nil {
-		return
-	}
-	t.root.iterate(ascend, pivot, nil, true, false, iterator)
-}
-
-// Ascend calls the iterator for every value in the tree within the range
-// [first, last], until iterator returns false.
-func (t *BTree) Ascend(iterator ItemIterator) {
-	if t.root == nil {
-		return
-	}
-	t.root.iterate(ascend, nil, nil, false, false, iterator)
-}
-
-// DescendRange calls the iterator for every value in the tree within the range
-// [lessOrEqual, greaterThan), until iterator returns false.
-func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
-	if t.root == nil {
-		return
-	}
-	t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator)
-}
-
-// DescendLessOrEqual calls the iterator for every value in the tree within the range
-// [pivot, first], until iterator returns false.
-func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
-	if t.root == nil {
-		return
-	}
-	t.root.iterate(descend, pivot, nil, true, false, iterator)
-}
-
-// DescendGreaterThan calls the iterator for every value in the tree within
-// the range (pivot, last], until iterator returns false.
-func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
-	if t.root == nil {
-		return
-	}
-	t.root.iterate(descend, nil, pivot, false, false, iterator)
-}
-
-// Descend calls the iterator for every value in the tree within the range
-// [last, first], until iterator returns false.
-func (t *BTree) Descend(iterator ItemIterator) {
-	if t.root == nil {
-		return
-	}
-	t.root.iterate(descend, nil, nil, false, false, iterator)
-}
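The Ascend*/Descend* family all share the iterator contract noted above: traversal continues while the iterator returns true. A short sketch using AscendRange with an early stop:

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(2)
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(btree.Int(i))
	}
	// Visit [3, 7), but stop once a value >= 5 has been seen:
	// returning false from the iterator ends the traversal.
	tr.AscendRange(btree.Int(3), btree.Int(7), func(i btree.Item) bool {
		fmt.Println(i) // 3, then 4, then 5
		return i.(btree.Int) < 5
	})
}
```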
-
-// Get looks for the key item in the tree, returning it.  It returns nil if
-// unable to find that item.
-func (t *BTree) Get(key Item) Item {
-	if t.root == nil {
-		return nil
-	}
-	return t.root.get(key)
-}
-
-// Min returns the smallest item in the tree, or nil if the tree is empty.
-func (t *BTree) Min() Item {
-	return min(t.root)
-}
-
-// Max returns the largest item in the tree, or nil if the tree is empty.
-func (t *BTree) Max() Item {
-	return max(t.root)
-}
-
-// Has returns true if the given key is in the tree.
-func (t *BTree) Has(key Item) bool {
-	return t.Get(key) != nil
-}
-
-// Len returns the number of items currently in the tree.
-func (t *BTree) Len() int {
-	return t.length
-}
-
-// Clear removes all items from the btree.  If addNodesToFreelist is true,
-// t's nodes are added to its freelist as part of this call, until the freelist
-// is full.  Otherwise, the root node is simply dereferenced and the subtree
-// left to Go's normal GC processes.
-//
-// This can be much faster
-// than calling Delete on all elements, because that requires finding/removing
-// each element in the tree and updating the tree accordingly.  It also is
-// somewhat faster than creating a new tree to replace the old one, because
-// nodes from the old tree are reclaimed into the freelist for use by the new
-// one, instead of being lost to the garbage collector.
-//
-// This call takes:
-//   O(1): when addNodesToFreelist is false, this is a single operation.
-//   O(1): when the freelist is already full, it breaks out immediately.
-//   O(freelist size):  when the freelist is empty and the nodes are all owned
-//       by this tree, nodes are added to the freelist until full.
-//   O(tree size):  when all nodes are owned by another tree, all nodes are
-//       iterated over looking for nodes to add to the freelist, and due to
-//       ownership, none are.
-func (t *BTree) Clear(addNodesToFreelist bool) {
-	if t.root != nil && addNodesToFreelist {
-		t.root.reset(t.cow)
-	}
-	t.root, t.length = nil, 0
-}
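A brief sketch of the fast-teardown pattern this enables; with addNodesToFreelist set to true, the old nodes refill the freelist and are reused by later inserts instead of being left to the garbage collector:

```go
package main

import "github.com/google/btree"

func main() {
	tr := btree.New(8)
	for i := 0; i < 1000; i++ {
		tr.ReplaceOrInsert(btree.Int(i))
	}
	// Drop all 1000 items in one call and recycle the nodes.
	tr.Clear(true)
	tr.ReplaceOrInsert(btree.Int(1)) // likely served from the freelist
}
```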
-
-// reset returns a subtree to the freelist.  It breaks out immediately if the
-// freelist is full, since the only benefit of iterating is to fill that
-// freelist up.  Returns true if parent reset call should continue.
-func (n *node) reset(c *copyOnWriteContext) bool {
-	for _, child := range n.children {
-		if !child.reset(c) {
-			return false
-		}
-	}
-	return c.freeNode(n) != ftFreelistFull
-}
-
-// Int implements the Item interface for integers.
-type Int int
-
-// Less returns true if int(a) < int(b).
-func (a Int) Less(b Item) bool {
-	return a < b.(Int)
-}
diff --git a/vendor/github.com/google/btree/btree_mem.go b/vendor/github.com/google/btree/btree_mem.go
deleted file mode 100644
index cb95b7f..0000000
--- a/vendor/github.com/google/btree/btree_mem.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2014 Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build ignore
-
-// This binary compares memory usage between btree and gollrb.
-package main
-
-import (
-	"flag"
-	"fmt"
-	"math/rand"
-	"runtime"
-	"time"
-
-	"github.com/google/btree"
-	"github.com/petar/GoLLRB/llrb"
-)
-
-var (
-	size   = flag.Int("size", 1000000, "size of the tree to build")
-	degree = flag.Int("degree", 8, "degree of btree")
-	gollrb = flag.Bool("llrb", false, "use llrb instead of btree")
-)
-
-func main() {
-	flag.Parse()
-	vals := rand.Perm(*size)
-	var t, v interface{}
-	v = vals
-	var stats runtime.MemStats
-	for i := 0; i < 10; i++ {
-		runtime.GC()
-	}
-	fmt.Println("-------- BEFORE ----------")
-	runtime.ReadMemStats(&stats)
-	fmt.Printf("%+v\n", stats)
-	start := time.Now()
-	if *gollrb {
-		tr := llrb.New()
-		for _, v := range vals {
-			tr.ReplaceOrInsert(llrb.Int(v))
-		}
-		t = tr // keep it around
-	} else {
-		tr := btree.New(*degree)
-		for _, v := range vals {
-			tr.ReplaceOrInsert(btree.Int(v))
-		}
-		t = tr // keep it around
-	}
-	fmt.Printf("%v inserts in %v\n", *size, time.Since(start))
-	fmt.Println("-------- AFTER ----------")
-	runtime.ReadMemStats(&stats)
-	fmt.Printf("%+v\n", stats)
-	for i := 0; i < 10; i++ {
-		runtime.GC()
-	}
-	fmt.Println("-------- AFTER GC ----------")
-	runtime.ReadMemStats(&stats)
-	fmt.Printf("%+v\n", stats)
-	if t == v {
-		fmt.Println("to make sure vals and tree aren't GC'd")
-	}
-}
diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml
deleted file mode 100644
index f8684d9..0000000
--- a/vendor/github.com/google/gofuzz/.travis.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-language: go
-
-go:
-  - 1.4
-  - 1.3
-  - 1.2
-  - tip
-
-install:
-  - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
-
-script:
-  - go test -cover
diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md
deleted file mode 100644
index 51cf5cd..0000000
--- a/vendor/github.com/google/gofuzz/CONTRIBUTING.md
+++ /dev/null
@@ -1,67 +0,0 @@
-# How to contribute #
-
-We'd love to accept your patches and contributions to this project.  There are
-just a few small guidelines you need to follow.
-
-
-## Contributor License Agreement ##
-
-Contributions to any Google project must be accompanied by a Contributor
-License Agreement.  This is not a copyright **assignment**, it simply gives
-Google permission to use and redistribute your contributions as part of the
-project.
-
-  * If you are an individual writing original source code and you're sure you
-    own the intellectual property, then you'll need to sign an [individual
-    CLA][].
-
-  * If you work for a company that wants to allow you to contribute your work,
-    then you'll need to sign a [corporate CLA][].
-
-You generally only need to submit a CLA once, so if you've already submitted
-one (even if it was for a different project), you probably don't need to do it
-again.
-
-[individual CLA]: https://developers.google.com/open-source/cla/individual
-[corporate CLA]: https://developers.google.com/open-source/cla/corporate
-
-
-## Submitting a patch ##
-
-  1. It's generally best to start by opening a new issue describing the bug or
-     feature you're intending to fix.  Even if you think it's relatively minor,
-     it's helpful to know what people are working on.  Mention in the initial
-     issue that you are planning to work on that bug or feature so that it can
-     be assigned to you.
-
-  1. Follow the normal process of [forking][] the project, and setup a new
-     branch to work in.  It's important that each group of changes be done in
-     separate branches in order to ensure that a pull request only includes the
-     commits related to that bug or feature.
-
-  1. Go makes it very simple to ensure properly formatted code, so always run
-     `go fmt` on your code before committing it.  You should also run
-     [golint][] over your code.  As noted in the [golint readme][], it's not
-     strictly necessary that your code be completely "lint-free", but this will
-     help you find common style issues.
-
-  1. Any significant changes should almost always be accompanied by tests.  The
-     project already has good test coverage, so look at some of the existing
-     tests if you're unsure how to go about it.  [gocov][] and [gocov-html][]
-     are invaluable tools for seeing which parts of your code aren't being
-     exercised by your tests.
-
-  1. Do your best to have [well-formed commit messages][] for each change.
-     This provides consistency throughout the project, and ensures that commit
-     messages are able to be formatted properly by various git tools.
-
-  1. Finally, push the commits to your fork and submit a [pull request][].
-
-[forking]: https://help.github.com/articles/fork-a-repo
-[golint]: https://github.com/golang/lint
-[golint readme]: https://github.com/golang/lint/blob/master/README
-[gocov]: https://github.com/axw/gocov
-[gocov-html]: https://github.com/matm/gocov-html
-[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
-[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits
-[pull request]: https://help.github.com/articles/creating-a-pull-request
diff --git a/vendor/github.com/google/gofuzz/LICENSE b/vendor/github.com/google/gofuzz/LICENSE
deleted file mode 100644
index d645695..0000000
--- a/vendor/github.com/google/gofuzz/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md
deleted file mode 100644
index 64869af..0000000
--- a/vendor/github.com/google/gofuzz/README.md
+++ /dev/null
@@ -1,71 +0,0 @@
-gofuzz
-======
-
-gofuzz is a library for populating Go objects with random values.
-
-[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.png)](https://godoc.org/github.com/google/gofuzz)
-[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz)
-
-This is useful for testing:
-
-* Do your project's objects really serialize/unserialize correctly in all cases?
-* Is there an incorrectly formatted object that will cause your project to panic?
-
-Import with ```import "github.com/google/gofuzz"```
-
-You can use it on single variables:
-```go
-f := fuzz.New()
-var myInt int
-f.Fuzz(&myInt) // myInt gets a random value.
-```
-
-You can use it on maps:
-```go
-f := fuzz.New().NilChance(0).NumElements(1, 1)
-var myMap map[ComplexKeyType]string
-f.Fuzz(&myMap) // myMap will have exactly one element.
-```
-
-Customize the chance of getting a nil pointer:
-```go
-f := fuzz.New().NilChance(.5)
-var fancyStruct struct {
-  A, B, C, D *string
-}
-f.Fuzz(&fancyStruct) // About half the pointers should be set.
-```
-
-You can even customize the randomization completely if needed:
-```go
-type MyEnum string
-const (
-        A MyEnum = "A"
-        B MyEnum = "B"
-)
-type MyInfo struct {
-        Type MyEnum
-        AInfo *string
-        BInfo *string
-}
-
-f := fuzz.New().NilChance(0).Funcs(
-        func(e *MyInfo, c fuzz.Continue) {
-                switch c.Intn(2) {
-                case 0:
-                        e.Type = A
-                        c.Fuzz(&e.AInfo)
-                case 1:
-                        e.Type = B
-                        c.Fuzz(&e.BInfo)
-                }
-        },
-)
-
-var myObject MyInfo
-f.Fuzz(&myObject) // Type will correspond to whether A or B info is set.
-```
-
-See more examples in ```example_test.go```.
-
-Happy testing!
diff --git a/vendor/github.com/google/gofuzz/doc.go b/vendor/github.com/google/gofuzz/doc.go
deleted file mode 100644
index 9f9956d..0000000
--- a/vendor/github.com/google/gofuzz/doc.go
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package fuzz is a library for populating Go objects with random values.
-package fuzz
diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go
deleted file mode 100644
index 1dfa80a..0000000
--- a/vendor/github.com/google/gofuzz/fuzz.go
+++ /dev/null
@@ -1,487 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package fuzz
-
-import (
-	"fmt"
-	"math/rand"
-	"reflect"
-	"time"
-)
-
-// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
-type fuzzFuncMap map[reflect.Type]reflect.Value
-
-// Fuzzer knows how to fill any object with random fields.
-type Fuzzer struct {
-	fuzzFuncs        fuzzFuncMap
-	defaultFuzzFuncs fuzzFuncMap
-	r                *rand.Rand
-	nilChance        float64
-	minElements      int
-	maxElements      int
-	maxDepth         int
-}
-
-// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
-// RandSource, NilChance, or NumElements in any order.
-func New() *Fuzzer {
-	return NewWithSeed(time.Now().UnixNano())
-}
-
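-// NewWithSeed returns a new Fuzzer seeded with the given value, so that
-// repeated runs produce the same sequence of random values.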
-func NewWithSeed(seed int64) *Fuzzer {
-	f := &Fuzzer{
-		defaultFuzzFuncs: fuzzFuncMap{
-			reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime),
-		},
-
-		fuzzFuncs:   fuzzFuncMap{},
-		r:           rand.New(rand.NewSource(seed)),
-		nilChance:   .2,
-		minElements: 1,
-		maxElements: 10,
-		maxDepth:    100,
-	}
-	return f
-}
-
-// Funcs adds each entry in fuzzFuncs as a custom fuzzing function.
-//
-// Each entry in fuzzFuncs must be a function taking two parameters.
-// The first parameter must be a pointer or map. It is the variable that
-// function will fill with random data. The second parameter must be a
-// fuzz.Continue, which will provide a source of randomness and a way
-// to automatically continue fuzzing smaller pieces of the first parameter.
-//
-// These functions are called sensibly, e.g., if you wanted custom string
-// fuzzing, the function `func(s *string, c fuzz.Continue)` would get
-// called and passed the address of strings. Maps and pointers will always
-// be made/new'd for you, ignoring the NilChance option. For slices, it
-// doesn't make much sense to pre-create them--Fuzzer doesn't know how
-// long you want your slice--so take a pointer to a slice, and make it
-// yourself. (If you don't want your map/pointer type pre-made, take a
-// pointer to it, and make it yourself.) See the examples for a range of
-// custom functions.
-func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer {
-	for i := range fuzzFuncs {
-		v := reflect.ValueOf(fuzzFuncs[i])
-		if v.Kind() != reflect.Func {
-			panic("Need only funcs!")
-		}
-		t := v.Type()
-		if t.NumIn() != 2 || t.NumOut() != 0 {
-			panic("Need 2 in and 0 out params!")
-		}
-		argT := t.In(0)
-		switch argT.Kind() {
-		case reflect.Ptr, reflect.Map:
-		default:
-			panic("fuzzFunc must take pointer or map type")
-		}
-		if t.In(1) != reflect.TypeOf(Continue{}) {
-			panic("fuzzFunc's second parameter must be type fuzz.Continue")
-		}
-		f.fuzzFuncs[argT] = v
-	}
-	return f
-}
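-
-// Editorial sketch: registering a custom fuzz function. Temperature is a
-// hypothetical type; the function signature (pointer in, fuzz.Continue in,
-// nothing out) is what Funcs requires.
-//
-//	type Temperature struct{ Celsius float64 }
-//
-//	f := fuzz.New().Funcs(
-//		func(t *Temperature, c fuzz.Continue) {
-//			// Keep values in a plausible range rather than arbitrary floats.
-//			t.Celsius = -40 + c.Float64()*120
-//		},
-//	)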
-
-// RandSource causes f to get values from the given source of randomness.
-// Use if you want deterministic fuzzing.
-func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer {
-	f.r = rand.New(s)
-	return f
-}
-
-// NilChance sets the probability of creating a nil pointer, map, or slice to
-// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive.
-func (f *Fuzzer) NilChance(p float64) *Fuzzer {
-	if p < 0 || p > 1 {
-		panic("p should be between 0 and 1, inclusive.")
-	}
-	f.nilChance = p
-	return f
-}
-
-// NumElements sets the minimum and maximum number of elements that will be
-// added to a non-nil map or slice.
-func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer {
-	if atLeast > atMost {
-		panic("atLeast must be <= atMost")
-	}
-	if atLeast < 0 {
-		panic("atLeast must be >= 0")
-	}
-	f.minElements = atLeast
-	f.maxElements = atMost
-	return f
-}
-
-func (f *Fuzzer) genElementCount() int {
-	if f.minElements == f.maxElements {
-		return f.minElements
-	}
-	return f.minElements + f.r.Intn(f.maxElements-f.minElements+1)
-}
-
-func (f *Fuzzer) genShouldFill() bool {
-	return f.r.Float64() > f.nilChance
-}
-
-// MaxDepth sets the maximum number of recursive fuzz calls that will be made
-// before stopping.  This includes struct members, pointers, and map and slice
-// elements.
-func (f *Fuzzer) MaxDepth(d int) *Fuzzer {
-	f.maxDepth = d
-	return f
-}
-
-// Fuzz recursively fills all of obj's fields with something random.  First
-// this tries to find a custom fuzz function (see Funcs).  If there is no
-// custom function this tests whether the object implements fuzz.Interface and,
-// if so, calls Fuzz on it to fuzz itself.  If that fails, this will see if
-// there is a default fuzz function provided by this package.  If all of that
-// fails, this will generate random values for all primitive fields and then
-// recurse for all non-primitives.
-//
-// This is safe for cyclic or tree-like structs, up to a limit.  Use the
-// MaxDepth method to adjust how deep you need it to recurse.
-//
-// obj must be a pointer. Only exported (public) fields can be set (thanks,
-// golang :/ ). Intended for tests, so will panic on bad input or unimplemented
-// fields.
-func (f *Fuzzer) Fuzz(obj interface{}) {
-	v := reflect.ValueOf(obj)
-	if v.Kind() != reflect.Ptr {
-		panic("needed ptr!")
-	}
-	v = v.Elem()
-	f.fuzzWithContext(v, 0)
-}
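-
-// Editorial sketch: fuzzing a self-referential type. Node is hypothetical;
-// MaxDepth bounds the recursion even when NilChance rarely produces nil.
-//
-//	type Node struct {
-//		Value int
-//		Next  *Node
-//	}
-//
-//	f := fuzz.New().NilChance(0.5).MaxDepth(4)
-//	var n Node
-//	f.Fuzz(&n) // the chain ends at a nil pointer or at the depth limit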
-
-// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for
-// obj's type will not be called and obj will not be tested for fuzz.Interface
-// conformance.  This applies only to obj and not other instances of obj's
-// type.
-// Not safe for cyclic or tree-like structs!
-// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ).
-// Intended for tests, so will panic on bad input or unimplemented fields.
-func (f *Fuzzer) FuzzNoCustom(obj interface{}) {
-	v := reflect.ValueOf(obj)
-	if v.Kind() != reflect.Ptr {
-		panic("needed ptr!")
-	}
-	v = v.Elem()
-	f.fuzzWithContext(v, flagNoCustomFuzz)
-}
-
-const (
-	// Do not try to find a custom fuzz function.  Does not apply recursively.
-	flagNoCustomFuzz uint64 = 1 << iota
-)
-
-func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) {
-	fc := &fuzzerContext{fuzzer: f}
-	fc.doFuzz(v, flags)
-}
-
-// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer
-// be thread-safe.
-type fuzzerContext struct {
-	fuzzer   *Fuzzer
-	curDepth int
-}
-
-func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {
-	if fc.curDepth >= fc.fuzzer.maxDepth {
-		return
-	}
-	fc.curDepth++
-	defer func() { fc.curDepth-- }()
-
-	if !v.CanSet() {
-		return
-	}
-
-	if flags&flagNoCustomFuzz == 0 {
-		// Check for both pointer and non-pointer custom functions.
-		if v.CanAddr() && fc.tryCustom(v.Addr()) {
-			return
-		}
-		if fc.tryCustom(v) {
-			return
-		}
-	}
-
-	if fn, ok := fillFuncMap[v.Kind()]; ok {
-		fn(v, fc.fuzzer.r)
-		return
-	}
-	switch v.Kind() {
-	case reflect.Map:
-		if fc.fuzzer.genShouldFill() {
-			v.Set(reflect.MakeMap(v.Type()))
-			n := fc.fuzzer.genElementCount()
-			for i := 0; i < n; i++ {
-				key := reflect.New(v.Type().Key()).Elem()
-				fc.doFuzz(key, 0)
-				val := reflect.New(v.Type().Elem()).Elem()
-				fc.doFuzz(val, 0)
-				v.SetMapIndex(key, val)
-			}
-			return
-		}
-		v.Set(reflect.Zero(v.Type()))
-	case reflect.Ptr:
-		if fc.fuzzer.genShouldFill() {
-			v.Set(reflect.New(v.Type().Elem()))
-			fc.doFuzz(v.Elem(), 0)
-			return
-		}
-		v.Set(reflect.Zero(v.Type()))
-	case reflect.Slice:
-		if fc.fuzzer.genShouldFill() {
-			n := fc.fuzzer.genElementCount()
-			v.Set(reflect.MakeSlice(v.Type(), n, n))
-			for i := 0; i < n; i++ {
-				fc.doFuzz(v.Index(i), 0)
-			}
-			return
-		}
-		v.Set(reflect.Zero(v.Type()))
-	case reflect.Array:
-		if fc.fuzzer.genShouldFill() {
-			n := v.Len()
-			for i := 0; i < n; i++ {
-				fc.doFuzz(v.Index(i), 0)
-			}
-			return
-		}
-		v.Set(reflect.Zero(v.Type()))
-	case reflect.Struct:
-		for i := 0; i < v.NumField(); i++ {
-			fc.doFuzz(v.Field(i), 0)
-		}
-	case reflect.Chan:
-		fallthrough
-	case reflect.Func:
-		fallthrough
-	case reflect.Interface:
-		fallthrough
-	default:
-		panic(fmt.Sprintf("Can't handle %#v", v.Interface()))
-	}
-}
-
-// tryCustom searches for custom handlers, and returns true iff it finds a match
-// and successfully randomizes v.
-func (fc *fuzzerContext) tryCustom(v reflect.Value) bool {
-	// First: see if we have a fuzz function for it.
-	doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()]
-	if !ok {
-		// Second: see if it can fuzz itself.
-		if v.CanInterface() {
-			intf := v.Interface()
-			if fuzzable, ok := intf.(Interface); ok {
-				fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r})
-				return true
-			}
-		}
-		// Finally: see if there is a default fuzz function.
-		doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()]
-		if !ok {
-			return false
-		}
-	}
-
-	switch v.Kind() {
-	case reflect.Ptr:
-		if v.IsNil() {
-			if !v.CanSet() {
-				return false
-			}
-			v.Set(reflect.New(v.Type().Elem()))
-		}
-	case reflect.Map:
-		if v.IsNil() {
-			if !v.CanSet() {
-				return false
-			}
-			v.Set(reflect.MakeMap(v.Type()))
-		}
-	default:
-		return false
-	}
-
-	doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
-		fc:   fc,
-		Rand: fc.fuzzer.r,
-	})})
-	return true
-}
-
-// Interface represents an object that knows how to fuzz itself.  Any time we
-// find a type that implements this interface we will delegate the act of
-// fuzzing to the object itself.
-type Interface interface {
-	Fuzz(c Continue)
-}
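-
-// Editorial sketch: a self-fuzzing type. Percentage is hypothetical;
-// implementing Fuzz(c Continue) lets the type preserve its own invariants
-// instead of receiving arbitrary values.
-//
-//	type Percentage struct{ Value int }
-//
-//	func (p *Percentage) Fuzz(c fuzz.Continue) {
-//		p.Value = c.Intn(101) // invariant: always in 0..100
-//	}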
-
-// Continue can be passed to custom fuzzing functions to allow them to use
-// the correct source of randomness and to continue fuzzing their members.
-type Continue struct {
-	fc *fuzzerContext
-
-	// For convenience, Continue embeds *rand.Rand, exposing its methods.
-	// Use this for generating any randomness if you want your fuzzing
-	// to be repeatable for a given seed.
-	*rand.Rand
-}
-
-// Fuzz continues fuzzing obj. obj must be a pointer.
-func (c Continue) Fuzz(obj interface{}) {
-	v := reflect.ValueOf(obj)
-	if v.Kind() != reflect.Ptr {
-		panic("needed ptr!")
-	}
-	v = v.Elem()
-	c.fc.doFuzz(v, 0)
-}
-
-// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for
-// obj's type will not be called and obj will not be tested for fuzz.Interface
-// conformance.  This applies only to obj and not other instances of obj's
-// type.
-func (c Continue) FuzzNoCustom(obj interface{}) {
-	v := reflect.ValueOf(obj)
-	if v.Kind() != reflect.Ptr {
-		panic("needed ptr!")
-	}
-	v = v.Elem()
-	c.fc.doFuzz(v, flagNoCustomFuzz)
-}
-
-// RandString makes a random string up to 19 runes long. The returned string
-// may include a variety of (valid) UTF-8 encodings.
-func (c Continue) RandString() string {
-	return randString(c.Rand)
-}
-
-// RandUint64 makes random 64-bit numbers.
-// Weirdly, rand.Rand offered no 64-bit method before Go 1.8, so this
-// assembles one from two 32-bit values.
-func (c Continue) RandUint64() uint64 {
-	return randUint64(c.Rand)
-}
-
-// RandBool returns true or false randomly.
-func (c Continue) RandBool() bool {
-	return randBool(c.Rand)
-}
-
-func fuzzInt(v reflect.Value, r *rand.Rand) {
-	v.SetInt(int64(randUint64(r)))
-}
-
-func fuzzUint(v reflect.Value, r *rand.Rand) {
-	v.SetUint(randUint64(r))
-}
-
-func fuzzTime(t *time.Time, c Continue) {
-	var sec, nsec int64
-	// Allow for about 1000 years of random time values (measured from the
-	// Unix epoch), which keeps things like JSON parsing reasonably happy.
-	sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60)
-	c.Fuzz(&nsec)
-	*t = time.Unix(sec, nsec)
-}
-
-var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
-	reflect.Bool: func(v reflect.Value, r *rand.Rand) {
-		v.SetBool(randBool(r))
-	},
-	reflect.Int:     fuzzInt,
-	reflect.Int8:    fuzzInt,
-	reflect.Int16:   fuzzInt,
-	reflect.Int32:   fuzzInt,
-	reflect.Int64:   fuzzInt,
-	reflect.Uint:    fuzzUint,
-	reflect.Uint8:   fuzzUint,
-	reflect.Uint16:  fuzzUint,
-	reflect.Uint32:  fuzzUint,
-	reflect.Uint64:  fuzzUint,
-	reflect.Uintptr: fuzzUint,
-	reflect.Float32: func(v reflect.Value, r *rand.Rand) {
-		v.SetFloat(float64(r.Float32()))
-	},
-	reflect.Float64: func(v reflect.Value, r *rand.Rand) {
-		v.SetFloat(r.Float64())
-	},
-	reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
-		panic("unimplemented")
-	},
-	reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
-		panic("unimplemented")
-	},
-	reflect.String: func(v reflect.Value, r *rand.Rand) {
-		v.SetString(randString(r))
-	},
-	reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) {
-		panic("unimplemented")
-	},
-}
-
-// randBool returns true or false randomly.
-func randBool(r *rand.Rand) bool {
-	return r.Int()&1 == 1
-}
-
-type charRange struct {
-	first, last rune
-}
-
-// choose returns a random Unicode character from the given range, using the
-// given randomness source.
-func (r *charRange) choose(rand *rand.Rand) rune {
-	count := int64(r.last - r.first + 1) // +1 so r.last itself can be chosen
-	return r.first + rune(rand.Int63n(count))
-}
-
-var unicodeRanges = []charRange{
-	{' ', '~'},           // ASCII characters
-	{'\u00a0', '\u02af'}, // Multi-byte encoded characters
-	{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
-}
-
-// randString makes a random string up to 19 runes long. The returned string
-// may include a variety of (valid) UTF-8 encodings.
-func randString(r *rand.Rand) string {
-	n := r.Intn(20)
-	runes := make([]rune, n)
-	for i := range runes {
-		runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r)
-	}
-	return string(runes)
-}
-
-// randUint64 makes random 64-bit numbers.
-// Weirdly, rand.Rand offered no 64-bit method before Go 1.8, so this
-// assembles one from two 32-bit values.
-func randUint64(r *rand.Rand) uint64 {
-	return uint64(r.Uint32())<<32 | uint64(r.Uint32())
-}
diff --git a/vendor/github.com/googleapis/gnostic/LICENSE b/vendor/github.com/googleapis/gnostic/LICENSE
deleted file mode 100644
index 6b0b127..0000000
--- a/vendor/github.com/googleapis/gnostic/LICENSE
+++ /dev/null
@@ -1,203 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go
deleted file mode 100644
index 5351f36..0000000
--- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go
+++ /dev/null
@@ -1,8728 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// THIS FILE IS AUTOMATICALLY GENERATED.
-
-package openapi_v2
-
-import (
-	"fmt"
-	"github.com/googleapis/gnostic/compiler"
-	"gopkg.in/yaml.v2"
-	"regexp"
-	"strings"
-)
-
-// Version returns the package name (and OpenAPI version).
-func Version() string {
-	return "openapi_v2"
-}
-
-// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not.
-func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*AdditionalPropertiesItem, error) {
-	errors := make([]error, 0)
-	x := &AdditionalPropertiesItem{}
-	matched := false
-	// Schema schema = 1;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// Errors might be OK here; they just mean we don't have the right subtype.
-			t, matchingError := NewSchema(m, compiler.NewContext("schema", context))
-			if matchingError == nil {
-				x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	// bool boolean = 2;
-	boolValue, ok := in.(bool)
-	if ok {
-		x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue}
-	}
-	if matched {
-		// since the oneof matched one of its possibilities, discard any matching errors
-		errors = make([]error, 0)
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
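-
-// Editorial note on the oneof pattern above: each candidate subtype is tried
-// in turn and its mismatch errors are collected; once any candidate matches,
-// the accumulated errors are discarded, because they only described subtypes
-// that turned out not to apply.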
-
-// NewAny creates an object of type Any if possible, returning an error if not.
-func NewAny(in interface{}, context *compiler.Context) (*Any, error) {
-	errors := make([]error, 0)
-	x := &Any{}
-	bytes, _ := yaml.Marshal(in)
-	x.Yaml = string(bytes)
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not.
-func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecurity, error) {
-	errors := make([]error, 0)
-	x := &ApiKeySecurity{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		requiredKeys := []string{"in", "name", "type"}
-		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
-		if len(missingKeys) > 0 {
-			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		allowedKeys := []string{"description", "in", "name", "type"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string type = 1;
-		v1 := compiler.MapValueForKey(m, "type")
-		if v1 != nil {
-			x.Type, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [apiKey]
-			if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string name = 2;
-		v2 := compiler.MapValueForKey(m, "name")
-		if v2 != nil {
-			x.Name, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string in = 3;
-		v3 := compiler.MapValueForKey(m, "in")
-		if v3 != nil {
-			x.In, ok = v3.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [header query]
-			if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) {
-				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string description = 4;
-		v4 := compiler.MapValueForKey(m, "description")
-		if v4 != nil {
-			x.Description, ok = v4.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated NamedAny vendor_extension = 5;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
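-
-// Editorial note: the vendor-extension loop above is the recurring pattern in
-// this generated file: any key prefixed "x-" is captured as a NamedAny pair,
-// first via compiler.HandleExtension and otherwise via NewAny.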
-
-// NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not.
-func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (*BasicAuthenticationSecurity, error) {
-	errors := make([]error, 0)
-	x := &BasicAuthenticationSecurity{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		requiredKeys := []string{"type"}
-		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
-		if len(missingKeys) > 0 {
-			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		allowedKeys := []string{"description", "type"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string type = 1;
-		v1 := compiler.MapValueForKey(m, "type")
-		if v1 != nil {
-			x.Type, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [basic]
-			if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string description = 2;
-		v2 := compiler.MapValueForKey(m, "description")
-		if v2 != nil {
-			x.Description, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated NamedAny vendor_extension = 3;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not.
-func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter, error) {
-	errors := make([]error, 0)
-	x := &BodyParameter{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		requiredKeys := []string{"in", "name", "schema"}
-		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
-		if len(missingKeys) > 0 {
-			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		allowedKeys := []string{"description", "in", "name", "required", "schema"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string description = 1;
-		v1 := compiler.MapValueForKey(m, "description")
-		if v1 != nil {
-			x.Description, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string name = 2;
-		v2 := compiler.MapValueForKey(m, "name")
-		if v2 != nil {
-			x.Name, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string in = 3;
-		v3 := compiler.MapValueForKey(m, "in")
-		if v3 != nil {
-			x.In, ok = v3.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [body]
-			if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) {
-				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool required = 4;
-		v4 := compiler.MapValueForKey(m, "required")
-		if v4 != nil {
-			x.Required, ok = v4.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v4, v4)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Schema schema = 5;
-		v5 := compiler.MapValueForKey(m, "schema")
-		if v5 != nil {
-			var err error
-			x.Schema, err = NewSchema(v5, compiler.NewContext("schema", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// repeated NamedAny vendor_extension = 6;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewContact creates an object of type Contact if possible, returning an error if not.
-func NewContact(in interface{}, context *compiler.Context) (*Contact, error) {
-	errors := make([]error, 0)
-	x := &Contact{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{"email", "name", "url"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string name = 1;
-		v1 := compiler.MapValueForKey(m, "name")
-		if v1 != nil {
-			x.Name, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string url = 2;
-		v2 := compiler.MapValueForKey(m, "url")
-		if v2 != nil {
-			x.Url, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string email = 3;
-		v3 := compiler.MapValueForKey(m, "email")
-		if v3 != nil {
-			x.Email, ok = v3.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for email: %+v (%T)", v3, v3)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated NamedAny vendor_extension = 4;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewDefault creates an object of type Default if possible, returning an error if not.
-func NewDefault(in interface{}, context *compiler.Context) (*Default, error) {
-	errors := make([]error, 0)
-	x := &Default{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		// repeated NamedAny additional_properties = 1;
-		// MAP: Any
-		x.AdditionalProperties = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				pair := &NamedAny{}
-				pair.Name = k
-				result := &Any{}
-				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-				if handled {
-					if err != nil {
-						errors = append(errors, err)
-					} else {
-						bytes, _ := yaml.Marshal(v)
-						result.Yaml = string(bytes)
-						result.Value = resultFromExt
-						pair.Value = result
-					}
-				} else {
-					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-					if err != nil {
-						errors = append(errors, err)
-					}
-				}
-				x.AdditionalProperties = append(x.AdditionalProperties, pair)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewDefinitions creates an object of type Definitions if possible, returning an error if not.
-func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, error) {
-	errors := make([]error, 0)
-	x := &Definitions{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		// repeated NamedSchema additional_properties = 1;
-		// MAP: Schema
-		x.AdditionalProperties = make([]*NamedSchema, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				pair := &NamedSchema{}
-				pair.Name = k
-				var err error
-				pair.Value, err = NewSchema(v, compiler.NewContext(k, context))
-				if err != nil {
-					errors = append(errors, err)
-				}
-				x.AdditionalProperties = append(x.AdditionalProperties, pair)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewDocument creates an object of type Document if possible, returning an error if not.
-func NewDocument(in interface{}, context *compiler.Context) (*Document, error) {
-	errors := make([]error, 0)
-	x := &Document{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		requiredKeys := []string{"info", "paths", "swagger"}
-		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
-		if len(missingKeys) > 0 {
-			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string swagger = 1;
-		v1 := compiler.MapValueForKey(m, "swagger")
-		if v1 != nil {
-			x.Swagger, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [2.0]
-			if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) {
-				message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Info info = 2;
-		v2 := compiler.MapValueForKey(m, "info")
-		if v2 != nil {
-			var err error
-			x.Info, err = NewInfo(v2, compiler.NewContext("info", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// string host = 3;
-		v3 := compiler.MapValueForKey(m, "host")
-		if v3 != nil {
-			x.Host, ok = v3.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for host: %+v (%T)", v3, v3)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string base_path = 4;
-		v4 := compiler.MapValueForKey(m, "basePath")
-		if v4 != nil {
-			x.BasePath, ok = v4.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for basePath: %+v (%T)", v4, v4)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated string schemes = 5;
-		v5 := compiler.MapValueForKey(m, "schemes")
-		if v5 != nil {
-			v, ok := v5.([]interface{})
-			if ok {
-				x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v)
-			} else {
-				message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v5, v5)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [http https ws wss]
-			if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) {
-				message := fmt.Sprintf("has unexpected value for schemes: %+v", v5)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated string consumes = 6;
-		v6 := compiler.MapValueForKey(m, "consumes")
-		if v6 != nil {
-			v, ok := v6.([]interface{})
-			if ok {
-				x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v)
-			} else {
-				message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v6, v6)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated string produces = 7;
-		v7 := compiler.MapValueForKey(m, "produces")
-		if v7 != nil {
-			v, ok := v7.([]interface{})
-			if ok {
-				x.Produces = compiler.ConvertInterfaceArrayToStringArray(v)
-			} else {
-				message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v7, v7)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Paths paths = 8;
-		v8 := compiler.MapValueForKey(m, "paths")
-		if v8 != nil {
-			var err error
-			x.Paths, err = NewPaths(v8, compiler.NewContext("paths", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// Definitions definitions = 9;
-		v9 := compiler.MapValueForKey(m, "definitions")
-		if v9 != nil {
-			var err error
-			x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// ParameterDefinitions parameters = 10;
-		v10 := compiler.MapValueForKey(m, "parameters")
-		if v10 != nil {
-			var err error
-			x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// ResponseDefinitions responses = 11;
-		v11 := compiler.MapValueForKey(m, "responses")
-		if v11 != nil {
-			var err error
-			x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// repeated SecurityRequirement security = 12;
-		v12 := compiler.MapValueForKey(m, "security")
-		if v12 != nil {
-			// repeated SecurityRequirement
-			x.Security = make([]*SecurityRequirement, 0)
-			a, ok := v12.([]interface{})
-			if ok {
-				for _, item := range a {
-					y, err := NewSecurityRequirement(item, compiler.NewContext("security", context))
-					if err != nil {
-						errors = append(errors, err)
-					}
-					x.Security = append(x.Security, y)
-				}
-			}
-		}
-		// SecurityDefinitions security_definitions = 13;
-		v13 := compiler.MapValueForKey(m, "securityDefinitions")
-		if v13 != nil {
-			var err error
-			x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// repeated Tag tags = 14;
-		v14 := compiler.MapValueForKey(m, "tags")
-		if v14 != nil {
-			// repeated Tag
-			x.Tags = make([]*Tag, 0)
-			a, ok := v14.([]interface{})
-			if ok {
-				for _, item := range a {
-					y, err := NewTag(item, compiler.NewContext("tags", context))
-					if err != nil {
-						errors = append(errors, err)
-					}
-					x.Tags = append(x.Tags, y)
-				}
-			}
-		}
-		// ExternalDocs external_docs = 15;
-		v15 := compiler.MapValueForKey(m, "externalDocs")
-		if v15 != nil {
-			var err error
-			x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// repeated NamedAny vendor_extension = 16;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
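-
-// Editorial sketch (not part of the generated file): a plausible entry point,
-// assuming gopkg.in/yaml.v2. specBytes is hypothetical; error handling is
-// abbreviated.
-//
-//	var raw yaml.MapSlice
-//	if err := yaml.Unmarshal(specBytes, &raw); err != nil {
-//		return err
-//	}
-//	doc, err := NewDocument(raw, compiler.NewContext("$root", nil))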
-
-// NewExamples creates an object of type Examples if possible, returning an error if not.
-func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) {
-	errors := make([]error, 0)
-	x := &Examples{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		// repeated NamedAny additional_properties = 1;
-		// MAP: Any
-		x.AdditionalProperties = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				pair := &NamedAny{}
-				pair.Name = k
-				result := &Any{}
-				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-				if handled {
-					if err != nil {
-						errors = append(errors, err)
-					} else {
-						bytes, _ := yaml.Marshal(v)
-						result.Yaml = string(bytes)
-						result.Value = resultFromExt
-						pair.Value = result
-					}
-				} else {
-					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-					if err != nil {
-						errors = append(errors, err)
-					}
-				}
-				x.AdditionalProperties = append(x.AdditionalProperties, pair)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not.
-func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, error) {
-	errors := make([]error, 0)
-	x := &ExternalDocs{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		requiredKeys := []string{"url"}
-		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
-		if len(missingKeys) > 0 {
-			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		allowedKeys := []string{"description", "url"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string description = 1;
-		v1 := compiler.MapValueForKey(m, "description")
-		if v1 != nil {
-			x.Description, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string url = 2;
-		v2 := compiler.MapValueForKey(m, "url")
-		if v2 != nil {
-			x.Url, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated NamedAny vendor_extension = 3;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewFileSchema creates an object of type FileSchema if possible, returning an error if not.
-func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, error) {
-	errors := make([]error, 0)
-	x := &FileSchema{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		requiredKeys := []string{"type"}
-		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
-		if len(missingKeys) > 0 {
-			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string format = 1;
-		v1 := compiler.MapValueForKey(m, "format")
-		if v1 != nil {
-			x.Format, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string title = 2;
-		v2 := compiler.MapValueForKey(m, "title")
-		if v2 != nil {
-			x.Title, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string description = 3;
-		v3 := compiler.MapValueForKey(m, "description")
-		if v3 != nil {
-			x.Description, ok = v3.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Any default = 4;
-		v4 := compiler.MapValueForKey(m, "default")
-		if v4 != nil {
-			var err error
-			x.Default, err = NewAny(v4, compiler.NewContext("default", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// repeated string required = 5;
-		v5 := compiler.MapValueForKey(m, "required")
-		if v5 != nil {
-			v, ok := v5.([]interface{})
-			if ok {
-				x.Required = compiler.ConvertInterfaceArrayToStringArray(v)
-			} else {
-				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v5, v5)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string type = 6;
-		v6 := compiler.MapValueForKey(m, "type")
-		if v6 != nil {
-			x.Type, ok = v6.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [file]
-			if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool read_only = 7;
-		v7 := compiler.MapValueForKey(m, "readOnly")
-		if v7 != nil {
-			x.ReadOnly, ok = v7.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v7, v7)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// ExternalDocs external_docs = 8;
-		v8 := compiler.MapValueForKey(m, "externalDocs")
-		if v8 != nil {
-			var err error
-			x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// Any example = 9;
-		v9 := compiler.MapValueForKey(m, "example")
-		if v9 != nil {
-			var err error
-			x.Example, err = NewAny(v9, compiler.NewContext("example", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// repeated NamedAny vendor_extension = 10;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not.
-func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*FormDataParameterSubSchema, error) {
-	errors := make([]error, 0)
-	x := &FormDataParameterSubSchema{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// bool required = 1;
-		v1 := compiler.MapValueForKey(m, "required")
-		if v1 != nil {
-			x.Required, ok = v1.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string in = 2;
-		v2 := compiler.MapValueForKey(m, "in")
-		if v2 != nil {
-			x.In, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [formData]
-			if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) {
-				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string description = 3;
-		v3 := compiler.MapValueForKey(m, "description")
-		if v3 != nil {
-			x.Description, ok = v3.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string name = 4;
-		v4 := compiler.MapValueForKey(m, "name")
-		if v4 != nil {
-			x.Name, ok = v4.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool allow_empty_value = 5;
-		v5 := compiler.MapValueForKey(m, "allowEmptyValue")
-		if v5 != nil {
-			x.AllowEmptyValue, ok = v5.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string type = 6;
-		v6 := compiler.MapValueForKey(m, "type")
-		if v6 != nil {
-			x.Type, ok = v6.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [string number boolean integer array file]
-			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string format = 7;
-		v7 := compiler.MapValueForKey(m, "format")
-		if v7 != nil {
-			x.Format, ok = v7.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// PrimitivesItems items = 8;
-		v8 := compiler.MapValueForKey(m, "items")
-		if v8 != nil {
-			var err error
-			x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// string collection_format = 9;
-		v9 := compiler.MapValueForKey(m, "collectionFormat")
-		if v9 != nil {
-			x.CollectionFormat, ok = v9.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [csv ssv tsv pipes multi]
-			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) {
-				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Any default = 10;
-		v10 := compiler.MapValueForKey(m, "default")
-		if v10 != nil {
-			var err error
-			x.Default, err = NewAny(v10, compiler.NewContext("default", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// float maximum = 11;
-		v11 := compiler.MapValueForKey(m, "maximum")
-		if v11 != nil {
-			switch v11 := v11.(type) {
-			case float64:
-				x.Maximum = v11
-			case float32:
-				x.Maximum = float64(v11)
-			case uint64:
-				x.Maximum = float64(v11)
-			case uint32:
-				x.Maximum = float64(v11)
-			case int64:
-				x.Maximum = float64(v11)
-			case int32:
-				x.Maximum = float64(v11)
-			case int:
-				x.Maximum = float64(v11)
-			default:
-				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool exclusive_maximum = 12;
-		v12 := compiler.MapValueForKey(m, "exclusiveMaximum")
-		if v12 != nil {
-			x.ExclusiveMaximum, ok = v12.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// float minimum = 13;
-		v13 := compiler.MapValueForKey(m, "minimum")
-		if v13 != nil {
-			switch v13 := v13.(type) {
-			case float64:
-				x.Minimum = v13
-			case float32:
-				x.Minimum = float64(v13)
-			case uint64:
-				x.Minimum = float64(v13)
-			case uint32:
-				x.Minimum = float64(v13)
-			case int64:
-				x.Minimum = float64(v13)
-			case int32:
-				x.Minimum = float64(v13)
-			case int:
-				x.Minimum = float64(v13)
-			default:
-				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool exclusive_minimum = 14;
-		v14 := compiler.MapValueForKey(m, "exclusiveMinimum")
-		if v14 != nil {
-			x.ExclusiveMinimum, ok = v14.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 max_length = 15;
-		v15 := compiler.MapValueForKey(m, "maxLength")
-		if v15 != nil {
-			t, ok := v15.(int)
-			if ok {
-				x.MaxLength = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 min_length = 16;
-		v16 := compiler.MapValueForKey(m, "minLength")
-		if v16 != nil {
-			t, ok := v16.(int)
-			if ok {
-				x.MinLength = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string pattern = 17;
-		v17 := compiler.MapValueForKey(m, "pattern")
-		if v17 != nil {
-			x.Pattern, ok = v17.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 max_items = 18;
-		v18 := compiler.MapValueForKey(m, "maxItems")
-		if v18 != nil {
-			t, ok := v18.(int)
-			if ok {
-				x.MaxItems = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 min_items = 19;
-		v19 := compiler.MapValueForKey(m, "minItems")
-		if v19 != nil {
-			t, ok := v19.(int)
-			if ok {
-				x.MinItems = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool unique_items = 20;
-		v20 := compiler.MapValueForKey(m, "uniqueItems")
-		if v20 != nil {
-			x.UniqueItems, ok = v20.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated Any enum = 21;
-		v21 := compiler.MapValueForKey(m, "enum")
-		if v21 != nil {
-			// repeated Any
-			x.Enum = make([]*Any, 0)
-			a, ok := v21.([]interface{})
-			if ok {
-				for _, item := range a {
-					y, err := NewAny(item, compiler.NewContext("enum", context))
-					if err != nil {
-						errors = append(errors, err)
-					}
-					x.Enum = append(x.Enum, y)
-				}
-			}
-		}
-		// float multiple_of = 22;
-		v22 := compiler.MapValueForKey(m, "multipleOf")
-		if v22 != nil {
-			switch v22 := v22.(type) {
-			case float64:
-				x.MultipleOf = v22
-			case float32:
-				x.MultipleOf = float64(v22)
-			case uint64:
-				x.MultipleOf = float64(v22)
-			case uint32:
-				x.MultipleOf = float64(v22)
-			case int64:
-				x.MultipleOf = float64(v22)
-			case int32:
-				x.MultipleOf = float64(v22)
-			case int:
-				x.MultipleOf = float64(v22)
-			default:
-				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated NamedAny vendor_extension = 23;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
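The maximum/minimum/multipleOf clauses above repeat the same widening type switch because a YAML decoder may surface a numeric scalar as any of several Go types. A minimal standalone sketch of that pattern; the helper name toFloat64 is purely illustrative and not part of the generated file:

// toFloat64 mirrors the generated switch: it widens any numeric
// scalar a YAML decoder might produce into a float64, reporting
// false for non-numeric values.
func toFloat64(v interface{}) (float64, bool) {
	switch n := v.(type) {
	case float64:
		return n, true
	case float32:
		return float64(n), true
	case uint64:
		return float64(n), true
	case uint32:
		return float64(n), true
	case int64:
		return float64(n), true
	case int32:
		return float64(n), true
	case int:
		return float64(n), true
	default:
		return 0, false
	}
}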
-// NewHeader creates an object of type Header if possible, returning an error if not.
-func NewHeader(in interface{}, context *compiler.Context) (*Header, error) {
-	errors := make([]error, 0)
-	x := &Header{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		requiredKeys := []string{"type"}
-		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
-		if len(missingKeys) > 0 {
-			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string type = 1;
-		v1 := compiler.MapValueForKey(m, "type")
-		if v1 != nil {
-			x.Type, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [string number integer boolean array]
-			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string format = 2;
-		v2 := compiler.MapValueForKey(m, "format")
-		if v2 != nil {
-			x.Format, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// PrimitivesItems items = 3;
-		v3 := compiler.MapValueForKey(m, "items")
-		if v3 != nil {
-			var err error
-			x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// string collection_format = 4;
-		v4 := compiler.MapValueForKey(m, "collectionFormat")
-		if v4 != nil {
-			x.CollectionFormat, ok = v4.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [csv ssv tsv pipes]
-			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
-				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Any default = 5;
-		v5 := compiler.MapValueForKey(m, "default")
-		if v5 != nil {
-			var err error
-			x.Default, err = NewAny(v5, compiler.NewContext("default", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// float maximum = 6;
-		v6 := compiler.MapValueForKey(m, "maximum")
-		if v6 != nil {
-			switch v6 := v6.(type) {
-			case float64:
-				x.Maximum = v6
-			case float32:
-				x.Maximum = float64(v6)
-			case uint64:
-				x.Maximum = float64(v6)
-			case uint32:
-				x.Maximum = float64(v6)
-			case int64:
-				x.Maximum = float64(v6)
-			case int32:
-				x.Maximum = float64(v6)
-			case int:
-				x.Maximum = float64(v6)
-			default:
-				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool exclusive_maximum = 7;
-		v7 := compiler.MapValueForKey(m, "exclusiveMaximum")
-		if v7 != nil {
-			x.ExclusiveMaximum, ok = v7.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// float minimum = 8;
-		v8 := compiler.MapValueForKey(m, "minimum")
-		if v8 != nil {
-			switch v8 := v8.(type) {
-			case float64:
-				x.Minimum = v8
-			case float32:
-				x.Minimum = float64(v8)
-			case uint64:
-				x.Minimum = float64(v8)
-			case uint32:
-				x.Minimum = float64(v8)
-			case int64:
-				x.Minimum = float64(v8)
-			case int32:
-				x.Minimum = float64(v8)
-			case int:
-				x.Minimum = float64(v8)
-			default:
-				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool exclusive_minimum = 9;
-		v9 := compiler.MapValueForKey(m, "exclusiveMinimum")
-		if v9 != nil {
-			x.ExclusiveMinimum, ok = v9.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 max_length = 10;
-		v10 := compiler.MapValueForKey(m, "maxLength")
-		if v10 != nil {
-			t, ok := v10.(int)
-			if ok {
-				x.MaxLength = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 min_length = 11;
-		v11 := compiler.MapValueForKey(m, "minLength")
-		if v11 != nil {
-			t, ok := v11.(int)
-			if ok {
-				x.MinLength = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string pattern = 12;
-		v12 := compiler.MapValueForKey(m, "pattern")
-		if v12 != nil {
-			x.Pattern, ok = v12.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 max_items = 13;
-		v13 := compiler.MapValueForKey(m, "maxItems")
-		if v13 != nil {
-			t, ok := v13.(int)
-			if ok {
-				x.MaxItems = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 min_items = 14;
-		v14 := compiler.MapValueForKey(m, "minItems")
-		if v14 != nil {
-			t, ok := v14.(int)
-			if ok {
-				x.MinItems = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool unique_items = 15;
-		v15 := compiler.MapValueForKey(m, "uniqueItems")
-		if v15 != nil {
-			x.UniqueItems, ok = v15.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated Any enum = 16;
-		v16 := compiler.MapValueForKey(m, "enum")
-		if v16 != nil {
-			// repeated Any
-			x.Enum = make([]*Any, 0)
-			a, ok := v16.([]interface{})
-			if ok {
-				for _, item := range a {
-					y, err := NewAny(item, compiler.NewContext("enum", context))
-					if err != nil {
-						errors = append(errors, err)
-					}
-					x.Enum = append(x.Enum, y)
-				}
-			}
-		}
-		// float multiple_of = 17;
-		v17 := compiler.MapValueForKey(m, "multipleOf")
-		if v17 != nil {
-			switch v17 := v17.(type) {
-			case float64:
-				x.MultipleOf = v17
-			case float32:
-				x.MultipleOf = float64(v17)
-			case uint64:
-				x.MultipleOf = float64(v17)
-			case uint32:
-				x.MultipleOf = float64(v17)
-			case int64:
-				x.MultipleOf = float64(v17)
-			case int32:
-				x.MultipleOf = float64(v17)
-			case int:
-				x.MultipleOf = float64(v17)
-			default:
-				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string description = 18;
-		v18 := compiler.MapValueForKey(m, "description")
-		if v18 != nil {
-			x.Description, ok = v18.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v18, v18)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated NamedAny vendor_extension = 19;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
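Every constructor ends with the same loop that collects "x-"-prefixed keys into VendorExtension. A rough usage sketch, assuming yaml is gopkg.in/yaml.v2 (whose MapSlice the compiler package unpacks) and that a nil parent context is acceptable:

m := yaml.MapSlice{
	{Key: "type", Value: "string"},
	{Key: "x-internal-id", Value: 42},
}
h, err := NewHeader(m, compiler.NewContext("header", nil))
// on success, h.VendorExtension holds one NamedAny named "x-internal-id";
// "type" is in requiredKeys here, so omitting it would add a missing-key error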
-// NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not.
-func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*HeaderParameterSubSchema, error) {
-	errors := make([]error, 0)
-	x := &HeaderParameterSubSchema{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// bool required = 1;
-		v1 := compiler.MapValueForKey(m, "required")
-		if v1 != nil {
-			x.Required, ok = v1.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string in = 2;
-		v2 := compiler.MapValueForKey(m, "in")
-		if v2 != nil {
-			x.In, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [header]
-			if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) {
-				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string description = 3;
-		v3 := compiler.MapValueForKey(m, "description")
-		if v3 != nil {
-			x.Description, ok = v3.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string name = 4;
-		v4 := compiler.MapValueForKey(m, "name")
-		if v4 != nil {
-			x.Name, ok = v4.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string type = 5;
-		v5 := compiler.MapValueForKey(m, "type")
-		if v5 != nil {
-			x.Type, ok = v5.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [string number boolean integer array]
-			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string format = 6;
-		v6 := compiler.MapValueForKey(m, "format")
-		if v6 != nil {
-			x.Format, ok = v6.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// PrimitivesItems items = 7;
-		v7 := compiler.MapValueForKey(m, "items")
-		if v7 != nil {
-			var err error
-			x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// string collection_format = 8;
-		v8 := compiler.MapValueForKey(m, "collectionFormat")
-		if v8 != nil {
-			x.CollectionFormat, ok = v8.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [csv ssv tsv pipes]
-			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
-				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Any default = 9;
-		v9 := compiler.MapValueForKey(m, "default")
-		if v9 != nil {
-			var err error
-			x.Default, err = NewAny(v9, compiler.NewContext("default", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// float maximum = 10;
-		v10 := compiler.MapValueForKey(m, "maximum")
-		if v10 != nil {
-			switch v10 := v10.(type) {
-			case float64:
-				x.Maximum = v10
-			case float32:
-				x.Maximum = float64(v10)
-			case uint64:
-				x.Maximum = float64(v10)
-			case uint32:
-				x.Maximum = float64(v10)
-			case int64:
-				x.Maximum = float64(v10)
-			case int32:
-				x.Maximum = float64(v10)
-			case int:
-				x.Maximum = float64(v10)
-			default:
-				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool exclusive_maximum = 11;
-		v11 := compiler.MapValueForKey(m, "exclusiveMaximum")
-		if v11 != nil {
-			x.ExclusiveMaximum, ok = v11.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// float minimum = 12;
-		v12 := compiler.MapValueForKey(m, "minimum")
-		if v12 != nil {
-			switch v12 := v12.(type) {
-			case float64:
-				x.Minimum = v12
-			case float32:
-				x.Minimum = float64(v12)
-			case uint64:
-				x.Minimum = float64(v12)
-			case uint32:
-				x.Minimum = float64(v12)
-			case int64:
-				x.Minimum = float64(v12)
-			case int32:
-				x.Minimum = float64(v12)
-			case int:
-				x.Minimum = float64(v12)
-			default:
-				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool exclusive_minimum = 13;
-		v13 := compiler.MapValueForKey(m, "exclusiveMinimum")
-		if v13 != nil {
-			x.ExclusiveMinimum, ok = v13.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 max_length = 14;
-		v14 := compiler.MapValueForKey(m, "maxLength")
-		if v14 != nil {
-			t, ok := v14.(int)
-			if ok {
-				x.MaxLength = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 min_length = 15;
-		v15 := compiler.MapValueForKey(m, "minLength")
-		if v15 != nil {
-			t, ok := v15.(int)
-			if ok {
-				x.MinLength = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string pattern = 16;
-		v16 := compiler.MapValueForKey(m, "pattern")
-		if v16 != nil {
-			x.Pattern, ok = v16.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 max_items = 17;
-		v17 := compiler.MapValueForKey(m, "maxItems")
-		if v17 != nil {
-			t, ok := v17.(int)
-			if ok {
-				x.MaxItems = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 min_items = 18;
-		v18 := compiler.MapValueForKey(m, "minItems")
-		if v18 != nil {
-			t, ok := v18.(int)
-			if ok {
-				x.MinItems = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool unique_items = 19;
-		v19 := compiler.MapValueForKey(m, "uniqueItems")
-		if v19 != nil {
-			x.UniqueItems, ok = v19.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated Any enum = 20;
-		v20 := compiler.MapValueForKey(m, "enum")
-		if v20 != nil {
-			// repeated Any
-			x.Enum = make([]*Any, 0)
-			a, ok := v20.([]interface{})
-			if ok {
-				for _, item := range a {
-					y, err := NewAny(item, compiler.NewContext("enum", context))
-					if err != nil {
-						errors = append(errors, err)
-					}
-					x.Enum = append(x.Enum, y)
-				}
-			}
-		}
-		// float multiple_of = 21;
-		v21 := compiler.MapValueForKey(m, "multipleOf")
-		if v21 != nil {
-			switch v21 := v21.(type) {
-			case float64:
-				x.MultipleOf = v21
-			case float32:
-				x.MultipleOf = float64(v21)
-			case uint64:
-				x.MultipleOf = float64(v21)
-			case uint32:
-				x.MultipleOf = float64(v21)
-			case int64:
-				x.MultipleOf = float64(v21)
-			case int32:
-				x.MultipleOf = float64(v21)
-			case int:
-				x.MultipleOf = float64(v21)
-			default:
-				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated NamedAny vendor_extension = 22;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewHeaders creates an object of type Headers if possible, returning an error if not.
-func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) {
-	errors := make([]error, 0)
-	x := &Headers{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		// repeated NamedHeader additional_properties = 1;
-		// MAP: Header
-		x.AdditionalProperties = make([]*NamedHeader, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				pair := &NamedHeader{}
-				pair.Name = k
-				var err error
-				pair.Value, err = NewHeader(v, compiler.NewContext(k, context))
-				if err != nil {
-					errors = append(errors, err)
-				}
-				x.AdditionalProperties = append(x.AdditionalProperties, pair)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
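NewHeaders treats every key of the map as a header name, so the proto map field comes back as a slice of NamedHeader pairs rather than a Go map. A small sketch under the same yaml.v2 assumption:

m := yaml.MapSlice{
	{Key: "X-Rate-Limit", Value: yaml.MapSlice{{Key: "type", Value: "integer"}}},
}
hs, err := NewHeaders(m, compiler.NewContext("headers", nil))
// on success, hs.AdditionalProperties[0].Name == "X-Rate-Limit"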
-// NewInfo creates an object of type Info if possible, returning an error if not.
-func NewInfo(in interface{}, context *compiler.Context) (*Info, error) {
-	errors := make([]error, 0)
-	x := &Info{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		requiredKeys := []string{"title", "version"}
-		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
-		if len(missingKeys) > 0 {
-			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		allowedKeys := []string{"contact", "description", "license", "termsOfService", "title", "version"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string title = 1;
-		v1 := compiler.MapValueForKey(m, "title")
-		if v1 != nil {
-			x.Title, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string version = 2;
-		v2 := compiler.MapValueForKey(m, "version")
-		if v2 != nil {
-			x.Version, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for version: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string description = 3;
-		v3 := compiler.MapValueForKey(m, "description")
-		if v3 != nil {
-			x.Description, ok = v3.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string terms_of_service = 4;
-		v4 := compiler.MapValueForKey(m, "termsOfService")
-		if v4 != nil {
-			x.TermsOfService, ok = v4.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for termsOfService: %+v (%T)", v4, v4)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Contact contact = 5;
-		v5 := compiler.MapValueForKey(m, "contact")
-		if v5 != nil {
-			var err error
-			x.Contact, err = NewContact(v5, compiler.NewContext("contact", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// License license = 6;
-		v6 := compiler.MapValueForKey(m, "license")
-		if v6 != nil {
-			var err error
-			x.License, err = NewLicense(v6, compiler.NewContext("license", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// repeated NamedAny vendor_extension = 7;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
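The allowed-key check rejects anything that is neither in allowedKeys nor matched by pattern0 (the ^x- extension pattern referenced in the MAP comments), so a misspelled key surfaces as an error instead of being silently dropped. A hedged sketch against NewInfo:

m := yaml.MapSlice{
	{Key: "title", Value: "Example API"},
	{Key: "version", Value: "1.0"},
	{Key: "descriptoin", Value: "typo"}, // not allowed and not an x- extension
}
_, err := NewInfo(m, compiler.NewContext("info", nil))
// err is non-nil and reports the invalid property "descriptoin"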
-// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not.
-func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error) {
-	errors := make([]error, 0)
-	x := &ItemsItem{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		x.Schema = make([]*Schema, 0)
-		y, err := NewSchema(m, compiler.NewContext("<array>", context))
-		if err != nil {
-			return nil, err
-		}
-		x.Schema = append(x.Schema, y)
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
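NewItemsItem accepts a single schema map but stores it as a one-element slice, matching the repeated Schema proto field; note that, unlike the other constructors, it returns early on a schema error rather than accumulating it. A sketch:

m := yaml.MapSlice{{Key: "type", Value: "string"}}
items, err := NewItemsItem(m, compiler.NewContext("items", nil))
// on success, len(items.Schema) == 1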
-// NewJsonReference creates an object of type JsonReference if possible, returning an error if not.
-func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference, error) {
-	errors := make([]error, 0)
-	x := &JsonReference{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		requiredKeys := []string{"$ref"}
-		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
-		if len(missingKeys) > 0 {
-			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		allowedKeys := []string{"$ref", "description"}
-		var allowedPatterns []*regexp.Regexp
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string _ref = 1;
-		v1 := compiler.MapValueForKey(m, "$ref")
-		if v1 != nil {
-			x.XRef, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string description = 2;
-		v2 := compiler.MapValueForKey(m, "description")
-		if v2 != nil {
-			x.Description, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
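NewJsonReference requires $ref and permits only $ref and description; allowedPatterns is empty here, so even x- extensions are rejected. Usage sketch:

m := yaml.MapSlice{{Key: "$ref", Value: "#/definitions/Pet"}}
ref, err := NewJsonReference(m, compiler.NewContext("ref", nil))
// ref.XRef == "#/definitions/Pet" when err is nil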
-// NewLicense creates an object of type License if possible, returning an error if not.
-func NewLicense(in interface{}, context *compiler.Context) (*License, error) {
-	errors := make([]error, 0)
-	x := &License{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		requiredKeys := []string{"name"}
-		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
-		if len(missingKeys) > 0 {
-			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		allowedKeys := []string{"name", "url"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string name = 1;
-		v1 := compiler.MapValueForKey(m, "name")
-		if v1 != nil {
-			x.Name, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string url = 2;
-		v2 := compiler.MapValueForKey(m, "url")
-		if v2 != nil {
-			x.Url, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated NamedAny vendor_extension = 3;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
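A corresponding sketch for NewLicense, which requires name and accepts url plus x- extensions (same yaml.v2 and nil-parent assumptions as above):

m := yaml.MapSlice{
	{Key: "name", Value: "Apache 2.0"},
	{Key: "url", Value: "https://www.apache.org/licenses/LICENSE-2.0"},
}
lic, err := NewLicense(m, compiler.NewContext("license", nil))
// lic.Name and lic.Url are populated when err is nil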
-// NewNamedAny creates an object of type NamedAny if possible, returning an error if not.
-func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) {
-	errors := make([]error, 0)
-	x := &NamedAny{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{"name", "value"}
-		var allowedPatterns []*regexp.Regexp
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string name = 1;
-		v1 := compiler.MapValueForKey(m, "name")
-		if v1 != nil {
-			x.Name, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Any value = 2;
-		v2 := compiler.MapValueForKey(m, "value")
-		if v2 != nil {
-			var err error
-			x.Value, err = NewAny(v2, compiler.NewContext("value", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not.
-func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, error) {
-	errors := make([]error, 0)
-	x := &NamedHeader{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{"name", "value"}
-		var allowedPatterns []*regexp.Regexp
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string name = 1;
-		v1 := compiler.MapValueForKey(m, "name")
-		if v1 != nil {
-			x.Name, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Header value = 2;
-		v2 := compiler.MapValueForKey(m, "value")
-		if v2 != nil {
-			var err error
-			x.Value, err = NewHeader(v2, compiler.NewContext("value", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not.
-func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParameter, error) {
-	errors := make([]error, 0)
-	x := &NamedParameter{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{"name", "value"}
-		var allowedPatterns []*regexp.Regexp
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string name = 1;
-		v1 := compiler.MapValueForKey(m, "name")
-		if v1 != nil {
-			x.Name, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Parameter value = 2;
-		v2 := compiler.MapValueForKey(m, "value")
-		if v2 != nil {
-			var err error
-			x.Value, err = NewParameter(v2, compiler.NewContext("value", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not.
-func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem, error) {
-	errors := make([]error, 0)
-	x := &NamedPathItem{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{"name", "value"}
-		var allowedPatterns []*regexp.Regexp
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string name = 1;
-		v1 := compiler.MapValueForKey(m, "name")
-		if v1 != nil {
-			x.Name, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// PathItem value = 2;
-		v2 := compiler.MapValueForKey(m, "value")
-		if v2 != nil {
-			var err error
-			x.Value, err = NewPathItem(v2, compiler.NewContext("value", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not.
-func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse, error) {
-	errors := make([]error, 0)
-	x := &NamedResponse{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{"name", "value"}
-		var allowedPatterns []*regexp.Regexp
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string name = 1;
-		v1 := compiler.MapValueForKey(m, "name")
-		if v1 != nil {
-			x.Name, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Response value = 2;
-		v2 := compiler.MapValueForKey(m, "value")
-		if v2 != nil {
-			var err error
-			x.Value, err = NewResponse(v2, compiler.NewContext("value", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not.
-func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedResponseValue, error) {
-	errors := make([]error, 0)
-	x := &NamedResponseValue{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{"name", "value"}
-		var allowedPatterns []*regexp.Regexp
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string name = 1;
-		v1 := compiler.MapValueForKey(m, "name")
-		if v1 != nil {
-			x.Name, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// ResponseValue value = 2;
-		v2 := compiler.MapValueForKey(m, "value")
-		if v2 != nil {
-			var err error
-			x.Value, err = NewResponseValue(v2, compiler.NewContext("value", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewNamedSchema creates an object of type NamedSchema if possible, returning an error if not.
-func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, error) {
-	errors := make([]error, 0)
-	x := &NamedSchema{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{"name", "value"}
-		var allowedPatterns []*regexp.Regexp
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string name = 1;
-		v1 := compiler.MapValueForKey(m, "name")
-		if v1 != nil {
-			x.Name, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Schema value = 2;
-		v2 := compiler.MapValueForKey(m, "value")
-		if v2 != nil {
-			var err error
-			x.Value, err = NewSchema(v2, compiler.NewContext("value", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not.
-func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) {
-	errors := make([]error, 0)
-	x := &NamedSecurityDefinitionsItem{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{"name", "value"}
-		var allowedPatterns []*regexp.Regexp
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string name = 1;
-		v1 := compiler.MapValueForKey(m, "name")
-		if v1 != nil {
-			x.Name, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// SecurityDefinitionsItem value = 2;
-		v2 := compiler.MapValueForKey(m, "value")
-		if v2 != nil {
-			var err error
-			x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewNamedString creates an object of type NamedString if possible, returning an error if not.
-func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, error) {
-	errors := make([]error, 0)
-	x := &NamedString{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{"name", "value"}
-		var allowedPatterns []*regexp.Regexp
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string name = 1;
-		v1 := compiler.MapValueForKey(m, "name")
-		if v1 != nil {
-			x.Name, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string value = 2;
-		v2 := compiler.MapValueForKey(m, "value")
-		if v2 != nil {
-			x.Value, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for value: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not.
-func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStringArray, error) {
-	errors := make([]error, 0)
-	x := &NamedStringArray{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{"name", "value"}
-		var allowedPatterns []*regexp.Regexp
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string name = 1;
-		v1 := compiler.MapValueForKey(m, "name")
-		if v1 != nil {
-			x.Name, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// StringArray value = 2;
-		v2 := compiler.MapValueForKey(m, "value")
-		if v2 != nil {
-			var err error
-			x.Value, err = NewStringArray(v2, compiler.NewContext("value", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not.
-func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyParameter, error) {
-	errors := make([]error, 0)
-	x := &NonBodyParameter{}
-	matched := false
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		requiredKeys := []string{"in", "name", "type"}
-		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
-		if len(missingKeys) > 0 {
-			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// HeaderParameterSubSchema header_parameter_sub_schema = 1;
-		{
-			// errors might be ok here; they just mean this isn't the right subtype
-			t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", context))
-			if matchingError == nil {
-				x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-		// FormDataParameterSubSchema form_data_parameter_sub_schema = 2;
-		{
-			// errors might be okay here; they just mean we don't have the right subtype
-			t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", context))
-			if matchingError == nil {
-				x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-		// QueryParameterSubSchema query_parameter_sub_schema = 3;
-		{
-			// errors might be okay here; they just mean we don't have the right subtype
-			t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", context))
-			if matchingError == nil {
-				x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-		// PathParameterSubSchema path_parameter_sub_schema = 4;
-		{
-			// errors might be okay here; they just mean we don't have the right subtype
-			t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", context))
-			if matchingError == nil {
-				x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	if matched {
-		// since the oneof matched one of its possibilities, discard any matching errors
-		errors = make([]error, 0)
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not.
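-// Keys matching the "^x-" pattern are gathered as vendor extensions:
-// compiler.HandleExtension gets the first chance to decode a registered
-// extension, and unhandled ones fall back to a generic Any value.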
-func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) {
-	errors := make([]error, 0)
-	x := &Oauth2AccessCodeSecurity{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		requiredKeys := []string{"authorizationUrl", "flow", "tokenUrl", "type"}
-		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
-		if len(missingKeys) > 0 {
-			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "tokenUrl", "type"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string type = 1;
-		v1 := compiler.MapValueForKey(m, "type")
-		if v1 != nil {
-			x.Type, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [oauth2]
-			if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string flow = 2;
-		v2 := compiler.MapValueForKey(m, "flow")
-		if v2 != nil {
-			x.Flow, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [accessCode]
-			if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) {
-				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Oauth2Scopes scopes = 3;
-		v3 := compiler.MapValueForKey(m, "scopes")
-		if v3 != nil {
-			var err error
-			x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// string authorization_url = 4;
-		v4 := compiler.MapValueForKey(m, "authorizationUrl")
-		if v4 != nil {
-			x.AuthorizationUrl, ok = v4.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string token_url = 5;
-		v5 := compiler.MapValueForKey(m, "tokenUrl")
-		if v5 != nil {
-			x.TokenUrl, ok = v5.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v5, v5)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string description = 6;
-		v6 := compiler.MapValueForKey(m, "description")
-		if v6 != nil {
-			x.Description, ok = v6.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v6, v6)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated NamedAny vendor_extension = 7;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not.
-func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*Oauth2ApplicationSecurity, error) {
-	errors := make([]error, 0)
-	x := &Oauth2ApplicationSecurity{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		requiredKeys := []string{"flow", "tokenUrl", "type"}
-		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
-		if len(missingKeys) > 0 {
-			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string type = 1;
-		v1 := compiler.MapValueForKey(m, "type")
-		if v1 != nil {
-			x.Type, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [oauth2]
-			if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string flow = 2;
-		v2 := compiler.MapValueForKey(m, "flow")
-		if v2 != nil {
-			x.Flow, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [application]
-			if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) {
-				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Oauth2Scopes scopes = 3;
-		v3 := compiler.MapValueForKey(m, "scopes")
-		if v3 != nil {
-			var err error
-			x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// string token_url = 4;
-		v4 := compiler.MapValueForKey(m, "tokenUrl")
-		if v4 != nil {
-			x.TokenUrl, ok = v4.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string description = 5;
-		v5 := compiler.MapValueForKey(m, "description")
-		if v5 != nil {
-			x.Description, ok = v5.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated NamedAny vendor_extension = 6;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not.
-func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oauth2ImplicitSecurity, error) {
-	errors := make([]error, 0)
-	x := &Oauth2ImplicitSecurity{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		requiredKeys := []string{"authorizationUrl", "flow", "type"}
-		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
-		if len(missingKeys) > 0 {
-			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "type"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string type = 1;
-		v1 := compiler.MapValueForKey(m, "type")
-		if v1 != nil {
-			x.Type, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [oauth2]
-			if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string flow = 2;
-		v2 := compiler.MapValueForKey(m, "flow")
-		if v2 != nil {
-			x.Flow, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [implicit]
-			if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) {
-				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Oauth2Scopes scopes = 3;
-		v3 := compiler.MapValueForKey(m, "scopes")
-		if v3 != nil {
-			var err error
-			x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// string authorization_url = 4;
-		v4 := compiler.MapValueForKey(m, "authorizationUrl")
-		if v4 != nil {
-			x.AuthorizationUrl, ok = v4.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string description = 5;
-		v5 := compiler.MapValueForKey(m, "description")
-		if v5 != nil {
-			x.Description, ok = v5.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated NamedAny vendor_extension = 6;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not.
-func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oauth2PasswordSecurity, error) {
-	errors := make([]error, 0)
-	x := &Oauth2PasswordSecurity{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		requiredKeys := []string{"flow", "tokenUrl", "type"}
-		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
-		if len(missingKeys) > 0 {
-			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string type = 1;
-		v1 := compiler.MapValueForKey(m, "type")
-		if v1 != nil {
-			x.Type, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [oauth2]
-			if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string flow = 2;
-		v2 := compiler.MapValueForKey(m, "flow")
-		if v2 != nil {
-			x.Flow, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [password]
-			if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) {
-				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Oauth2Scopes scopes = 3;
-		v3 := compiler.MapValueForKey(m, "scopes")
-		if v3 != nil {
-			var err error
-			x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// string token_url = 4;
-		v4 := compiler.MapValueForKey(m, "tokenUrl")
-		if v4 != nil {
-			x.TokenUrl, ok = v4.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string description = 5;
-		v5 := compiler.MapValueForKey(m, "description")
-		if v5 != nil {
-			x.Description, ok = v5.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated NamedAny vendor_extension = 6;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not.
-func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes, error) {
-	errors := make([]error, 0)
-	x := &Oauth2Scopes{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		// repeated NamedString additional_properties = 1;
-		// MAP: string
-		x.AdditionalProperties = make([]*NamedString, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				scope, isString := v.(string)
-				if !isString {
-					message := fmt.Sprintf("has unexpected value for scope %s: %+v (%T)", k, v, v)
-					errors = append(errors, compiler.NewError(context, message))
-				}
-				// checked assertion avoids a panic on non-string scope values
-				pair := &NamedString{Name: k, Value: scope}
-				x.AdditionalProperties = append(x.AdditionalProperties, pair)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewOperation creates an object of type Operation if possible, returning an error if not.
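-// A minimal usage sketch (hypothetical caller; `node` stands for an unpacked
-// YAML value and `ctx` for an enclosing *compiler.Context):
-//
-//	op, err := NewOperation(node, compiler.NewContext("operation", ctx))
-//	if err != nil {
-//		// node does not describe a valid Operation
-//	}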
-func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) {
-	errors := make([]error, 0)
-	x := &Operation{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		requiredKeys := []string{"responses"}
-		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
-		if len(missingKeys) > 0 {
-			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// repeated string tags = 1;
-		v1 := compiler.MapValueForKey(m, "tags")
-		if v1 != nil {
-			v, ok := v1.([]interface{})
-			if ok {
-				x.Tags = compiler.ConvertInterfaceArrayToStringArray(v)
-			} else {
-				message := fmt.Sprintf("has unexpected value for tags: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string summary = 2;
-		v2 := compiler.MapValueForKey(m, "summary")
-		if v2 != nil {
-			x.Summary, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for summary: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string description = 3;
-		v3 := compiler.MapValueForKey(m, "description")
-		if v3 != nil {
-			x.Description, ok = v3.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// ExternalDocs external_docs = 4;
-		v4 := compiler.MapValueForKey(m, "externalDocs")
-		if v4 != nil {
-			var err error
-			x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// string operation_id = 5;
-		v5 := compiler.MapValueForKey(m, "operationId")
-		if v5 != nil {
-			x.OperationId, ok = v5.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for operationId: %+v (%T)", v5, v5)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated string produces = 6;
-		v6 := compiler.MapValueForKey(m, "produces")
-		if v6 != nil {
-			v, ok := v6.([]interface{})
-			if ok {
-				x.Produces = compiler.ConvertInterfaceArrayToStringArray(v)
-			} else {
-				message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v6, v6)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated string consumes = 7;
-		v7 := compiler.MapValueForKey(m, "consumes")
-		if v7 != nil {
-			v, ok := v7.([]interface{})
-			if ok {
-				x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v)
-			} else {
-				message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v7, v7)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated ParametersItem parameters = 8;
-		v8 := compiler.MapValueForKey(m, "parameters")
-		if v8 != nil {
-			// repeated ParametersItem
-			x.Parameters = make([]*ParametersItem, 0)
-			a, ok := v8.([]interface{})
-			if ok {
-				for _, item := range a {
-					y, err := NewParametersItem(item, compiler.NewContext("parameters", context))
-					if err != nil {
-						errors = append(errors, err)
-					}
-					x.Parameters = append(x.Parameters, y)
-				}
-			}
-		}
-		// Responses responses = 9;
-		v9 := compiler.MapValueForKey(m, "responses")
-		if v9 != nil {
-			var err error
-			x.Responses, err = NewResponses(v9, compiler.NewContext("responses", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// repeated string schemes = 10;
-		v10 := compiler.MapValueForKey(m, "schemes")
-		if v10 != nil {
-			v, ok := v10.([]interface{})
-			if ok {
-				x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v)
-			} else {
-				message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v10, v10)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [http https ws wss]
-			if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) {
-				message := fmt.Sprintf("has unexpected value for schemes: %+v", v10)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool deprecated = 11;
-		v11 := compiler.MapValueForKey(m, "deprecated")
-		if v11 != nil {
-			x.Deprecated, ok = v11.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for deprecated: %+v (%T)", v11, v11)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated SecurityRequirement security = 12;
-		v12 := compiler.MapValueForKey(m, "security")
-		if v12 != nil {
-			// repeated SecurityRequirement
-			x.Security = make([]*SecurityRequirement, 0)
-			a, ok := v12.([]interface{})
-			if ok {
-				for _, item := range a {
-					y, err := NewSecurityRequirement(item, compiler.NewContext("security", context))
-					if err != nil {
-						errors = append(errors, err)
-					}
-					x.Security = append(x.Security, y)
-				}
-			}
-		}
-		// repeated NamedAny vendor_extension = 13;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewParameter creates an object of type Parameter if possible, returning an error if not.
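-// The input is matched first as a BodyParameter and then as a
-// NonBodyParameter; whichever constructor succeeds populates the oneof.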
-func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error) {
-	errors := make([]error, 0)
-	x := &Parameter{}
-	matched := false
-	// BodyParameter body_parameter = 1;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors might be okay here; they just mean we don't have the right subtype
-			t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", context))
-			if matchingError == nil {
-				x.Oneof = &Parameter_BodyParameter{BodyParameter: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	// NonBodyParameter non_body_parameter = 2;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors might be okay here; they just mean we don't have the right subtype
-			t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", context))
-			if matchingError == nil {
-				x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	if matched {
-		// since the oneof matched one of its possibilities, discard any matching errors
-		errors = make([]error, 0)
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not.
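-// Each map entry becomes a NamedParameter pair: the key supplies the name
-// and the value is parsed as a full Parameter.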
-func NewParameterDefinitions(in interface{}, context *compiler.Context) (*ParameterDefinitions, error) {
-	errors := make([]error, 0)
-	x := &ParameterDefinitions{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		// repeated NamedParameter additional_properties = 1;
-		// MAP: Parameter
-		x.AdditionalProperties = make([]*NamedParameter, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				pair := &NamedParameter{}
-				pair.Name = k
-				var err error
-				pair.Value, err = NewParameter(v, compiler.NewContext(k, context))
-				if err != nil {
-					errors = append(errors, err)
-				}
-				x.AdditionalProperties = append(x.AdditionalProperties, pair)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not.
-func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersItem, error) {
-	errors := make([]error, 0)
-	x := &ParametersItem{}
-	matched := false
-	// Parameter parameter = 1;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors might be okay here; they just mean we don't have the right subtype
-			t, matchingError := NewParameter(m, compiler.NewContext("parameter", context))
-			if matchingError == nil {
-				x.Oneof = &ParametersItem_Parameter{Parameter: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	// JsonReference json_reference = 2;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors might be okay here; they just mean we don't have the right subtype
-			t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context))
-			if matchingError == nil {
-				x.Oneof = &ParametersItem_JsonReference{JsonReference: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	if matched {
-		// since the oneof matched one of its possibilities, discard any matching errors
-		errors = make([]error, 0)
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewPathItem creates an object of type PathItem if possible, returning an error if not.
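-// A path item either references another definition through "$ref" or
-// describes the operations available for each HTTP method on the path.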
-func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
-	errors := make([]error, 0)
-	x := &PathItem{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{"$ref", "delete", "get", "head", "options", "parameters", "patch", "post", "put"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string _ref = 1;
-		v1 := compiler.MapValueForKey(m, "$ref")
-		if v1 != nil {
-			x.XRef, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Operation get = 2;
-		v2 := compiler.MapValueForKey(m, "get")
-		if v2 != nil {
-			var err error
-			x.Get, err = NewOperation(v2, compiler.NewContext("get", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// Operation put = 3;
-		v3 := compiler.MapValueForKey(m, "put")
-		if v3 != nil {
-			var err error
-			x.Put, err = NewOperation(v3, compiler.NewContext("put", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// Operation post = 4;
-		v4 := compiler.MapValueForKey(m, "post")
-		if v4 != nil {
-			var err error
-			x.Post, err = NewOperation(v4, compiler.NewContext("post", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// Operation delete = 5;
-		v5 := compiler.MapValueForKey(m, "delete")
-		if v5 != nil {
-			var err error
-			x.Delete, err = NewOperation(v5, compiler.NewContext("delete", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// Operation options = 6;
-		v6 := compiler.MapValueForKey(m, "options")
-		if v6 != nil {
-			var err error
-			x.Options, err = NewOperation(v6, compiler.NewContext("options", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// Operation head = 7;
-		v7 := compiler.MapValueForKey(m, "head")
-		if v7 != nil {
-			var err error
-			x.Head, err = NewOperation(v7, compiler.NewContext("head", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// Operation patch = 8;
-		v8 := compiler.MapValueForKey(m, "patch")
-		if v8 != nil {
-			var err error
-			x.Patch, err = NewOperation(v8, compiler.NewContext("patch", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// repeated ParametersItem parameters = 9;
-		v9 := compiler.MapValueForKey(m, "parameters")
-		if v9 != nil {
-			// repeated ParametersItem
-			x.Parameters = make([]*ParametersItem, 0)
-			a, ok := v9.([]interface{})
-			if ok {
-				for _, item := range a {
-					y, err := NewParametersItem(item, compiler.NewContext("parameters", context))
-					if err != nil {
-						errors = append(errors, err)
-					}
-					x.Parameters = append(x.Parameters, y)
-				}
-			}
-		}
-		// repeated NamedAny vendor_extension = 10;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not.
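-// Numeric bounds are coerced through explicit type switches because the YAML
-// decoder may surface a number as any of float64, float32, or the signed and
-// unsigned integer widths.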
-func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*PathParameterSubSchema, error) {
-	errors := make([]error, 0)
-	x := &PathParameterSubSchema{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		requiredKeys := []string{"required"}
-		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
-		if len(missingKeys) > 0 {
-			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// bool required = 1;
-		v1 := compiler.MapValueForKey(m, "required")
-		if v1 != nil {
-			x.Required, ok = v1.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string in = 2;
-		v2 := compiler.MapValueForKey(m, "in")
-		if v2 != nil {
-			x.In, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [path]
-			if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) {
-				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string description = 3;
-		v3 := compiler.MapValueForKey(m, "description")
-		if v3 != nil {
-			x.Description, ok = v3.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string name = 4;
-		v4 := compiler.MapValueForKey(m, "name")
-		if v4 != nil {
-			x.Name, ok = v4.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string type = 5;
-		v5 := compiler.MapValueForKey(m, "type")
-		if v5 != nil {
-			x.Type, ok = v5.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [string number boolean integer array]
-			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string format = 6;
-		v6 := compiler.MapValueForKey(m, "format")
-		if v6 != nil {
-			x.Format, ok = v6.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// PrimitivesItems items = 7;
-		v7 := compiler.MapValueForKey(m, "items")
-		if v7 != nil {
-			var err error
-			x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// string collection_format = 8;
-		v8 := compiler.MapValueForKey(m, "collectionFormat")
-		if v8 != nil {
-			x.CollectionFormat, ok = v8.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [csv ssv tsv pipes]
-			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
-				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Any default = 9;
-		v9 := compiler.MapValueForKey(m, "default")
-		if v9 != nil {
-			var err error
-			x.Default, err = NewAny(v9, compiler.NewContext("default", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// float maximum = 10;
-		v10 := compiler.MapValueForKey(m, "maximum")
-		if v10 != nil {
-			switch v10 := v10.(type) {
-			case float64:
-				x.Maximum = v10
-			case float32:
-				x.Maximum = float64(v10)
-			case uint64:
-				x.Maximum = float64(v10)
-			case uint32:
-				x.Maximum = float64(v10)
-			case int64:
-				x.Maximum = float64(v10)
-			case int32:
-				x.Maximum = float64(v10)
-			case int:
-				x.Maximum = float64(v10)
-			default:
-				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool exclusive_maximum = 11;
-		v11 := compiler.MapValueForKey(m, "exclusiveMaximum")
-		if v11 != nil {
-			x.ExclusiveMaximum, ok = v11.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// float minimum = 12;
-		v12 := compiler.MapValueForKey(m, "minimum")
-		if v12 != nil {
-			switch v12 := v12.(type) {
-			case float64:
-				x.Minimum = v12
-			case float32:
-				x.Minimum = float64(v12)
-			case uint64:
-				x.Minimum = float64(v12)
-			case uint32:
-				x.Minimum = float64(v12)
-			case int64:
-				x.Minimum = float64(v12)
-			case int32:
-				x.Minimum = float64(v12)
-			case int:
-				x.Minimum = float64(v12)
-			default:
-				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool exclusive_minimum = 13;
-		v13 := compiler.MapValueForKey(m, "exclusiveMinimum")
-		if v13 != nil {
-			x.ExclusiveMinimum, ok = v13.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 max_length = 14;
-		v14 := compiler.MapValueForKey(m, "maxLength")
-		if v14 != nil {
-			t, ok := v14.(int)
-			if ok {
-				x.MaxLength = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 min_length = 15;
-		v15 := compiler.MapValueForKey(m, "minLength")
-		if v15 != nil {
-			t, ok := v15.(int)
-			if ok {
-				x.MinLength = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string pattern = 16;
-		v16 := compiler.MapValueForKey(m, "pattern")
-		if v16 != nil {
-			x.Pattern, ok = v16.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 max_items = 17;
-		v17 := compiler.MapValueForKey(m, "maxItems")
-		if v17 != nil {
-			t, ok := v17.(int)
-			if ok {
-				x.MaxItems = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 min_items = 18;
-		v18 := compiler.MapValueForKey(m, "minItems")
-		if v18 != nil {
-			t, ok := v18.(int)
-			if ok {
-				x.MinItems = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool unique_items = 19;
-		v19 := compiler.MapValueForKey(m, "uniqueItems")
-		if v19 != nil {
-			x.UniqueItems, ok = v19.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated Any enum = 20;
-		v20 := compiler.MapValueForKey(m, "enum")
-		if v20 != nil {
-			// repeated Any
-			x.Enum = make([]*Any, 0)
-			a, ok := v20.([]interface{})
-			if ok {
-				for _, item := range a {
-					y, err := NewAny(item, compiler.NewContext("enum", context))
-					if err != nil {
-						errors = append(errors, err)
-					}
-					x.Enum = append(x.Enum, y)
-				}
-			}
-		}
-		// float multiple_of = 21;
-		v21 := compiler.MapValueForKey(m, "multipleOf")
-		if v21 != nil {
-			switch v21 := v21.(type) {
-			case float64:
-				x.MultipleOf = v21
-			case float32:
-				x.MultipleOf = float64(v21)
-			case uint64:
-				x.MultipleOf = float64(v21)
-			case uint32:
-				x.MultipleOf = float64(v21)
-			case int64:
-				x.MultipleOf = float64(v21)
-			case int32:
-				x.MultipleOf = float64(v21)
-			case int:
-				x.MultipleOf = float64(v21)
-			default:
-				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated NamedAny vendor_extension = 22;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewPaths creates an object of type Paths if possible, returning an error if not.
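-// A paths object admits no fixed keys: every key must match one of the two
-// allowed patterns, and keys prefixed with "x-" become vendor extensions
-// while keys prefixed with "/" become named path items.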
-func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) {
-	errors := make([]error, 0)
-	x := &Paths{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{}
-		allowedPatterns := []*regexp.Regexp{pattern0, pattern1}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// repeated NamedAny vendor_extension = 1;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-		// repeated NamedPathItem path = 2;
-		// MAP: PathItem ^/
-		x.Path = make([]*NamedPathItem, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "/") {
-					pair := &NamedPathItem{}
-					pair.Name = k
-					var err error
-					pair.Value, err = NewPathItem(v, compiler.NewContext(k, context))
-					if err != nil {
-						errors = append(errors, err)
-					}
-					x.Path = append(x.Path, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not.
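-// The type is self-referential: when type is "array", the nested "items"
-// value is parsed recursively with this same constructor.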
-func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesItems, error) {
-	errors := make([]error, 0)
-	x := &PrimitivesItems{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string type = 1;
-		v1 := compiler.MapValueForKey(m, "type")
-		if v1 != nil {
-			x.Type, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [string number integer boolean array]
-			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string format = 2;
-		v2 := compiler.MapValueForKey(m, "format")
-		if v2 != nil {
-			x.Format, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// PrimitivesItems items = 3;
-		v3 := compiler.MapValueForKey(m, "items")
-		if v3 != nil {
-			var err error
-			x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// string collection_format = 4;
-		v4 := compiler.MapValueForKey(m, "collectionFormat")
-		if v4 != nil {
-			x.CollectionFormat, ok = v4.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [csv ssv tsv pipes]
-			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
-				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Any default = 5;
-		v5 := compiler.MapValueForKey(m, "default")
-		if v5 != nil {
-			var err error
-			x.Default, err = NewAny(v5, compiler.NewContext("default", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// float maximum = 6;
-		v6 := compiler.MapValueForKey(m, "maximum")
-		if v6 != nil {
-			switch v6 := v6.(type) {
-			case float64:
-				x.Maximum = v6
-			case float32:
-				x.Maximum = float64(v6)
-			case uint64:
-				x.Maximum = float64(v6)
-			case uint32:
-				x.Maximum = float64(v6)
-			case int64:
-				x.Maximum = float64(v6)
-			case int32:
-				x.Maximum = float64(v6)
-			case int:
-				x.Maximum = float64(v6)
-			default:
-				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool exclusive_maximum = 7;
-		v7 := compiler.MapValueForKey(m, "exclusiveMaximum")
-		if v7 != nil {
-			x.ExclusiveMaximum, ok = v7.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// float minimum = 8;
-		v8 := compiler.MapValueForKey(m, "minimum")
-		if v8 != nil {
-			switch v8 := v8.(type) {
-			case float64:
-				x.Minimum = v8
-			case float32:
-				x.Minimum = float64(v8)
-			case uint64:
-				x.Minimum = float64(v8)
-			case uint32:
-				x.Minimum = float64(v8)
-			case int64:
-				x.Minimum = float64(v8)
-			case int32:
-				x.Minimum = float64(v8)
-			case int:
-				x.Minimum = float64(v8)
-			default:
-				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool exclusive_minimum = 9;
-		v9 := compiler.MapValueForKey(m, "exclusiveMinimum")
-		if v9 != nil {
-			x.ExclusiveMinimum, ok = v9.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 max_length = 10;
-		v10 := compiler.MapValueForKey(m, "maxLength")
-		if v10 != nil {
-			t, ok := v10.(int)
-			if ok {
-				x.MaxLength = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 min_length = 11;
-		v11 := compiler.MapValueForKey(m, "minLength")
-		if v11 != nil {
-			t, ok := v11.(int)
-			if ok {
-				x.MinLength = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string pattern = 12;
-		v12 := compiler.MapValueForKey(m, "pattern")
-		if v12 != nil {
-			x.Pattern, ok = v12.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 max_items = 13;
-		v13 := compiler.MapValueForKey(m, "maxItems")
-		if v13 != nil {
-			t, ok := v13.(int)
-			if ok {
-				x.MaxItems = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 min_items = 14;
-		v14 := compiler.MapValueForKey(m, "minItems")
-		if v14 != nil {
-			t, ok := v14.(int)
-			if ok {
-				x.MinItems = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool unique_items = 15;
-		v15 := compiler.MapValueForKey(m, "uniqueItems")
-		if v15 != nil {
-			x.UniqueItems, ok = v15.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated Any enum = 16;
-		v16 := compiler.MapValueForKey(m, "enum")
-		if v16 != nil {
-			// repeated Any
-			x.Enum = make([]*Any, 0)
-			a, ok := v16.([]interface{})
-			if ok {
-				for _, item := range a {
-					y, err := NewAny(item, compiler.NewContext("enum", context))
-					if err != nil {
-						errors = append(errors, err)
-					}
-					x.Enum = append(x.Enum, y)
-				}
-			}
-		}
-		// float multiple_of = 17;
-		v17 := compiler.MapValueForKey(m, "multipleOf")
-		if v17 != nil {
-			switch v17 := v17.(type) {
-			case float64:
-				x.MultipleOf = v17
-			case float32:
-				x.MultipleOf = float64(v17)
-			case uint64:
-				x.MultipleOf = float64(v17)
-			case uint32:
-				x.MultipleOf = float64(v17)
-			case int64:
-				x.MultipleOf = float64(v17)
-			case int32:
-				x.MultipleOf = float64(v17)
-			case int:
-				x.MultipleOf = float64(v17)
-			default:
-				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated NamedAny vendor_extension = 18;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewProperties creates an object of type Properties if possible, returning an error if not.
-func NewProperties(in interface{}, context *compiler.Context) (*Properties, error) {
-	errors := make([]error, 0)
-	x := &Properties{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		// repeated NamedSchema additional_properties = 1;
-		// MAP: Schema
-		x.AdditionalProperties = make([]*NamedSchema, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				pair := &NamedSchema{}
-				pair.Name = k
-				var err error
-				pair.Value, err = NewSchema(v, compiler.NewContext(k, context))
-				if err != nil {
-					errors = append(errors, err)
-				}
-				x.AdditionalProperties = append(x.AdditionalProperties, pair)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
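NewProperties keeps the proto's repeated-NamedSchema shape: each key of the YAML map becomes a NamedSchema whose Name is the property name, in source order. A usage sketch, assuming the gopkg.in/yaml.v2 MapSlice input that compiler.UnpackMap expects in this version of the package:

    in := yaml.MapSlice{
    	{Key: "id", Value: yaml.MapSlice{{Key: "type", Value: "integer"}}},
    	{Key: "name", Value: yaml.MapSlice{{Key: "type", Value: "string"}}},
    }
    props, err := NewProperties(in, compiler.NewContext("properties", nil))
    if err == nil {
    	for _, pair := range props.AdditionalProperties {
    		fmt.Println(pair.Name) // id, name: source-map order is preserved
    	}
    }
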
-// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not.
-func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*QueryParameterSubSchema, error) {
-	errors := make([]error, 0)
-	x := &QueryParameterSubSchema{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// bool required = 1;
-		v1 := compiler.MapValueForKey(m, "required")
-		if v1 != nil {
-			x.Required, ok = v1.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string in = 2;
-		v2 := compiler.MapValueForKey(m, "in")
-		if v2 != nil {
-			x.In, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [query]
-			// check for valid enum values
-			// [query]
-			if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) {
-				message := fmt.Sprintf("has unexpected value for in: %+v (%T); must be \"query\"", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string description = 3;
-		v3 := compiler.MapValueForKey(m, "description")
-		if v3 != nil {
-			x.Description, ok = v3.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string name = 4;
-		v4 := compiler.MapValueForKey(m, "name")
-		if v4 != nil {
-			x.Name, ok = v4.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool allow_empty_value = 5;
-		v5 := compiler.MapValueForKey(m, "allowEmptyValue")
-		if v5 != nil {
-			x.AllowEmptyValue, ok = v5.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string type = 6;
-		v6 := compiler.MapValueForKey(m, "type")
-		if v6 != nil {
-			x.Type, ok = v6.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [string number boolean integer array]
-			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) {
-				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string format = 7;
-		v7 := compiler.MapValueForKey(m, "format")
-		if v7 != nil {
-			x.Format, ok = v7.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// PrimitivesItems items = 8;
-		v8 := compiler.MapValueForKey(m, "items")
-		if v8 != nil {
-			var err error
-			x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// string collection_format = 9;
-		v9 := compiler.MapValueForKey(m, "collectionFormat")
-		if v9 != nil {
-			x.CollectionFormat, ok = v9.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-			// check for valid enum values
-			// [csv ssv tsv pipes multi]
-			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) {
-				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Any default = 10;
-		v10 := compiler.MapValueForKey(m, "default")
-		if v10 != nil {
-			var err error
-			x.Default, err = NewAny(v10, compiler.NewContext("default", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// float maximum = 11;
-		v11 := compiler.MapValueForKey(m, "maximum")
-		if v11 != nil {
-			switch v11 := v11.(type) {
-			case float64:
-				x.Maximum = v11
-			case float32:
-				x.Maximum = float64(v11)
-			case uint64:
-				x.Maximum = float64(v11)
-			case uint32:
-				x.Maximum = float64(v11)
-			case int64:
-				x.Maximum = float64(v11)
-			case int32:
-				x.Maximum = float64(v11)
-			case int:
-				x.Maximum = float64(v11)
-			default:
-				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool exclusive_maximum = 12;
-		v12 := compiler.MapValueForKey(m, "exclusiveMaximum")
-		if v12 != nil {
-			x.ExclusiveMaximum, ok = v12.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// float minimum = 13;
-		v13 := compiler.MapValueForKey(m, "minimum")
-		if v13 != nil {
-			switch v13 := v13.(type) {
-			case float64:
-				x.Minimum = v13
-			case float32:
-				x.Minimum = float64(v13)
-			case uint64:
-				x.Minimum = float64(v13)
-			case uint32:
-				x.Minimum = float64(v13)
-			case int64:
-				x.Minimum = float64(v13)
-			case int32:
-				x.Minimum = float64(v13)
-			case int:
-				x.Minimum = float64(v13)
-			default:
-				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool exclusive_minimum = 14;
-		v14 := compiler.MapValueForKey(m, "exclusiveMinimum")
-		if v14 != nil {
-			x.ExclusiveMinimum, ok = v14.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 max_length = 15;
-		v15 := compiler.MapValueForKey(m, "maxLength")
-		if v15 != nil {
-			t, ok := v15.(int)
-			if ok {
-				x.MaxLength = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 min_length = 16;
-		v16 := compiler.MapValueForKey(m, "minLength")
-		if v16 != nil {
-			t, ok := v16.(int)
-			if ok {
-				x.MinLength = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string pattern = 17;
-		v17 := compiler.MapValueForKey(m, "pattern")
-		if v17 != nil {
-			x.Pattern, ok = v17.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 max_items = 18;
-		v18 := compiler.MapValueForKey(m, "maxItems")
-		if v18 != nil {
-			t, ok := v18.(int)
-			if ok {
-				x.MaxItems = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 min_items = 19;
-		v19 := compiler.MapValueForKey(m, "minItems")
-		if v19 != nil {
-			t, ok := v19.(int)
-			if ok {
-				x.MinItems = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool unique_items = 20;
-		v20 := compiler.MapValueForKey(m, "uniqueItems")
-		if v20 != nil {
-			x.UniqueItems, ok = v20.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated Any enum = 21;
-		v21 := compiler.MapValueForKey(m, "enum")
-		if v21 != nil {
-			// repeated Any
-			x.Enum = make([]*Any, 0)
-			a, ok := v21.([]interface{})
-			if ok {
-				for _, item := range a {
-					y, err := NewAny(item, compiler.NewContext("enum", context))
-					if err != nil {
-						errors = append(errors, err)
-					}
-					x.Enum = append(x.Enum, y)
-				}
-			}
-		}
-		// float multiple_of = 22;
-		v22 := compiler.MapValueForKey(m, "multipleOf")
-		if v22 != nil {
-			switch v22 := v22.(type) {
-			case float64:
-				x.MultipleOf = v22
-			case float32:
-				x.MultipleOf = float64(v22)
-			case uint64:
-				x.MultipleOf = float64(v22)
-			case uint32:
-				x.MultipleOf = float64(v22)
-			case int64:
-				x.MultipleOf = float64(v22)
-			case int32:
-				x.MultipleOf = float64(v22)
-			case int:
-				x.MultipleOf = float64(v22)
-			default:
-				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated NamedAny vendor_extension = 23;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
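On top of the type assertions, this constructor layers enum checks: in must be "query", type must be one of five primitive names, and collectionFormat one of the five Swagger styles; each check is skipped when the preceding assertion already failed, since it reuses the outer ok. A fragment that passes all of them, again assuming MapSlice input:

    in := yaml.MapSlice{
    	{Key: "name", Value: "limit"},
    	{Key: "in", Value: "query"},
    	{Key: "type", Value: "integer"},
    	{Key: "format", Value: "int32"},
    }
    p, err := NewQueryParameterSubSchema(in, compiler.NewContext("parameter", nil))
    // err == nil; p.Name == "limit", p.In == "query", p.Type == "integer"
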
-// NewResponse creates an object of type Response if possible, returning an error if not.
-func NewResponse(in interface{}, context *compiler.Context) (*Response, error) {
-	errors := make([]error, 0)
-	x := &Response{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		requiredKeys := []string{"description"}
-		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
-		if len(missingKeys) > 0 {
-			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		allowedKeys := []string{"description", "examples", "headers", "schema"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string description = 1;
-		v1 := compiler.MapValueForKey(m, "description")
-		if v1 != nil {
-			x.Description, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// SchemaItem schema = 2;
-		v2 := compiler.MapValueForKey(m, "schema")
-		if v2 != nil {
-			var err error
-			x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// Headers headers = 3;
-		v3 := compiler.MapValueForKey(m, "headers")
-		if v3 != nil {
-			var err error
-			x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// Examples examples = 4;
-		v4 := compiler.MapValueForKey(m, "examples")
-		if v4 != nil {
-			var err error
-			x.Examples, err = NewExamples(v4, compiler.NewContext("examples", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// repeated NamedAny vendor_extension = 5;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
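NewResponse adds a required-key check on top of the usual whitelist: description must be present, and both validations run before any field is decoded, so a single malformed response reports missing and invalid properties together in one error group. A failing fragment, with the same assumed MapSlice input:

    in := yaml.MapSlice{
    	{Key: "schema", Value: yaml.MapSlice{{Key: "type", Value: "string"}}},
    	{Key: "unknownKey", Value: true},
    }
    _, err := NewResponse(in, compiler.NewContext("response", nil))
    // err should group roughly: is missing required property: description,
    // and: has invalid property: unknownKey
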
-// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not.
-func NewResponseDefinitions(in interface{}, context *compiler.Context) (*ResponseDefinitions, error) {
-	errors := make([]error, 0)
-	x := &ResponseDefinitions{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		// repeated NamedResponse additional_properties = 1;
-		// MAP: Response
-		x.AdditionalProperties = make([]*NamedResponse, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				pair := &NamedResponse{}
-				pair.Name = k
-				var err error
-				pair.Value, err = NewResponse(v, compiler.NewContext(k, context))
-				if err != nil {
-					errors = append(errors, err)
-				}
-				x.AdditionalProperties = append(x.AdditionalProperties, pair)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not.
-func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue, error) {
-	errors := make([]error, 0)
-	x := &ResponseValue{}
-	matched := false
-	// Response response = 1;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors are acceptable here; they just mean the input is not this subtype
-			t, matchingError := NewResponse(m, compiler.NewContext("response", context))
-			if matchingError == nil {
-				x.Oneof = &ResponseValue_Response{Response: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	// JsonReference json_reference = 2;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors are acceptable here; they just mean the input is not this subtype
-			t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context))
-			if matchingError == nil {
-				x.Oneof = &ResponseValue_JsonReference{JsonReference: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	if matched {
-		// since the oneof matched one of its possibilities, discard any matching errors
-		errors = make([]error, 0)
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
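ResponseValue is a proto oneof, and the constructor realizes it by trial: every candidate constructor is attempted against the same input, a success stores itself in x.Oneof (a later match would overwrite an earlier one), and once anything matched the accumulated mismatch errors are discarded. The shape of that control flow, with hypothetical closures standing in for the candidate constructors:

    matched := false
    var errs []error
    for _, try := range []func() error{tryResponse, tryJsonReference} { // hypothetical closures
    	if err := try(); err == nil {
    		matched = true // the successful candidate has stored itself in x.Oneof
    	} else {
    		errs = append(errs, err) // expected noise while probing the wrong subtype
    	}
    }
    if matched {
    	errs = nil // exactly the discard step above
    }
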
-// NewResponses creates an object of type Responses if possible, returning an error if not.
-func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) {
-	errors := make([]error, 0)
-	x := &Responses{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{}
-		allowedPatterns := []*regexp.Regexp{pattern2, pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// repeated NamedResponseValue response_code = 1;
-		// MAP: ResponseValue ^([0-9]{3})$|^(default)$
-		x.ResponseCode = make([]*NamedResponseValue, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if pattern2.MatchString(k) {
-					pair := &NamedResponseValue{}
-					pair.Name = k
-					var err error
-					pair.Value, err = NewResponseValue(v, compiler.NewContext(k, context))
-					if err != nil {
-						errors = append(errors, err)
-					}
-					x.ResponseCode = append(x.ResponseCode, pair)
-				}
-			}
-		}
-		// repeated NamedAny vendor_extension = 2;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
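Key routing in NewResponses rests entirely on the two patterns: response codes must match the regex named in the MAP comment above, x- keys become vendor extensions, and because allowedKeys is empty every other key is rejected up front. A self-contained check of the routing predicate, assuming pattern2 is compiled from that documented regex:

    package main

    import (
    	"fmt"
    	"regexp"
    )

    func main() {
    	// pattern2 as documented in the MAP comment: ^([0-9]{3})$|^(default)$
    	pattern2 := regexp.MustCompile("^([0-9]{3})$|^(default)$")
    	for _, k := range []string{"200", "default", "2XX", "x-notes"} {
    		fmt.Printf("%-8s %v\n", k, pattern2.MatchString(k)) // 200 true, default true, 2XX false, x-notes false
    	}
    }
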
-// NewSchema creates an object of type Schema if possible, returning an error if not.
-func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
-	errors := make([]error, 0)
-	x := &Schema{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string _ref = 1;
-		v1 := compiler.MapValueForKey(m, "$ref")
-		if v1 != nil {
-			x.XRef, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string format = 2;
-		v2 := compiler.MapValueForKey(m, "format")
-		if v2 != nil {
-			x.Format, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string title = 3;
-		v3 := compiler.MapValueForKey(m, "title")
-		if v3 != nil {
-			x.Title, ok = v3.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v3, v3)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string description = 4;
-		v4 := compiler.MapValueForKey(m, "description")
-		if v4 != nil {
-			x.Description, ok = v4.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Any default = 5;
-		v5 := compiler.MapValueForKey(m, "default")
-		if v5 != nil {
-			var err error
-			x.Default, err = NewAny(v5, compiler.NewContext("default", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// float multiple_of = 6;
-		v6 := compiler.MapValueForKey(m, "multipleOf")
-		if v6 != nil {
-			switch v6 := v6.(type) {
-			case float64:
-				x.MultipleOf = v6
-			case float32:
-				x.MultipleOf = float64(v6)
-			case uint64:
-				x.MultipleOf = float64(v6)
-			case uint32:
-				x.MultipleOf = float64(v6)
-			case int64:
-				x.MultipleOf = float64(v6)
-			case int32:
-				x.MultipleOf = float64(v6)
-			case int:
-				x.MultipleOf = float64(v6)
-			default:
-				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v6, v6)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// float maximum = 7;
-		v7 := compiler.MapValueForKey(m, "maximum")
-		if v7 != nil {
-			switch v7 := v7.(type) {
-			case float64:
-				x.Maximum = v7
-			case float32:
-				x.Maximum = float64(v7)
-			case uint64:
-				x.Maximum = float64(v7)
-			case uint32:
-				x.Maximum = float64(v7)
-			case int64:
-				x.Maximum = float64(v7)
-			case int32:
-				x.Maximum = float64(v7)
-			case int:
-				x.Maximum = float64(v7)
-			default:
-				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v7, v7)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool exclusive_maximum = 8;
-		v8 := compiler.MapValueForKey(m, "exclusiveMaximum")
-		if v8 != nil {
-			x.ExclusiveMaximum, ok = v8.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v8, v8)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// float minimum = 9;
-		v9 := compiler.MapValueForKey(m, "minimum")
-		if v9 != nil {
-			switch v9 := v9.(type) {
-			case float64:
-				x.Minimum = v9
-			case float32:
-				x.Minimum = float64(v9)
-			case uint64:
-				x.Minimum = float64(v9)
-			case uint32:
-				x.Minimum = float64(v9)
-			case int64:
-				x.Minimum = float64(v9)
-			case int32:
-				x.Minimum = float64(v9)
-			case int:
-				x.Minimum = float64(v9)
-			default:
-				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v9, v9)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool exclusive_minimum = 10;
-		v10 := compiler.MapValueForKey(m, "exclusiveMinimum")
-		if v10 != nil {
-			x.ExclusiveMinimum, ok = v10.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v10, v10)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 max_length = 11;
-		v11 := compiler.MapValueForKey(m, "maxLength")
-		if v11 != nil {
-			t, ok := v11.(int)
-			if ok {
-				x.MaxLength = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v11, v11)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 min_length = 12;
-		v12 := compiler.MapValueForKey(m, "minLength")
-		if v12 != nil {
-			t, ok := v12.(int)
-			if ok {
-				x.MinLength = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v12, v12)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string pattern = 13;
-		v13 := compiler.MapValueForKey(m, "pattern")
-		if v13 != nil {
-			x.Pattern, ok = v13.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v13, v13)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 max_items = 14;
-		v14 := compiler.MapValueForKey(m, "maxItems")
-		if v14 != nil {
-			t, ok := v14.(int)
-			if ok {
-				x.MaxItems = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v14, v14)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 min_items = 15;
-		v15 := compiler.MapValueForKey(m, "minItems")
-		if v15 != nil {
-			t, ok := v15.(int)
-			if ok {
-				x.MinItems = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v15, v15)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool unique_items = 16;
-		v16 := compiler.MapValueForKey(m, "uniqueItems")
-		if v16 != nil {
-			x.UniqueItems, ok = v16.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v16, v16)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 max_properties = 17;
-		v17 := compiler.MapValueForKey(m, "maxProperties")
-		if v17 != nil {
-			t, ok := v17.(int)
-			if ok {
-				x.MaxProperties = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for maxProperties: %+v (%T)", v17, v17)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// int64 min_properties = 18;
-		v18 := compiler.MapValueForKey(m, "minProperties")
-		if v18 != nil {
-			t, ok := v18.(int)
-			if ok {
-				x.MinProperties = int64(t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for minProperties: %+v (%T)", v18, v18)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated string required = 19;
-		v19 := compiler.MapValueForKey(m, "required")
-		if v19 != nil {
-			v, ok := v19.([]interface{})
-			if ok {
-				x.Required = compiler.ConvertInterfaceArrayToStringArray(v)
-			} else {
-				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v19, v19)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated Any enum = 20;
-		v20 := compiler.MapValueForKey(m, "enum")
-		if v20 != nil {
-			// repeated Any
-			x.Enum = make([]*Any, 0)
-			a, ok := v20.([]interface{})
-			if ok {
-				for _, item := range a {
-					y, err := NewAny(item, compiler.NewContext("enum", context))
-					if err != nil {
-						errors = append(errors, err)
-					}
-					x.Enum = append(x.Enum, y)
-				}
-			}
-		}
-		// AdditionalPropertiesItem additional_properties = 21;
-		v21 := compiler.MapValueForKey(m, "additionalProperties")
-		if v21 != nil {
-			var err error
-			x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// TypeItem type = 22;
-		v22 := compiler.MapValueForKey(m, "type")
-		if v22 != nil {
-			var err error
-			x.Type, err = NewTypeItem(v22, compiler.NewContext("type", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// ItemsItem items = 23;
-		v23 := compiler.MapValueForKey(m, "items")
-		if v23 != nil {
-			var err error
-			x.Items, err = NewItemsItem(v23, compiler.NewContext("items", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// repeated Schema all_of = 24;
-		v24 := compiler.MapValueForKey(m, "allOf")
-		if v24 != nil {
-			// repeated Schema
-			x.AllOf = make([]*Schema, 0)
-			a, ok := v24.([]interface{})
-			if ok {
-				for _, item := range a {
-					y, err := NewSchema(item, compiler.NewContext("allOf", context))
-					if err != nil {
-						errors = append(errors, err)
-					}
-					x.AllOf = append(x.AllOf, y)
-				}
-			}
-		}
-		// Properties properties = 25;
-		v25 := compiler.MapValueForKey(m, "properties")
-		if v25 != nil {
-			var err error
-			x.Properties, err = NewProperties(v25, compiler.NewContext("properties", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// string discriminator = 26;
-		v26 := compiler.MapValueForKey(m, "discriminator")
-		if v26 != nil {
-			x.Discriminator, ok = v26.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for discriminator: %+v (%T)", v26, v26)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool read_only = 27;
-		v27 := compiler.MapValueForKey(m, "readOnly")
-		if v27 != nil {
-			x.ReadOnly, ok = v27.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v27, v27)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// Xml xml = 28;
-		v28 := compiler.MapValueForKey(m, "xml")
-		if v28 != nil {
-			var err error
-			x.Xml, err = NewXml(v28, compiler.NewContext("xml", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// ExternalDocs external_docs = 29;
-		v29 := compiler.MapValueForKey(m, "externalDocs")
-		if v29 != nil {
-			var err error
-			x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// Any example = 30;
-		v30 := compiler.MapValueForKey(m, "example")
-		if v30 != nil {
-			var err error
-			x.Example, err = NewAny(v30, compiler.NewContext("example", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// repeated NamedAny vendor_extension = 31;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not.
-func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, error) {
-	errors := make([]error, 0)
-	x := &SchemaItem{}
-	matched := false
-	// Schema schema = 1;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors are acceptable here; they just mean the input is not this subtype
-			t, matchingError := NewSchema(m, compiler.NewContext("schema", context))
-			if matchingError == nil {
-				x.Oneof = &SchemaItem_Schema{Schema: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	// FileSchema file_schema = 2;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors are acceptable here; they just mean the input is not this subtype
-			t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", context))
-			if matchingError == nil {
-				x.Oneof = &SchemaItem_FileSchema{FileSchema: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	if matched {
-		// since the oneof matched one of its possibilities, discard any matching errors
-		errors = make([]error, 0)
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not.
-func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*SecurityDefinitions, error) {
-	errors := make([]error, 0)
-	x := &SecurityDefinitions{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		// repeated NamedSecurityDefinitionsItem additional_properties = 1;
-		// MAP: SecurityDefinitionsItem
-		x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				pair := &NamedSecurityDefinitionsItem{}
-				pair.Name = k
-				var err error
-				pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, context))
-				if err != nil {
-					errors = append(errors, err)
-				}
-				x.AdditionalProperties = append(x.AdditionalProperties, pair)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not.
-func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*SecurityDefinitionsItem, error) {
-	errors := make([]error, 0)
-	x := &SecurityDefinitionsItem{}
-	matched := false
-	// BasicAuthenticationSecurity basic_authentication_security = 1;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors are acceptable here; they just mean the input is not this subtype
-			t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", context))
-			if matchingError == nil {
-				x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	// ApiKeySecurity api_key_security = 2;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors are acceptable here; they just mean the input is not this subtype
-			t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", context))
-			if matchingError == nil {
-				x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	// Oauth2ImplicitSecurity oauth2_implicit_security = 3;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors are acceptable here; they just mean the input is not this subtype
-			t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", context))
-			if matchingError == nil {
-				x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	// Oauth2PasswordSecurity oauth2_password_security = 4;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors are acceptable here; they just mean the input is not this subtype
-			t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", context))
-			if matchingError == nil {
-				x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	// Oauth2ApplicationSecurity oauth2_application_security = 5;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors are acceptable here; they just mean the input is not this subtype
-			t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", context))
-			if matchingError == nil {
-				x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	// Oauth2AccessCodeSecurity oauth2_access_code_security = 6;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors are acceptable here; they just mean the input is not this subtype
-			t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", context))
-			if matchingError == nil {
-				x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	if matched {
-		// since the oneof matched one of its possibilities, discard any matching errors
-		errors = make([]error, 0)
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not.
-func NewSecurityRequirement(in interface{}, context *compiler.Context) (*SecurityRequirement, error) {
-	errors := make([]error, 0)
-	x := &SecurityRequirement{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		// repeated NamedStringArray additional_properties = 1;
-		// MAP: StringArray
-		x.AdditionalProperties = make([]*NamedStringArray, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				pair := &NamedStringArray{}
-				pair.Name = k
-				var err error
-				pair.Value, err = NewStringArray(v, compiler.NewContext(k, context))
-				if err != nil {
-					errors = append(errors, err)
-				}
-				x.AdditionalProperties = append(x.AdditionalProperties, pair)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewStringArray creates an object of type StringArray if possible, returning an error if not.
-func NewStringArray(in interface{}, context *compiler.Context) (*StringArray, error) {
-	errors := make([]error, 0)
-	x := &StringArray{}
-	a, ok := in.([]interface{})
-	if !ok {
-		message := fmt.Sprintf("has unexpected value for StringArray: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		x.Value = make([]string, 0)
-		for _, s := range a {
-			if t, ok := s.(string); ok {
-				x.Value = append(x.Value, t)
-			} else {
-				message := fmt.Sprintf("has unexpected value for StringArray element: %+v (%T)", s, s)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewTag creates an object of type Tag if possible, returning an error if not.
-func NewTag(in interface{}, context *compiler.Context) (*Tag, error) {
-	errors := make([]error, 0)
-	x := &Tag{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		requiredKeys := []string{"name"}
-		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
-		if len(missingKeys) > 0 {
-			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		allowedKeys := []string{"description", "externalDocs", "name"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string name = 1;
-		v1 := compiler.MapValueForKey(m, "name")
-		if v1 != nil {
-			x.Name, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string description = 2;
-		v2 := compiler.MapValueForKey(m, "description")
-		if v2 != nil {
-			x.Description, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// ExternalDocs external_docs = 3;
-		v3 := compiler.MapValueForKey(m, "externalDocs")
-		if v3 != nil {
-			var err error
-			x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", context))
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-		// repeated NamedAny vendor_extension = 4;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewTypeItem creates an object of type TypeItem if possible, returning an error if not.
-func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) {
-	errors := make([]error, 0)
-	x := &TypeItem{}
-	switch in := in.(type) {
-	case string:
-		x.Value = make([]string, 0)
-		x.Value = append(x.Value, in)
-	case []interface{}:
-		x.Value = make([]string, 0)
-		for _, v := range in {
-			value, ok := v.(string)
-			if ok {
-				x.Value = append(x.Value, value)
-			} else {
-				message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", v, v)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-	default:
-		message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
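NewTypeItem absorbs both legal spellings of the Swagger type field: a bare string and a list of strings normalize to the same slice. For instance (nil is an acceptable context here, since it is only consulted on the error path):

    t1, _ := NewTypeItem("string", nil)                        // t1.Value == []string{"string"}
    t2, _ := NewTypeItem([]interface{}{"string", "null"}, nil) // t2.Value == []string{"string", "null"}
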
-// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not.
-func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExtension, error) {
-	errors := make([]error, 0)
-	x := &VendorExtension{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		// repeated NamedAny additional_properties = 1;
-		// MAP: Any
-		x.AdditionalProperties = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				pair := &NamedAny{}
-				pair.Name = k
-				result := &Any{}
-				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-				if handled {
-					if err != nil {
-						errors = append(errors, err)
-					} else {
-						bytes, _ := yaml.Marshal(v)
-						result.Yaml = string(bytes)
-						result.Value = resultFromExt
-						pair.Value = result
-					}
-				} else {
-					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-					if err != nil {
-						errors = append(errors, err)
-					}
-				}
-				x.AdditionalProperties = append(x.AdditionalProperties, pair)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
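The x- loop above recurs in almost every constructor in this file; NewVendorExtension is the degenerate case where every key is treated as an extension. compiler.HandleExtension gives any registered extension handler first crack at the value, and unhandled extensions fall back to NewAny, which in this version appears to record the raw value as marshalled YAML. A sketch of that fallback path:

    pair := &NamedAny{Name: "x-internal-id"}
    if value, err := NewAny(42, compiler.NewContext("x-internal-id", nil)); err == nil {
    	pair.Value = value // pair.Value.Yaml should hold the marshalled original, "42\n"
    }
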
-// NewXml creates an object of type Xml if possible, returning an error if not.
-func NewXml(in interface{}, context *compiler.Context) (*Xml, error) {
-	errors := make([]error, 0)
-	x := &Xml{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"}
-		allowedPatterns := []*regexp.Regexp{pattern0}
-		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
-		if len(invalidKeys) > 0 {
-			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
-			errors = append(errors, compiler.NewError(context, message))
-		}
-		// string name = 1;
-		v1 := compiler.MapValueForKey(m, "name")
-		if v1 != nil {
-			x.Name, ok = v1.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string namespace = 2;
-		v2 := compiler.MapValueForKey(m, "namespace")
-		if v2 != nil {
-			x.Namespace, ok = v2.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for namespace: %+v (%T)", v2, v2)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// string prefix = 3;
-		v3 := compiler.MapValueForKey(m, "prefix")
-		if v3 != nil {
-			x.Prefix, ok = v3.(string)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for prefix: %+v (%T)", v3, v3)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool attribute = 4;
-		v4 := compiler.MapValueForKey(m, "attribute")
-		if v4 != nil {
-			x.Attribute, ok = v4.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for attribute: %+v (%T)", v4, v4)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// bool wrapped = 5;
-		v5 := compiler.MapValueForKey(m, "wrapped")
-		if v5 != nil {
-			x.Wrapped, ok = v5.(bool)
-			if !ok {
-				message := fmt.Sprintf("has unexpected value for wrapped: %+v (%T)", v5, v5)
-				errors = append(errors, compiler.NewError(context, message))
-			}
-		}
-		// repeated NamedAny vendor_extension = 6;
-		// MAP: Any ^x-
-		x.VendorExtension = make([]*NamedAny, 0)
-		for _, item := range m {
-			k, ok := compiler.StringValue(item.Key)
-			if ok {
-				v := item.Value
-				if strings.HasPrefix(k, "x-") {
-					pair := &NamedAny{}
-					pair.Name = k
-					result := &Any{}
-					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
-					if handled {
-						if err != nil {
-							errors = append(errors, err)
-						} else {
-							bytes, _ := yaml.Marshal(v)
-							result.Yaml = string(bytes)
-							result.Value = resultFromExt
-							pair.Value = result
-						}
-					} else {
-						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
-						if err != nil {
-							errors = append(errors, err)
-						}
-					}
-					x.VendorExtension = append(x.VendorExtension, pair)
-				}
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside AdditionalPropertiesItem objects.
-func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	{
-		p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema)
-		if ok {
-			_, err := p.Schema.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Any objects.
-func (m *Any) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside ApiKeySecurity objects.
-func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects.
-func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside BodyParameter objects.
-func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Schema != nil {
-		_, err := m.Schema.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Contact objects.
-func (m *Contact) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Default objects.
-func (m *Default) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.AdditionalProperties {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Definitions objects.
-func (m *Definitions) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.AdditionalProperties {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Document objects.
-func (m *Document) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Info != nil {
-		_, err := m.Info.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Paths != nil {
-		_, err := m.Paths.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Definitions != nil {
-		_, err := m.Definitions.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Parameters != nil {
-		_, err := m.Parameters.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Responses != nil {
-		_, err := m.Responses.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.Security {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	if m.SecurityDefinitions != nil {
-		_, err := m.SecurityDefinitions.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.Tags {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	if m.ExternalDocs != nil {
-		_, err := m.ExternalDocs.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
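Resolution is a plain depth-first walk: Document forwards to every populated child, children forward further down, and each method pools its failures into a single error group rather than stopping at the first. A usage sketch, assuming the NewDocument constructor defined earlier in this file and parsedYaml as a hypothetical already-unmarshalled spec:

    doc, err := NewDocument(parsedYaml, compiler.NewContext("$root", nil))
    if err == nil {
    	_, err = doc.ResolveReferences("swagger.yaml") // root used to resolve relative $refs
    }
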
-// ResolveReferences resolves references found inside Examples objects.
-func (m *Examples) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.AdditionalProperties {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside ExternalDocs objects.
-func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside FileSchema objects.
-func (m *FileSchema) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Default != nil {
-		_, err := m.Default.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.ExternalDocs != nil {
-		_, err := m.ExternalDocs.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Example != nil {
-		_, err := m.Example.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside FormDataParameterSubSchema objects.
-func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Items != nil {
-		_, err := m.Items.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Default != nil {
-		_, err := m.Default.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.Enum {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Header objects.
-func (m *Header) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Items != nil {
-		_, err := m.Items.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Default != nil {
-		_, err := m.Default.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.Enum {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside HeaderParameterSubSchema objects.
-func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Items != nil {
-		_, err := m.Items.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Default != nil {
-		_, err := m.Default.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.Enum {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Headers objects.
-func (m *Headers) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.AdditionalProperties {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Info objects.
-func (m *Info) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Contact != nil {
-		_, err := m.Contact.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.License != nil {
-		_, err := m.License.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside ItemsItem objects.
-func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.Schema {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside JsonReference objects.
-func (m *JsonReference) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.XRef != "" {
-		info, err := compiler.ReadInfoForRef(root, m.XRef)
-		if err != nil {
-			return nil, err
-		}
-		if info != nil {
-			replacement, err := NewJsonReference(info, nil)
-			if err == nil {
-				*m = *replacement
-				return m.ResolveReferences(root)
-			}
-		}
-		return info, nil
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
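JsonReference is one of the three types in this file (alongside PathItem and Schema) that can carry a $ref. Its resolver fetches the referenced document with compiler.ReadInfoForRef, rebuilds the node from the fetched info, copies the replacement over the receiver, and recurses so that chains of references collapse to a final value; note that when NewJsonReference fails, the generated code drops that error and returns the raw info instead. A toy model of the in-place rewrite, with hypothetical names (ref, resolve, lookup) and a cycle guard the generated code does not need:

package sketch

import "fmt"

// ref is a toy stand-in for a node that is either a reference or a value.
type ref struct {
	XRef  string // "$ref" target; empty once the node holds a value
	Value string
}

// resolve overwrites the receiver with its target, as *m = *replacement does
// above, and loops so chains of references collapse to the final value.
func (r *ref) resolve(lookup map[string]*ref) error {
	for hops := 0; r.XRef != ""; hops++ {
		if hops > 100 {
			return fmt.Errorf("reference cycle at %q", r.XRef)
		}
		target, ok := lookup[r.XRef]
		if !ok {
			return fmt.Errorf("unresolved reference %q", r.XRef)
		}
		*r = *target // in-place overwrite
	}
	return nil
}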
-// ResolveReferences resolves references found inside License objects.
-func (m *License) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside NamedAny objects.
-func (m *NamedAny) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Value != nil {
-		_, err := m.Value.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside NamedHeader objects.
-func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Value != nil {
-		_, err := m.Value.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside NamedParameter objects.
-func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Value != nil {
-		_, err := m.Value.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside NamedPathItem objects.
-func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Value != nil {
-		_, err := m.Value.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside NamedResponse objects.
-func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Value != nil {
-		_, err := m.Value.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside NamedResponseValue objects.
-func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Value != nil {
-		_, err := m.Value.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside NamedSchema objects.
-func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Value != nil {
-		_, err := m.Value.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects.
-func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Value != nil {
-		_, err := m.Value.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside NamedString objects.
-func (m *NamedString) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside NamedStringArray objects.
-func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Value != nil {
-		_, err := m.Value.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside NonBodyParameter objects.
-func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	{
-		p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema)
-		if ok {
-			_, err := p.HeaderParameterSubSchema.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema)
-		if ok {
-			_, err := p.FormDataParameterSubSchema.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema)
-		if ok {
-			_, err := p.QueryParameterSubSchema.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema)
-		if ok {
-			_, err := p.PathParameterSubSchema.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
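NonBodyParameter wraps a protobuf oneof: its resolver probes each concrete wrapper type with a comma-ok assertion and delegates to whichever variant is set, and since the errors slice stays empty on this path, the final return is effectively nil, nil. The same dispatch written as the equivalent, more idiomatic type switch (a behavior-preserving sketch, not code from this package):

func (m *NonBodyParameter) resolveBySwitch(root string) (interface{}, error) {
	var err error
	switch p := m.Oneof.(type) {
	case *NonBodyParameter_HeaderParameterSubSchema:
		_, err = p.HeaderParameterSubSchema.ResolveReferences(root)
	case *NonBodyParameter_FormDataParameterSubSchema:
		_, err = p.FormDataParameterSubSchema.ResolveReferences(root)
	case *NonBodyParameter_QueryParameterSubSchema:
		_, err = p.QueryParameterSubSchema.ResolveReferences(root)
	case *NonBodyParameter_PathParameterSubSchema:
		_, err = p.PathParameterSubSchema.ResolveReferences(root)
	}
	if err != nil {
		return nil, err
	}
	return nil, nil
}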
-// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects.
-func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Scopes != nil {
-		_, err := m.Scopes.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects.
-func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Scopes != nil {
-		_, err := m.Scopes.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects.
-func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Scopes != nil {
-		_, err := m.Scopes.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects.
-func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Scopes != nil {
-		_, err := m.Scopes.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Oauth2Scopes objects.
-func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.AdditionalProperties {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Operation objects.
-func (m *Operation) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.ExternalDocs != nil {
-		_, err := m.ExternalDocs.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.Parameters {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	if m.Responses != nil {
-		_, err := m.Responses.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.Security {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Parameter objects.
-func (m *Parameter) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	{
-		p, ok := m.Oneof.(*Parameter_BodyParameter)
-		if ok {
-			_, err := p.BodyParameter.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*Parameter_NonBodyParameter)
-		if ok {
-			_, err := p.NonBodyParameter.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside ParameterDefinitions objects.
-func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.AdditionalProperties {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside ParametersItem objects.
-func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	{
-		p, ok := m.Oneof.(*ParametersItem_Parameter)
-		if ok {
-			_, err := p.Parameter.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*ParametersItem_JsonReference)
-		if ok {
-			info, err := p.JsonReference.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			} else if info != nil {
-				n, err := NewParametersItem(info, nil)
-				if err != nil {
-					return nil, err
-				} else if n != nil {
-					*m = *n
-					return nil, nil
-				}
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
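ParametersItem (and ResponseValue further down) adds a twist to the oneof dispatch: when the active variant is a JsonReference, the resolver fetches the referenced YAML, re-parses it as a complete ParametersItem via NewParametersItem, and overwrites the receiver, so an external $ref is replaced by the inlined parameter. The three steps in outline (a fragment of the branch above, with the original's error returns elided for brevity):

// 1. fetch the referenced YAML
info, err := p.JsonReference.ResolveReferences(root)
if err == nil && info != nil {
	// 2. re-parse the fetched info as a full ParametersItem
	if n, err := NewParametersItem(info, nil); err == nil && n != nil {
		*m = *n // 3. overwrite the wrapper in place
	}
}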
-// ResolveReferences resolves references found inside PathItem objects.
-func (m *PathItem) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.XRef != "" {
-		info, err := compiler.ReadInfoForRef(root, m.XRef)
-		if err != nil {
-			return nil, err
-		}
-		if info != nil {
-			replacement, err := NewPathItem(info, nil)
-			if err == nil {
-				*m = *replacement
-				return m.ResolveReferences(root)
-			}
-		}
-		return info, nil
-	}
-	if m.Get != nil {
-		_, err := m.Get.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Put != nil {
-		_, err := m.Put.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Post != nil {
-		_, err := m.Post.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Delete != nil {
-		_, err := m.Delete.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Options != nil {
-		_, err := m.Options.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Head != nil {
-		_, err := m.Head.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Patch != nil {
-		_, err := m.Patch.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.Parameters {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside PathParameterSubSchema objects.
-func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Items != nil {
-		_, err := m.Items.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Default != nil {
-		_, err := m.Default.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.Enum {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Paths objects.
-func (m *Paths) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	for _, item := range m.Path {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside PrimitivesItems objects.
-func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Items != nil {
-		_, err := m.Items.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Default != nil {
-		_, err := m.Default.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.Enum {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Properties objects.
-func (m *Properties) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.AdditionalProperties {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside QueryParameterSubSchema objects.
-func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Items != nil {
-		_, err := m.Items.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Default != nil {
-		_, err := m.Default.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.Enum {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Response objects.
-func (m *Response) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.Schema != nil {
-		_, err := m.Schema.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Headers != nil {
-		_, err := m.Headers.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Examples != nil {
-		_, err := m.Examples.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside ResponseDefinitions objects.
-func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.AdditionalProperties {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside ResponseValue objects.
-func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	{
-		p, ok := m.Oneof.(*ResponseValue_Response)
-		if ok {
-			_, err := p.Response.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*ResponseValue_JsonReference)
-		if ok {
-			info, err := p.JsonReference.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			} else if info != nil {
-				n, err := NewResponseValue(info, nil)
-				if err != nil {
-					return nil, err
-				} else if n != nil {
-					*m = *n
-					return nil, nil
-				}
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Responses objects.
-func (m *Responses) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.ResponseCode {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Schema objects.
-func (m *Schema) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.XRef != "" {
-		info, err := compiler.ReadInfoForRef(root, m.XRef)
-		if err != nil {
-			return nil, err
-		}
-		if info != nil {
-			replacement, err := NewSchema(info, nil)
-			if err == nil {
-				*m = *replacement
-				return m.ResolveReferences(root)
-			}
-		}
-		return info, nil
-	}
-	if m.Default != nil {
-		_, err := m.Default.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.Enum {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	if m.AdditionalProperties != nil {
-		_, err := m.AdditionalProperties.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Type != nil {
-		_, err := m.Type.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Items != nil {
-		_, err := m.Items.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.AllOf {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	if m.Properties != nil {
-		_, err := m.Properties.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Xml != nil {
-		_, err := m.Xml.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.ExternalDocs != nil {
-		_, err := m.ExternalDocs.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Example != nil {
-		_, err := m.Example.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside SchemaItem objects.
-func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	{
-		p, ok := m.Oneof.(*SchemaItem_Schema)
-		if ok {
-			_, err := p.Schema.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*SchemaItem_FileSchema)
-		if ok {
-			_, err := p.FileSchema.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside SecurityDefinitions objects.
-func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.AdditionalProperties {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside SecurityDefinitionsItem objects.
-func (m *SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	{
-		p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity)
-		if ok {
-			_, err := p.BasicAuthenticationSecurity.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*SecurityDefinitionsItem_ApiKeySecurity)
-		if ok {
-			_, err := p.ApiKeySecurity.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ImplicitSecurity)
-		if ok {
-			_, err := p.Oauth2ImplicitSecurity.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2PasswordSecurity)
-		if ok {
-			_, err := p.Oauth2PasswordSecurity.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ApplicationSecurity)
-		if ok {
-			_, err := p.Oauth2ApplicationSecurity.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)
-		if ok {
-			_, err := p.Oauth2AccessCodeSecurity.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside SecurityRequirement objects.
-func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.AdditionalProperties {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside StringArray objects.
-func (m *StringArray) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Tag objects.
-func (m *Tag) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.ExternalDocs != nil {
-		_, err := m.ExternalDocs.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside TypeItem objects.
-func (m *TypeItem) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside VendorExtension objects.
-func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.AdditionalProperties {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Xml objects.
-func (m *Xml) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export.
-func (m *AdditionalPropertiesItem) ToRawInfo() interface{} {
-	// ONE OF WRAPPER
-	// AdditionalPropertiesItem
-	// {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v0 := m.GetSchema()
-	if v0 != nil {
-		return v0.ToRawInfo()
-	}
-	// {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok {
-		return v1.Boolean
-	}
-	return nil
-}
-
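From here on the file switches from resolution to export: every ToRawInfo turns a typed model back into the loosely typed representation it was parsed from. For oneof wrappers such as AdditionalPropertiesItem that just means unwrapping whichever variant is set. A brief usage sketch (values invented for illustration; fmt assumed to be imported):

item := &AdditionalPropertiesItem{
	Oneof: &AdditionalPropertiesItem_Boolean{Boolean: true},
}
raw := item.ToRawInfo()         // interface{} holding the bare bool
fmt.Printf("%T %v\n", raw, raw) // prints: bool true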
-// ToRawInfo returns a description of Any suitable for JSON or YAML export.
-func (m *Any) ToRawInfo() interface{} {
-	var err error
-	var info1 []yaml.MapSlice
-	err = yaml.Unmarshal([]byte(m.Yaml), &info1)
-	if err == nil {
-		return info1
-	}
-	var info2 yaml.MapSlice
-	err = yaml.Unmarshal([]byte(m.Yaml), &info2)
-	if err == nil {
-		return info2
-	}
-	var info3 interface{}
-	err = yaml.Unmarshal([]byte(m.Yaml), &info3)
-	if err == nil {
-		return info3
-	}
-	return nil
-}
-
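Any keeps the original YAML text and re-decodes it on export, trying the most structured shape first: a sequence of mappings, then a single key-ordered mapping, then any other value. The same trick in isolation, as a minimal sketch using gopkg.in/yaml.v2 (the package that provides yaml.MapSlice here):

package sketch

import yaml "gopkg.in/yaml.v2"

// decodeLoosely mirrors Any.ToRawInfo: prefer []yaml.MapSlice (a YAML
// sequence of mappings), fall back to yaml.MapSlice (one key-ordered
// mapping), and finally accept any scalar or other value.
func decodeLoosely(text string) interface{} {
	var seq []yaml.MapSlice
	if err := yaml.Unmarshal([]byte(text), &seq); err == nil {
		return seq
	}
	var m yaml.MapSlice
	if err := yaml.Unmarshal([]byte(text), &m); err == nil {
		return m
	}
	var v interface{}
	if err := yaml.Unmarshal([]byte(text), &v); err == nil {
		return v
	}
	return nil
}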
-// ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export.
-func (m *ApiKeySecurity) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
-	}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	if m.In != "" {
-		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
-	}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
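ApiKeySecurity.ToRawInfo shows the export conventions every struct type below follows: build a yaml.MapSlice so key order survives serialization, omit fields still at their zero value, and splice "x-" vendor extensions in as ordinary top-level keys. A hypothetical round trip showing the effect (field values invented; yaml and fmt assumed to be imported):

sec := &ApiKeySecurity{
	Type: "apiKey",
	Name: "X-API-Key",
	In:   "header",
	// Description left empty, so no "description" key is emitted.
}
if out, err := yaml.Marshal(sec.ToRawInfo()); err == nil {
	fmt.Print(string(out))
	// type: apiKey
	// name: X-API-Key
	// in: header
}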
-// ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export.
-func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
-	}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export.
-func (m *BodyParameter) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	if m.In != "" {
-		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
-	}
-	if m.Required != false {
-		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
-	}
-	if m.Schema != nil {
-		info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()})
-	}
-	// &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of Contact suitable for JSON or YAML export.
-func (m *Contact) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	if m.Url != "" {
-		info = append(info, yaml.MapItem{Key: "url", Value: m.Url})
-	}
-	if m.Email != "" {
-		info = append(info, yaml.MapItem{Key: "email", Value: m.Email})
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of Default suitable for JSON or YAML export.
-func (m *Default) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.AdditionalProperties != nil {
-		for _, item := range m.AdditionalProperties {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:false Description:}
-	return info
-}
-
-// ToRawInfo returns a description of Definitions suitable for JSON or YAML export.
-func (m *Definitions) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.AdditionalProperties != nil {
-		for _, item := range m.AdditionalProperties {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of Document suitable for JSON or YAML export.
-func (m *Document) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Swagger != "" {
-		info = append(info, yaml.MapItem{Key: "swagger", Value: m.Swagger})
-	}
-	if m.Info != nil {
-		info = append(info, yaml.MapItem{Key: "info", Value: m.Info.ToRawInfo()})
-	}
-	// &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Host != "" {
-		info = append(info, yaml.MapItem{Key: "host", Value: m.Host})
-	}
-	if m.BasePath != "" {
-		info = append(info, yaml.MapItem{Key: "basePath", Value: m.BasePath})
-	}
-	if len(m.Schemes) != 0 {
-		info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes})
-	}
-	if len(m.Consumes) != 0 {
-		info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes})
-	}
-	if len(m.Produces) != 0 {
-		info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces})
-	}
-	if m.Paths != nil {
-		info = append(info, yaml.MapItem{Key: "paths", Value: m.Paths.ToRawInfo()})
-	}
-	// &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Definitions != nil {
-		info = append(info, yaml.MapItem{Key: "definitions", Value: m.Definitions.ToRawInfo()})
-	}
-	// &{Name:definitions Type:Definitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Parameters != nil {
-		info = append(info, yaml.MapItem{Key: "parameters", Value: m.Parameters.ToRawInfo()})
-	}
-	// &{Name:parameters Type:ParameterDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Responses != nil {
-		info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()})
-	}
-	// &{Name:responses Type:ResponseDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if len(m.Security) != 0 {
-		items := make([]interface{}, 0)
-		for _, item := range m.Security {
-			items = append(items, item.ToRawInfo())
-		}
-		info = append(info, yaml.MapItem{Key: "security", Value: items})
-	}
-	// &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
-	if m.SecurityDefinitions != nil {
-		info = append(info, yaml.MapItem{Key: "securityDefinitions", Value: m.SecurityDefinitions.ToRawInfo()})
-	}
-	// &{Name:securityDefinitions Type:SecurityDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if len(m.Tags) != 0 {
-		items := make([]interface{}, 0)
-		for _, item := range m.Tags {
-			items = append(items, item.ToRawInfo())
-		}
-		info = append(info, yaml.MapItem{Key: "tags", Value: items})
-	}
-	// &{Name:tags Type:Tag StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
-	if m.ExternalDocs != nil {
-		info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
-	}
-	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
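Document.ToRawInfo is the top of this export tree: it reassembles an entire Swagger 2.0 document, delegating each subsection to its own ToRawInfo and collecting repeated fields (security, tags) into plain slices. A sketch of the serialization step this enables, assuming doc was produced by this package's parser (exportYAML is an illustrative helper, not part of the package):

// exportYAML serializes a parsed Document back to YAML text; the round trip
// through ToRawInfo preserves key order but drops zero-valued fields.
func exportYAML(doc *Document) (string, error) {
	out, err := yaml.Marshal(doc.ToRawInfo())
	if err != nil {
		return "", err
	}
	return string(out), nil
}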
-// ToRawInfo returns a description of Examples suitable for JSON or YAML export.
-func (m *Examples) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.AdditionalProperties != nil {
-		for _, item := range m.AdditionalProperties {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export.
-func (m *ExternalDocs) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	if m.Url != "" {
-		info = append(info, yaml.MapItem{Key: "url", Value: m.Url})
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of FileSchema suitable for JSON or YAML export.
-func (m *FileSchema) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Format != "" {
-		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
-	}
-	if m.Title != "" {
-		info = append(info, yaml.MapItem{Key: "title", Value: m.Title})
-	}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	if m.Default != nil {
-		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
-	}
-	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if len(m.Required) != 0 {
-		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
-	}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
-	}
-	if m.ReadOnly != false {
-		info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly})
-	}
-	if m.ExternalDocs != nil {
-		info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
-	}
-	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Example != nil {
-		info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()})
-	}
-	// &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export.
-func (m *FormDataParameterSubSchema) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Required != false {
-		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
-	}
-	if m.In != "" {
-		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
-	}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	if m.AllowEmptyValue != false {
-		info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue})
-	}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
-	}
-	if m.Format != "" {
-		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
-	}
-	if m.Items != nil {
-		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
-	}
-	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.CollectionFormat != "" {
-		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
-	}
-	if m.Default != nil {
-		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
-	}
-	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Maximum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
-	}
-	if m.ExclusiveMaximum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
-	}
-	if m.Minimum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
-	}
-	if m.ExclusiveMinimum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
-	}
-	if m.MaxLength != 0 {
-		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
-	}
-	if m.MinLength != 0 {
-		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
-	}
-	if m.Pattern != "" {
-		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
-	}
-	if m.MaxItems != 0 {
-		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
-	}
-	if m.MinItems != 0 {
-		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
-	}
-	if m.UniqueItems != false {
-		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
-	}
-	if len(m.Enum) != 0 {
-		items := make([]interface{}, 0)
-		for _, item := range m.Enum {
-			items = append(items, item.ToRawInfo())
-		}
-		info = append(info, yaml.MapItem{Key: "enum", Value: items})
-	}
-	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
-	if m.MultipleOf != 0.0 {
-		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of Header suitable for JSON or YAML export.
-func (m *Header) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
-	}
-	if m.Format != "" {
-		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
-	}
-	if m.Items != nil {
-		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
-	}
-	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.CollectionFormat != "" {
-		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
-	}
-	if m.Default != nil {
-		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
-	}
-	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Maximum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
-	}
-	if m.ExclusiveMaximum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
-	}
-	if m.Minimum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
-	}
-	if m.ExclusiveMinimum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
-	}
-	if m.MaxLength != 0 {
-		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
-	}
-	if m.MinLength != 0 {
-		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
-	}
-	if m.Pattern != "" {
-		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
-	}
-	if m.MaxItems != 0 {
-		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
-	}
-	if m.MinItems != 0 {
-		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
-	}
-	if m.UniqueItems != false {
-		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
-	}
-	if len(m.Enum) != 0 {
-		items := make([]interface{}, 0)
-		for _, item := range m.Enum {
-			items = append(items, item.ToRawInfo())
-		}
-		info = append(info, yaml.MapItem{Key: "enum", Value: items})
-	}
-	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
-	if m.MultipleOf != 0.0 {
-		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
-	}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export.
-func (m *HeaderParameterSubSchema) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Required != false {
-		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
-	}
-	if m.In != "" {
-		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
-	}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
-	}
-	if m.Format != "" {
-		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
-	}
-	if m.Items != nil {
-		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
-	}
-	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.CollectionFormat != "" {
-		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
-	}
-	if m.Default != nil {
-		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
-	}
-	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Maximum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
-	}
-	if m.ExclusiveMaximum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
-	}
-	if m.Minimum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
-	}
-	if m.ExclusiveMinimum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
-	}
-	if m.MaxLength != 0 {
-		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
-	}
-	if m.MinLength != 0 {
-		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
-	}
-	if m.Pattern != "" {
-		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
-	}
-	if m.MaxItems != 0 {
-		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
-	}
-	if m.MinItems != 0 {
-		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
-	}
-	if m.UniqueItems != false {
-		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
-	}
-	if len(m.Enum) != 0 {
-		items := make([]interface{}, 0)
-		for _, item := range m.Enum {
-			items = append(items, item.ToRawInfo())
-		}
-		info = append(info, yaml.MapItem{Key: "enum", Value: items})
-	}
-	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
-	if m.MultipleOf != 0.0 {
-		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of Headers suitable for JSON or YAML export.
-func (m *Headers) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.AdditionalProperties != nil {
-		for _, item := range m.AdditionalProperties {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:additionalProperties Type:NamedHeader StringEnumValues:[] MapType:Header Repeated:true Pattern: Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of Info suitable for JSON or YAML export.
-func (m *Info) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Title != "" {
-		info = append(info, yaml.MapItem{Key: "title", Value: m.Title})
-	}
-	if m.Version != "" {
-		info = append(info, yaml.MapItem{Key: "version", Value: m.Version})
-	}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	if m.TermsOfService != "" {
-		info = append(info, yaml.MapItem{Key: "termsOfService", Value: m.TermsOfService})
-	}
-	if m.Contact != nil {
-		info = append(info, yaml.MapItem{Key: "contact", Value: m.Contact.ToRawInfo()})
-	}
-	// &{Name:contact Type:Contact StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.License != nil {
-		info = append(info, yaml.MapItem{Key: "license", Value: m.License.ToRawInfo()})
-	}
-	// &{Name:license Type:License StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export.
-func (m *ItemsItem) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if len(m.Schema) != 0 {
-		items := make([]interface{}, 0)
-		for _, item := range m.Schema {
-			items = append(items, item.ToRawInfo())
-		}
-		info = append(info, yaml.MapItem{Key: "schema", Value: items})
-	}
-	// &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
-	return info
-}
-
-// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export.
-func (m *JsonReference) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.XRef != "" {
-		info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
-	}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	return info
-}
-
-// ToRawInfo returns a description of License suitable for JSON or YAML export.
-func (m *License) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	if m.Url != "" {
-		info = append(info, yaml.MapItem{Key: "url", Value: m.Url})
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export.
-func (m *NamedAny) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	// &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export.
-func (m *NamedHeader) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	// &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export.
-func (m *NamedParameter) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	// &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export.
-func (m *NamedPathItem) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	// &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export.
-func (m *NamedResponse) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	// &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export.
-func (m *NamedResponseValue) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	// &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export.
-func (m *NamedSchema) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	// &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export.
-func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	// &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedString suitable for JSON or YAML export.
-func (m *NamedString) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	if m.Value != "" {
-		info = append(info, yaml.MapItem{Key: "value", Value: m.Value})
-	}
-	return info
-}
-
-// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export.
-func (m *NamedStringArray) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	// &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export.
-func (m *NonBodyParameter) ToRawInfo() interface{} {
-	// ONE OF WRAPPER
-	// NonBodyParameter
-	// {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v0 := m.GetHeaderParameterSubSchema()
-	if v0 != nil {
-		return v0.ToRawInfo()
-	}
-	// {Name:formDataParameterSubSchema Type:FormDataParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v1 := m.GetFormDataParameterSubSchema()
-	if v1 != nil {
-		return v1.ToRawInfo()
-	}
-	// {Name:queryParameterSubSchema Type:QueryParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v2 := m.GetQueryParameterSubSchema()
-	if v2 != nil {
-		return v2.ToRawInfo()
-	}
-	// {Name:pathParameterSubSchema Type:PathParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v3 := m.GetPathParameterSubSchema()
-	if v3 != nil {
-		return v3.ToRawInfo()
-	}
-	return nil
-}
-
-// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export.
-func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
-	}
-	if m.Flow != "" {
-		info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
-	}
-	if m.Scopes != nil {
-		info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
-	}
-	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.AuthorizationUrl != "" {
-		info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl})
-	}
-	if m.TokenUrl != "" {
-		info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl})
-	}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export.
-func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
-	}
-	if m.Flow != "" {
-		info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
-	}
-	if m.Scopes != nil {
-		info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
-	}
-	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.TokenUrl != "" {
-		info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl})
-	}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export.
-func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
-	}
-	if m.Flow != "" {
-		info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
-	}
-	if m.Scopes != nil {
-		info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
-	}
-	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.AuthorizationUrl != "" {
-		info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl})
-	}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export.
-func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
-	}
-	if m.Flow != "" {
-		info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
-	}
-	if m.Scopes != nil {
-		info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
-	}
-	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.TokenUrl != "" {
-		info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl})
-	}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export.
-func (m *Oauth2Scopes) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	// &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:}
-	// Emit the scope-name/description pairs; the generated body dropped them on export.
-	for _, item := range m.AdditionalProperties {
-		info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value})
-	}
-	return info
-}
-
-// ToRawInfo returns a description of Operation suitable for JSON or YAML export.
-func (m *Operation) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if len(m.Tags) != 0 {
-		info = append(info, yaml.MapItem{Key: "tags", Value: m.Tags})
-	}
-	if m.Summary != "" {
-		info = append(info, yaml.MapItem{Key: "summary", Value: m.Summary})
-	}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	if m.ExternalDocs != nil {
-		info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
-	}
-	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.OperationId != "" {
-		info = append(info, yaml.MapItem{Key: "operationId", Value: m.OperationId})
-	}
-	if len(m.Produces) != 0 {
-		info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces})
-	}
-	if len(m.Consumes) != 0 {
-		info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes})
-	}
-	if len(m.Parameters) != 0 {
-		items := make([]interface{}, 0)
-		for _, item := range m.Parameters {
-			items = append(items, item.ToRawInfo())
-		}
-		info = append(info, yaml.MapItem{Key: "parameters", Value: items})
-	}
-	// &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.}
-	if m.Responses != nil {
-		info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()})
-	}
-	// &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if len(m.Schemes) != 0 {
-		info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes})
-	}
-	if m.Deprecated != false {
-		info = append(info, yaml.MapItem{Key: "deprecated", Value: m.Deprecated})
-	}
-	if len(m.Security) != 0 {
-		items := make([]interface{}, 0)
-		for _, item := range m.Security {
-			items = append(items, item.ToRawInfo())
-		}
-		info = append(info, yaml.MapItem{Key: "security", Value: items})
-	}
-	// &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of Parameter suitable for JSON or YAML export.
-func (m *Parameter) ToRawInfo() interface{} {
-	// ONE OF WRAPPER
-	// Parameter
-	// {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v0 := m.GetBodyParameter()
-	if v0 != nil {
-		return v0.ToRawInfo()
-	}
-	// {Name:nonBodyParameter Type:NonBodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v1 := m.GetNonBodyParameter()
-	if v1 != nil {
-		return v1.ToRawInfo()
-	}
-	return nil
-}
-
-// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export.
-func (m *ParameterDefinitions) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.AdditionalProperties != nil {
-		for _, item := range m.AdditionalProperties {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:additionalProperties Type:NamedParameter StringEnumValues:[] MapType:Parameter Repeated:true Pattern: Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export.
-func (m *ParametersItem) ToRawInfo() interface{} {
-	// ONE OF WRAPPER
-	// ParametersItem
-	// {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v0 := m.GetParameter()
-	if v0 != nil {
-		return v0.ToRawInfo()
-	}
-	// {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v1 := m.GetJsonReference()
-	if v1 != nil {
-		return v1.ToRawInfo()
-	}
-	return nil
-}
-
-// ToRawInfo returns a description of PathItem suitable for JSON or YAML export.
-func (m *PathItem) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.XRef != "" {
-		info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
-	}
-	if m.Get != nil {
-		info = append(info, yaml.MapItem{Key: "get", Value: m.Get.ToRawInfo()})
-	}
-	// &{Name:get Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Put != nil {
-		info = append(info, yaml.MapItem{Key: "put", Value: m.Put.ToRawInfo()})
-	}
-	// &{Name:put Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Post != nil {
-		info = append(info, yaml.MapItem{Key: "post", Value: m.Post.ToRawInfo()})
-	}
-	// &{Name:post Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Delete != nil {
-		info = append(info, yaml.MapItem{Key: "delete", Value: m.Delete.ToRawInfo()})
-	}
-	// &{Name:delete Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Options != nil {
-		info = append(info, yaml.MapItem{Key: "options", Value: m.Options.ToRawInfo()})
-	}
-	// &{Name:options Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Head != nil {
-		info = append(info, yaml.MapItem{Key: "head", Value: m.Head.ToRawInfo()})
-	}
-	// &{Name:head Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Patch != nil {
-		info = append(info, yaml.MapItem{Key: "patch", Value: m.Patch.ToRawInfo()})
-	}
-	// &{Name:patch Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if len(m.Parameters) != 0 {
-		items := make([]interface{}, 0)
-		for _, item := range m.Parameters {
-			items = append(items, item.ToRawInfo())
-		}
-		info = append(info, yaml.MapItem{Key: "parameters", Value: items})
-	}
-	// &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export.
-func (m *PathParameterSubSchema) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Required != false {
-		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
-	}
-	if m.In != "" {
-		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
-	}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
-	}
-	if m.Format != "" {
-		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
-	}
-	if m.Items != nil {
-		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
-	}
-	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.CollectionFormat != "" {
-		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
-	}
-	if m.Default != nil {
-		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
-	}
-	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Maximum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
-	}
-	if m.ExclusiveMaximum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
-	}
-	if m.Minimum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
-	}
-	if m.ExclusiveMinimum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
-	}
-	if m.MaxLength != 0 {
-		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
-	}
-	if m.MinLength != 0 {
-		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
-	}
-	if m.Pattern != "" {
-		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
-	}
-	if m.MaxItems != 0 {
-		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
-	}
-	if m.MinItems != 0 {
-		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
-	}
-	if m.UniqueItems != false {
-		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
-	}
-	if len(m.Enum) != 0 {
-		items := make([]interface{}, 0)
-		for _, item := range m.Enum {
-			items = append(items, item.ToRawInfo())
-		}
-		info = append(info, yaml.MapItem{Key: "enum", Value: items})
-	}
-	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
-	if m.MultipleOf != 0.0 {
-		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of Paths suitable for JSON or YAML export.
-func (m *Paths) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	if m.Path != nil {
-		for _, item := range m.Path {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:Path Type:NamedPathItem StringEnumValues:[] MapType:PathItem Repeated:true Pattern:^/ Implicit:true Description:}
-	return info
-}
-
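The ordering of these appends is load-bearing. Assuming the yaml package
imported by this file is gopkg.in/yaml.v2, a MapSlice marshals its keys in
insertion order, which is why Paths.ToRawInfo above can emit x- vendor
extensions ahead of the path entries and have them serialize in exactly that
order. A small self-contained demonstration:

    package main

    import (
        "fmt"

        yaml "gopkg.in/yaml.v2"
    )

    func main() {
        // MapSlice preserves insertion order, unlike a Go map.
        info := yaml.MapSlice{
            {Key: "x-generator", Value: "example"},
            {Key: "/pets", Value: yaml.MapSlice{{Key: "get", Value: nil}}},
        }
        out, err := yaml.Marshal(info)
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
        // x-generator: example
        // /pets:
        //   get: null
    }
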
-// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export.
-func (m *PrimitivesItems) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
-	}
-	if m.Format != "" {
-		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
-	}
-	if m.Items != nil {
-		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
-	}
-	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.CollectionFormat != "" {
-		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
-	}
-	if m.Default != nil {
-		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
-	}
-	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Maximum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
-	}
-	if m.ExclusiveMaximum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
-	}
-	if m.Minimum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
-	}
-	if m.ExclusiveMinimum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
-	}
-	if m.MaxLength != 0 {
-		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
-	}
-	if m.MinLength != 0 {
-		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
-	}
-	if m.Pattern != "" {
-		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
-	}
-	if m.MaxItems != 0 {
-		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
-	}
-	if m.MinItems != 0 {
-		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
-	}
-	if m.UniqueItems != false {
-		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
-	}
-	if len(m.Enum) != 0 {
-		items := make([]interface{}, 0)
-		for _, item := range m.Enum {
-			items = append(items, item.ToRawInfo())
-		}
-		info = append(info, yaml.MapItem{Key: "enum", Value: items})
-	}
-	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
-	if m.MultipleOf != 0.0 {
-		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of Properties suitable for JSON or YAML export.
-func (m *Properties) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.AdditionalProperties != nil {
-		for _, item := range m.AdditionalProperties {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export.
-func (m *QueryParameterSubSchema) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Required != false {
-		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
-	}
-	if m.In != "" {
-		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
-	}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	if m.AllowEmptyValue != false {
-		info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue})
-	}
-	if m.Type != "" {
-		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
-	}
-	if m.Format != "" {
-		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
-	}
-	if m.Items != nil {
-		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
-	}
-	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.CollectionFormat != "" {
-		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
-	}
-	if m.Default != nil {
-		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
-	}
-	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Maximum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
-	}
-	if m.ExclusiveMaximum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
-	}
-	if m.Minimum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
-	}
-	if m.ExclusiveMinimum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
-	}
-	if m.MaxLength != 0 {
-		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
-	}
-	if m.MinLength != 0 {
-		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
-	}
-	if m.Pattern != "" {
-		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
-	}
-	if m.MaxItems != 0 {
-		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
-	}
-	if m.MinItems != 0 {
-		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
-	}
-	if m.UniqueItems != false {
-		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
-	}
-	if len(m.Enum) != 0 {
-		items := make([]interface{}, 0)
-		for _, item := range m.Enum {
-			items = append(items, item.ToRawInfo())
-		}
-		info = append(info, yaml.MapItem{Key: "enum", Value: items})
-	}
-	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
-	if m.MultipleOf != 0.0 {
-		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of Response suitable for JSON or YAML export.
-func (m *Response) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	if m.Schema != nil {
-		info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()})
-	}
-	// &{Name:schema Type:SchemaItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Headers != nil {
-		info = append(info, yaml.MapItem{Key: "headers", Value: m.Headers.ToRawInfo()})
-	}
-	// &{Name:headers Type:Headers StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Examples != nil {
-		info = append(info, yaml.MapItem{Key: "examples", Value: m.Examples.ToRawInfo()})
-	}
-	// &{Name:examples Type:Examples StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export.
-func (m *ResponseDefinitions) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.AdditionalProperties != nil {
-		for _, item := range m.AdditionalProperties {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:additionalProperties Type:NamedResponse StringEnumValues:[] MapType:Response Repeated:true Pattern: Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export.
-func (m *ResponseValue) ToRawInfo() interface{} {
-	// ONE OF WRAPPER
-	// ResponseValue
-	// {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v0 := m.GetResponse()
-	if v0 != nil {
-		return v0.ToRawInfo()
-	}
-	// {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v1 := m.GetJsonReference()
-	if v1 != nil {
-		return v1.ToRawInfo()
-	}
-	return nil
-}
-
-// ToRawInfo returns a description of Responses suitable for JSON or YAML export.
-func (m *Responses) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.ResponseCode != nil {
-		for _, item := range m.ResponseCode {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:ResponseCode Type:NamedResponseValue StringEnumValues:[] MapType:ResponseValue Repeated:true Pattern:^([0-9]{3})$|^(default)$ Implicit:true Description:}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of Schema suitable for JSON or YAML export.
-func (m *Schema) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.XRef != "" {
-		info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
-	}
-	if m.Format != "" {
-		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
-	}
-	if m.Title != "" {
-		info = append(info, yaml.MapItem{Key: "title", Value: m.Title})
-	}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	if m.Default != nil {
-		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
-	}
-	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.MultipleOf != 0.0 {
-		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
-	}
-	if m.Maximum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
-	}
-	if m.ExclusiveMaximum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
-	}
-	if m.Minimum != 0.0 {
-		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
-	}
-	if m.ExclusiveMinimum != false {
-		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
-	}
-	if m.MaxLength != 0 {
-		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
-	}
-	if m.MinLength != 0 {
-		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
-	}
-	if m.Pattern != "" {
-		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
-	}
-	if m.MaxItems != 0 {
-		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
-	}
-	if m.MinItems != 0 {
-		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
-	}
-	if m.UniqueItems != false {
-		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
-	}
-	if m.MaxProperties != 0 {
-		info = append(info, yaml.MapItem{Key: "maxProperties", Value: m.MaxProperties})
-	}
-	if m.MinProperties != 0 {
-		info = append(info, yaml.MapItem{Key: "minProperties", Value: m.MinProperties})
-	}
-	if len(m.Required) != 0 {
-		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
-	}
-	if len(m.Enum) != 0 {
-		items := make([]interface{}, 0)
-		for _, item := range m.Enum {
-			items = append(items, item.ToRawInfo())
-		}
-		info = append(info, yaml.MapItem{Key: "enum", Value: items})
-	}
-	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
-	if m.AdditionalProperties != nil {
-		info = append(info, yaml.MapItem{Key: "additionalProperties", Value: m.AdditionalProperties.ToRawInfo()})
-	}
-	// &{Name:additionalProperties Type:AdditionalPropertiesItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Type != nil {
-		if len(m.Type.Value) == 1 {
-			info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value[0]})
-		} else {
-			info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value})
-		}
-	}
-	// &{Name:type Type:TypeItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Items != nil && len(m.Items.Schema) > 0 {
-		items := make([]interface{}, 0)
-		for _, item := range m.Items.Schema {
-			items = append(items, item.ToRawInfo())
-		}
-		// Only the first schema is emitted (the common single-schema form);
-		// the length guard above avoids an index-out-of-range panic on an
-		// empty ItemsItem.
-		info = append(info, yaml.MapItem{Key: "items", Value: items[0]})
-	}
-	// &{Name:items Type:ItemsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if len(m.AllOf) != 0 {
-		items := make([]interface{}, 0)
-		for _, item := range m.AllOf {
-			items = append(items, item.ToRawInfo())
-		}
-		info = append(info, yaml.MapItem{Key: "allOf", Value: items})
-	}
-	// &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
-	if m.Properties != nil {
-		info = append(info, yaml.MapItem{Key: "properties", Value: m.Properties.ToRawInfo()})
-	}
-	// &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Discriminator != "" {
-		info = append(info, yaml.MapItem{Key: "discriminator", Value: m.Discriminator})
-	}
-	if m.ReadOnly != false {
-		info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly})
-	}
-	if m.Xml != nil {
-		info = append(info, yaml.MapItem{Key: "xml", Value: m.Xml.ToRawInfo()})
-	}
-	// &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.ExternalDocs != nil {
-		info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
-	}
-	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.Example != nil {
-		info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()})
-	}
-	// &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
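A caveat that applies to Schema.ToRawInfo and every sibling exporter above:
scalar fields are emitted only when they differ from their Go zero value, so
an explicit minimum: 0, description: "", or readOnly: false in the source
document is silently dropped on re-export. A stripped-down stand-in
(miniSchema is illustrative, not the generated type) showing the effect:

    package main

    import (
        "fmt"

        yaml "gopkg.in/yaml.v2"
    )

    // miniSchema mimics two fields of the generated Schema message.
    type miniSchema struct {
        Description string
        Minimum     float64
    }

    // toRawInfo copies the zero-value guard style used throughout this file.
    func (m miniSchema) toRawInfo() yaml.MapSlice {
        info := yaml.MapSlice{}
        if m.Description != "" {
            info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
        }
        if m.Minimum != 0.0 {
            info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
        }
        return info
    }

    func main() {
        out, _ := yaml.Marshal(miniSchema{Minimum: 0}.toRawInfo())
        fmt.Print(string(out)) // {}: the explicit zero minimum is gone
    }
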
-// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export.
-func (m *SchemaItem) ToRawInfo() interface{} {
-	// ONE OF WRAPPER
-	// SchemaItem
-	// {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v0 := m.GetSchema()
-	if v0 != nil {
-		return v0.ToRawInfo()
-	}
-	// {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v1 := m.GetFileSchema()
-	if v1 != nil {
-		return v1.ToRawInfo()
-	}
-	return nil
-}
-
-// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export.
-func (m *SecurityDefinitions) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.AdditionalProperties != nil {
-		for _, item := range m.AdditionalProperties {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export.
-func (m *SecurityDefinitionsItem) ToRawInfo() interface{} {
-	// ONE OF WRAPPER
-	// SecurityDefinitionsItem
-	// {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v0 := m.GetBasicAuthenticationSecurity()
-	if v0 != nil {
-		return v0.ToRawInfo()
-	}
-	// {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v1 := m.GetApiKeySecurity()
-	if v1 != nil {
-		return v1.ToRawInfo()
-	}
-	// {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v2 := m.GetOauth2ImplicitSecurity()
-	if v2 != nil {
-		return v2.ToRawInfo()
-	}
-	// {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v3 := m.GetOauth2PasswordSecurity()
-	if v3 != nil {
-		return v3.ToRawInfo()
-	}
-	// {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v4 := m.GetOauth2ApplicationSecurity()
-	if v4 != nil {
-		return v4.ToRawInfo()
-	}
-	// {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v5 := m.GetOauth2AccessCodeSecurity()
-	if v5 != nil {
-		return v5.ToRawInfo()
-	}
-	return nil
-}
-
-// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export.
-func (m *SecurityRequirement) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.AdditionalProperties != nil {
-		for _, item := range m.AdditionalProperties {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of StringArray suitable for JSON or YAML export.
-func (m *StringArray) ToRawInfo() interface{} {
-	return m.Value
-}
-
-// ToRawInfo returns a description of Tag suitable for JSON or YAML export.
-func (m *Tag) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	if m.Description != "" {
-		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
-	}
-	if m.ExternalDocs != nil {
-		info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
-	}
-	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export.
-func (m *TypeItem) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if len(m.Value) != 0 {
-		info = append(info, yaml.MapItem{Key: "value", Value: m.Value})
-	}
-	return info
-}
-
-// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export.
-func (m *VendorExtension) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.AdditionalProperties != nil {
-		for _, item := range m.AdditionalProperties {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of Xml suitable for JSON or YAML export.
-func (m *Xml) ToRawInfo() interface{} {
-	info := yaml.MapSlice{}
-	if m.Name != "" {
-		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
-	}
-	if m.Namespace != "" {
-		info = append(info, yaml.MapItem{Key: "namespace", Value: m.Namespace})
-	}
-	if m.Prefix != "" {
-		info = append(info, yaml.MapItem{Key: "prefix", Value: m.Prefix})
-	}
-	if m.Attribute != false {
-		info = append(info, yaml.MapItem{Key: "attribute", Value: m.Attribute})
-	}
-	if m.Wrapped != false {
-		info = append(info, yaml.MapItem{Key: "wrapped", Value: m.Wrapped})
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
-		}
-	}
-	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
-	return info
-}
-
-var (
-	pattern0 = regexp.MustCompile("^x-")
-	pattern1 = regexp.MustCompile("^/")
-	pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$")
-)
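These three patterns back the Implicit:true map fields referenced in the
generator comments above: keys matching ^x- become vendor extensions, keys
starting with / become path items, and three-digit codes or the literal
"default" become response entries. A quick self-contained check of that
classification, with the regexps copied verbatim from the vars above:

    package main

    import (
        "fmt"
        "regexp"
    )

    var (
        vendorExt    = regexp.MustCompile("^x-")
        pathKey      = regexp.MustCompile("^/")
        responseCode = regexp.MustCompile("^([0-9]{3})$|^(default)$")
    )

    func classify(key string) string {
        switch {
        case vendorExt.MatchString(key):
            return "vendor extension"
        case pathKey.MatchString(key):
            return "path item"
        case responseCode.MatchString(key):
            return "response value"
        }
        return "other"
    }

    func main() {
        for _, k := range []string{"x-internal", "/pets/{id}", "404", "default", "title"} {
            fmt.Printf("%-12s -> %s\n", k, classify(k))
        }
    }
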
diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go
deleted file mode 100644
index a030fa6..0000000
--- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go
+++ /dev/null
@@ -1,4455 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: OpenAPIv2/OpenAPIv2.proto
-
-/*
-Package openapi_v2 is a generated protocol buffer package.
-
-It is generated from these files:
-	OpenAPIv2/OpenAPIv2.proto
-
-It has these top-level messages:
-	AdditionalPropertiesItem
-	Any
-	ApiKeySecurity
-	BasicAuthenticationSecurity
-	BodyParameter
-	Contact
-	Default
-	Definitions
-	Document
-	Examples
-	ExternalDocs
-	FileSchema
-	FormDataParameterSubSchema
-	Header
-	HeaderParameterSubSchema
-	Headers
-	Info
-	ItemsItem
-	JsonReference
-	License
-	NamedAny
-	NamedHeader
-	NamedParameter
-	NamedPathItem
-	NamedResponse
-	NamedResponseValue
-	NamedSchema
-	NamedSecurityDefinitionsItem
-	NamedString
-	NamedStringArray
-	NonBodyParameter
-	Oauth2AccessCodeSecurity
-	Oauth2ApplicationSecurity
-	Oauth2ImplicitSecurity
-	Oauth2PasswordSecurity
-	Oauth2Scopes
-	Operation
-	Parameter
-	ParameterDefinitions
-	ParametersItem
-	PathItem
-	PathParameterSubSchema
-	Paths
-	PrimitivesItems
-	Properties
-	QueryParameterSubSchema
-	Response
-	ResponseDefinitions
-	ResponseValue
-	Responses
-	Schema
-	SchemaItem
-	SecurityDefinitions
-	SecurityDefinitionsItem
-	SecurityRequirement
-	StringArray
-	Tag
-	TypeItem
-	VendorExtension
-	Xml
-*/
-package openapi_v2
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import google_protobuf "github.com/golang/protobuf/ptypes/any"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
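The blank const above is the standard Go compile-time assertion idiom:
referencing a symbol that only sufficiently new versions of a dependency
export turns version skew into a build failure instead of a runtime surprise.
The same trick in isolation, where apiVersion is an illustrative stand-in for
proto.ProtoPackageIsVersion2:

    package main

    import "fmt"

    // apiVersion plays the role of a constant exported by a dependency.
    const apiVersion = 2

    // The assertion: if the dependency ever stops exporting the constant,
    // this line fails to compile.
    const _ = apiVersion

    func main() { fmt.Println("builds only while apiVersion is defined") }
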
-type AdditionalPropertiesItem struct {
-	// Types that are valid to be assigned to Oneof:
-	//	*AdditionalPropertiesItem_Schema
-	//	*AdditionalPropertiesItem_Boolean
-	Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"`
-}
-
-func (m *AdditionalPropertiesItem) Reset()                    { *m = AdditionalPropertiesItem{} }
-func (m *AdditionalPropertiesItem) String() string            { return proto.CompactTextString(m) }
-func (*AdditionalPropertiesItem) ProtoMessage()               {}
-func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-
-type isAdditionalPropertiesItem_Oneof interface {
-	isAdditionalPropertiesItem_Oneof()
-}
-
-type AdditionalPropertiesItem_Schema struct {
-	Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"`
-}
-type AdditionalPropertiesItem_Boolean struct {
-	Boolean bool `protobuf:"varint,2,opt,name=boolean,oneof"`
-}
-
-func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof()  {}
-func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {}
-
-func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof {
-	if m != nil {
-		return m.Oneof
-	}
-	return nil
-}
-
-func (m *AdditionalPropertiesItem) GetSchema() *Schema {
-	if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Schema); ok {
-		return x.Schema
-	}
-	return nil
-}
-
-func (m *AdditionalPropertiesItem) GetBoolean() bool {
-	if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok {
-		return x.Boolean
-	}
-	return false
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*AdditionalPropertiesItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _AdditionalPropertiesItem_OneofMarshaler, _AdditionalPropertiesItem_OneofUnmarshaler, _AdditionalPropertiesItem_OneofSizer, []interface{}{
-		(*AdditionalPropertiesItem_Schema)(nil),
-		(*AdditionalPropertiesItem_Boolean)(nil),
-	}
-}
-
-func _AdditionalPropertiesItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*AdditionalPropertiesItem)
-	// oneof
-	switch x := m.Oneof.(type) {
-	case *AdditionalPropertiesItem_Schema:
-		b.EncodeVarint(1<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Schema); err != nil {
-			return err
-		}
-	case *AdditionalPropertiesItem_Boolean:
-		t := uint64(0)
-		if x.Boolean {
-			t = 1
-		}
-		b.EncodeVarint(2<<3 | proto.WireVarint)
-		b.EncodeVarint(t)
-	case nil:
-	default:
-		return fmt.Errorf("AdditionalPropertiesItem.Oneof has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _AdditionalPropertiesItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*AdditionalPropertiesItem)
-	switch tag {
-	case 1: // oneof.schema
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(Schema)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &AdditionalPropertiesItem_Schema{msg}
-		return true, err
-	case 2: // oneof.boolean
-		if wire != proto.WireVarint {
-			return true, proto.ErrInternalBadWireType
-		}
-		x, err := b.DecodeVarint()
-		m.Oneof = &AdditionalPropertiesItem_Boolean{x != 0}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _AdditionalPropertiesItem_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*AdditionalPropertiesItem)
-	// oneof
-	switch x := m.Oneof.(type) {
-	case *AdditionalPropertiesItem_Schema:
-		s := proto.Size(x.Schema)
-		n += proto.SizeVarint(1<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *AdditionalPropertiesItem_Boolean:
-		n += proto.SizeVarint(2<<3 | proto.WireVarint)
-		n += 1
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
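The hand-rolled oneof marshaler and sizer above lean directly on the protobuf
wire format: every field on the wire is prefixed by a varint key of
field_number<<3 | wire_type, which is what expressions like
1<<3 | proto.WireBytes compute. A tiny sketch of that arithmetic for the two
AdditionalPropertiesItem variants:

    package main

    import "fmt"

    // Wire types from the protobuf encoding spec.
    const (
        wireVarint = 0 // proto.WireVarint
        wireBytes  = 2 // proto.WireBytes
    )

    // key computes the varint key that prefixes a field on the wire.
    func key(fieldNumber, wireType int) int {
        return fieldNumber<<3 | wireType
    }

    func main() {
        fmt.Println(key(1, wireBytes))  // 10: the schema variant's tag
        fmt.Println(key(2, wireVarint)) // 16: the boolean variant's tag
    }
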
-type Any struct {
-	Value *google_protobuf.Any `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"`
-	Yaml  string               `protobuf:"bytes,2,opt,name=yaml" json:"yaml,omitempty"`
-}
-
-func (m *Any) Reset()                    { *m = Any{} }
-func (m *Any) String() string            { return proto.CompactTextString(m) }
-func (*Any) ProtoMessage()               {}
-func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
-
-func (m *Any) GetValue() *google_protobuf.Any {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-func (m *Any) GetYaml() string {
-	if m != nil {
-		return m.Yaml
-	}
-	return ""
-}
-
-type ApiKeySecurity struct {
-	Type            string      `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
-	Name            string      `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
-	In              string      `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"`
-	Description     string      `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *ApiKeySecurity) Reset()                    { *m = ApiKeySecurity{} }
-func (m *ApiKeySecurity) String() string            { return proto.CompactTextString(m) }
-func (*ApiKeySecurity) ProtoMessage()               {}
-func (*ApiKeySecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
-
-func (m *ApiKeySecurity) GetType() string {
-	if m != nil {
-		return m.Type
-	}
-	return ""
-}
-
-func (m *ApiKeySecurity) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *ApiKeySecurity) GetIn() string {
-	if m != nil {
-		return m.In
-	}
-	return ""
-}
-
-func (m *ApiKeySecurity) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *ApiKeySecurity) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
-type BasicAuthenticationSecurity struct {
-	Type            string      `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
-	Description     string      `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *BasicAuthenticationSecurity) Reset()                    { *m = BasicAuthenticationSecurity{} }
-func (m *BasicAuthenticationSecurity) String() string            { return proto.CompactTextString(m) }
-func (*BasicAuthenticationSecurity) ProtoMessage()               {}
-func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
-
-func (m *BasicAuthenticationSecurity) GetType() string {
-	if m != nil {
-		return m.Type
-	}
-	return ""
-}
-
-func (m *BasicAuthenticationSecurity) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
-type BodyParameter struct {
-	// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
-	Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"`
-	// The name of the parameter.
-	Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
-	// Determines the location of the parameter.
-	In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"`
-	// Determines whether this parameter is required.
-	Required        bool        `protobuf:"varint,4,opt,name=required" json:"required,omitempty"`
-	Schema          *Schema     `protobuf:"bytes,5,opt,name=schema" json:"schema,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *BodyParameter) Reset()                    { *m = BodyParameter{} }
-func (m *BodyParameter) String() string            { return proto.CompactTextString(m) }
-func (*BodyParameter) ProtoMessage()               {}
-func (*BodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
-
-func (m *BodyParameter) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *BodyParameter) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *BodyParameter) GetIn() string {
-	if m != nil {
-		return m.In
-	}
-	return ""
-}
-
-func (m *BodyParameter) GetRequired() bool {
-	if m != nil {
-		return m.Required
-	}
-	return false
-}
-
-func (m *BodyParameter) GetSchema() *Schema {
-	if m != nil {
-		return m.Schema
-	}
-	return nil
-}
-
-func (m *BodyParameter) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
-// Contact information for the owners of the API.
-type Contact struct {
-	// The identifying name of the contact person/organization.
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	// The URL pointing to the contact information.
-	Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
-	// The email address of the contact person/organization.
-	Email           string      `protobuf:"bytes,3,opt,name=email" json:"email,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *Contact) Reset()                    { *m = Contact{} }
-func (m *Contact) String() string            { return proto.CompactTextString(m) }
-func (*Contact) ProtoMessage()               {}
-func (*Contact) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
-
-func (m *Contact) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *Contact) GetUrl() string {
-	if m != nil {
-		return m.Url
-	}
-	return ""
-}
-
-func (m *Contact) GetEmail() string {
-	if m != nil {
-		return m.Email
-	}
-	return ""
-}
-
-func (m *Contact) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
-type Default struct {
-	AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
-}
-
-func (m *Default) Reset()                    { *m = Default{} }
-func (m *Default) String() string            { return proto.CompactTextString(m) }
-func (*Default) ProtoMessage()               {}
-func (*Default) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
-
-func (m *Default) GetAdditionalProperties() []*NamedAny {
-	if m != nil {
-		return m.AdditionalProperties
-	}
-	return nil
-}
-
-// One or more JSON objects describing the schemas being consumed and produced by the API.
-type Definitions struct {
-	AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
-}
-
-func (m *Definitions) Reset()                    { *m = Definitions{} }
-func (m *Definitions) String() string            { return proto.CompactTextString(m) }
-func (*Definitions) ProtoMessage()               {}
-func (*Definitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
-
-func (m *Definitions) GetAdditionalProperties() []*NamedSchema {
-	if m != nil {
-		return m.AdditionalProperties
-	}
-	return nil
-}
-
-type Document struct {
-	// The Swagger version of this document.
-	Swagger string `protobuf:"bytes,1,opt,name=swagger" json:"swagger,omitempty"`
-	Info    *Info  `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"`
-	// The host (name or IP) of the API. Example: 'swagger.io'.
-	Host string `protobuf:"bytes,3,opt,name=host" json:"host,omitempty"`
-	// The base path to the API. Example: '/api'.
-	BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath" json:"base_path,omitempty"`
-	// The transfer protocol of the API.
-	Schemes []string `protobuf:"bytes,5,rep,name=schemes" json:"schemes,omitempty"`
-	// A list of MIME types accepted by the API.
-	Consumes []string `protobuf:"bytes,6,rep,name=consumes" json:"consumes,omitempty"`
-	// A list of MIME types the API can produce.
-	Produces            []string               `protobuf:"bytes,7,rep,name=produces" json:"produces,omitempty"`
-	Paths               *Paths                 `protobuf:"bytes,8,opt,name=paths" json:"paths,omitempty"`
-	Definitions         *Definitions           `protobuf:"bytes,9,opt,name=definitions" json:"definitions,omitempty"`
-	Parameters          *ParameterDefinitions  `protobuf:"bytes,10,opt,name=parameters" json:"parameters,omitempty"`
-	Responses           *ResponseDefinitions   `protobuf:"bytes,11,opt,name=responses" json:"responses,omitempty"`
-	Security            []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"`
-	SecurityDefinitions *SecurityDefinitions   `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions" json:"security_definitions,omitempty"`
-	Tags                []*Tag                 `protobuf:"bytes,14,rep,name=tags" json:"tags,omitempty"`
-	ExternalDocs        *ExternalDocs          `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"`
-	VendorExtension     []*NamedAny            `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *Document) Reset()                    { *m = Document{} }
-func (m *Document) String() string            { return proto.CompactTextString(m) }
-func (*Document) ProtoMessage()               {}
-func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
-
-func (m *Document) GetSwagger() string {
-	if m != nil {
-		return m.Swagger
-	}
-	return ""
-}
-
-func (m *Document) GetInfo() *Info {
-	if m != nil {
-		return m.Info
-	}
-	return nil
-}
-
-func (m *Document) GetHost() string {
-	if m != nil {
-		return m.Host
-	}
-	return ""
-}
-
-func (m *Document) GetBasePath() string {
-	if m != nil {
-		return m.BasePath
-	}
-	return ""
-}
-
-func (m *Document) GetSchemes() []string {
-	if m != nil {
-		return m.Schemes
-	}
-	return nil
-}
-
-func (m *Document) GetConsumes() []string {
-	if m != nil {
-		return m.Consumes
-	}
-	return nil
-}
-
-func (m *Document) GetProduces() []string {
-	if m != nil {
-		return m.Produces
-	}
-	return nil
-}
-
-func (m *Document) GetPaths() *Paths {
-	if m != nil {
-		return m.Paths
-	}
-	return nil
-}
-
-func (m *Document) GetDefinitions() *Definitions {
-	if m != nil {
-		return m.Definitions
-	}
-	return nil
-}
-
-func (m *Document) GetParameters() *ParameterDefinitions {
-	if m != nil {
-		return m.Parameters
-	}
-	return nil
-}
-
-func (m *Document) GetResponses() *ResponseDefinitions {
-	if m != nil {
-		return m.Responses
-	}
-	return nil
-}
-
-func (m *Document) GetSecurity() []*SecurityRequirement {
-	if m != nil {
-		return m.Security
-	}
-	return nil
-}
-
-func (m *Document) GetSecurityDefinitions() *SecurityDefinitions {
-	if m != nil {
-		return m.SecurityDefinitions
-	}
-	return nil
-}
-
-func (m *Document) GetTags() []*Tag {
-	if m != nil {
-		return m.Tags
-	}
-	return nil
-}
-
-func (m *Document) GetExternalDocs() *ExternalDocs {
-	if m != nil {
-		return m.ExternalDocs
-	}
-	return nil
-}
-
-func (m *Document) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
-type Examples struct {
-	AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
-}
-
-func (m *Examples) Reset()                    { *m = Examples{} }
-func (m *Examples) String() string            { return proto.CompactTextString(m) }
-func (*Examples) ProtoMessage()               {}
-func (*Examples) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
-
-func (m *Examples) GetAdditionalProperties() []*NamedAny {
-	if m != nil {
-		return m.AdditionalProperties
-	}
-	return nil
-}
-
-// information about external documentation
-type ExternalDocs struct {
-	Description     string      `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"`
-	Url             string      `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *ExternalDocs) Reset()                    { *m = ExternalDocs{} }
-func (m *ExternalDocs) String() string            { return proto.CompactTextString(m) }
-func (*ExternalDocs) ProtoMessage()               {}
-func (*ExternalDocs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
-
-func (m *ExternalDocs) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *ExternalDocs) GetUrl() string {
-	if m != nil {
-		return m.Url
-	}
-	return ""
-}
-
-func (m *ExternalDocs) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
-// A deterministic version of a JSON Schema object.
-type FileSchema struct {
-	Format          string        `protobuf:"bytes,1,opt,name=format" json:"format,omitempty"`
-	Title           string        `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"`
-	Description     string        `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
-	Default         *Any          `protobuf:"bytes,4,opt,name=default" json:"default,omitempty"`
-	Required        []string      `protobuf:"bytes,5,rep,name=required" json:"required,omitempty"`
-	Type            string        `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"`
-	ReadOnly        bool          `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"`
-	ExternalDocs    *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"`
-	Example         *Any          `protobuf:"bytes,9,opt,name=example" json:"example,omitempty"`
-	VendorExtension []*NamedAny   `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *FileSchema) Reset()                    { *m = FileSchema{} }
-func (m *FileSchema) String() string            { return proto.CompactTextString(m) }
-func (*FileSchema) ProtoMessage()               {}
-func (*FileSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
-
-func (m *FileSchema) GetFormat() string {
-	if m != nil {
-		return m.Format
-	}
-	return ""
-}
-
-func (m *FileSchema) GetTitle() string {
-	if m != nil {
-		return m.Title
-	}
-	return ""
-}
-
-func (m *FileSchema) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *FileSchema) GetDefault() *Any {
-	if m != nil {
-		return m.Default
-	}
-	return nil
-}
-
-func (m *FileSchema) GetRequired() []string {
-	if m != nil {
-		return m.Required
-	}
-	return nil
-}
-
-func (m *FileSchema) GetType() string {
-	if m != nil {
-		return m.Type
-	}
-	return ""
-}
-
-func (m *FileSchema) GetReadOnly() bool {
-	if m != nil {
-		return m.ReadOnly
-	}
-	return false
-}
-
-func (m *FileSchema) GetExternalDocs() *ExternalDocs {
-	if m != nil {
-		return m.ExternalDocs
-	}
-	return nil
-}
-
-func (m *FileSchema) GetExample() *Any {
-	if m != nil {
-		return m.Example
-	}
-	return nil
-}
-
-func (m *FileSchema) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
-type FormDataParameterSubSchema struct {
-	// Determines whether this parameter is required or optional.
-	Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"`
-	// Determines the location of the parameter.
-	In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"`
-	// A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
-	Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
-	// The name of the parameter.
-	Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
-	// Allows sending a parameter by name only or with an empty value.
-	AllowEmptyValue  bool             `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"`
-	Type             string           `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"`
-	Format           string           `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"`
-	Items            *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"`
-	CollectionFormat string           `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"`
-	Default          *Any             `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"`
-	Maximum          float64          `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"`
-	ExclusiveMaximum bool             `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"`
-	Minimum          float64          `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"`
-	ExclusiveMinimum bool             `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"`
-	MaxLength        int64            `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"`
-	MinLength        int64            `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"`
-	Pattern          string           `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"`
-	MaxItems         int64            `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"`
-	MinItems         int64            `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"`
-	UniqueItems      bool             `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"`
-	Enum             []*Any           `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"`
-	MultipleOf       float64          `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"`
-	VendorExtension  []*NamedAny      `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *FormDataParameterSubSchema) Reset()                    { *m = FormDataParameterSubSchema{} }
-func (m *FormDataParameterSubSchema) String() string            { return proto.CompactTextString(m) }
-func (*FormDataParameterSubSchema) ProtoMessage()               {}
-func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
-
-func (m *FormDataParameterSubSchema) GetRequired() bool {
-	if m != nil {
-		return m.Required
-	}
-	return false
-}
-
-func (m *FormDataParameterSubSchema) GetIn() string {
-	if m != nil {
-		return m.In
-	}
-	return ""
-}
-
-func (m *FormDataParameterSubSchema) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *FormDataParameterSubSchema) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *FormDataParameterSubSchema) GetAllowEmptyValue() bool {
-	if m != nil {
-		return m.AllowEmptyValue
-	}
-	return false
-}
-
-func (m *FormDataParameterSubSchema) GetType() string {
-	if m != nil {
-		return m.Type
-	}
-	return ""
-}
-
-func (m *FormDataParameterSubSchema) GetFormat() string {
-	if m != nil {
-		return m.Format
-	}
-	return ""
-}
-
-func (m *FormDataParameterSubSchema) GetItems() *PrimitivesItems {
-	if m != nil {
-		return m.Items
-	}
-	return nil
-}
-
-func (m *FormDataParameterSubSchema) GetCollectionFormat() string {
-	if m != nil {
-		return m.CollectionFormat
-	}
-	return ""
-}
-
-func (m *FormDataParameterSubSchema) GetDefault() *Any {
-	if m != nil {
-		return m.Default
-	}
-	return nil
-}
-
-func (m *FormDataParameterSubSchema) GetMaximum() float64 {
-	if m != nil {
-		return m.Maximum
-	}
-	return 0
-}
-
-func (m *FormDataParameterSubSchema) GetExclusiveMaximum() bool {
-	if m != nil {
-		return m.ExclusiveMaximum
-	}
-	return false
-}
-
-func (m *FormDataParameterSubSchema) GetMinimum() float64 {
-	if m != nil {
-		return m.Minimum
-	}
-	return 0
-}
-
-func (m *FormDataParameterSubSchema) GetExclusiveMinimum() bool {
-	if m != nil {
-		return m.ExclusiveMinimum
-	}
-	return false
-}
-
-func (m *FormDataParameterSubSchema) GetMaxLength() int64 {
-	if m != nil {
-		return m.MaxLength
-	}
-	return 0
-}
-
-func (m *FormDataParameterSubSchema) GetMinLength() int64 {
-	if m != nil {
-		return m.MinLength
-	}
-	return 0
-}
-
-func (m *FormDataParameterSubSchema) GetPattern() string {
-	if m != nil {
-		return m.Pattern
-	}
-	return ""
-}
-
-func (m *FormDataParameterSubSchema) GetMaxItems() int64 {
-	if m != nil {
-		return m.MaxItems
-	}
-	return 0
-}
-
-func (m *FormDataParameterSubSchema) GetMinItems() int64 {
-	if m != nil {
-		return m.MinItems
-	}
-	return 0
-}
-
-func (m *FormDataParameterSubSchema) GetUniqueItems() bool {
-	if m != nil {
-		return m.UniqueItems
-	}
-	return false
-}
-
-func (m *FormDataParameterSubSchema) GetEnum() []*Any {
-	if m != nil {
-		return m.Enum
-	}
-	return nil
-}
-
-func (m *FormDataParameterSubSchema) GetMultipleOf() float64 {
-	if m != nil {
-		return m.MultipleOf
-	}
-	return 0
-}
-
-func (m *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
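The numeric fields on FormDataParameterSubSchema mirror JSON Schema validation keywords (maximum, exclusiveMaximum, minimum, exclusiveMinimum). A hedged sketch of how a validator might apply them; checkRange is a hypothetical helper, and it treats a zero bound as unset for brevity:

```go
package openapi_v2 // assumed package name

// checkRange is a hypothetical helper applying the JSON-Schema-style
// numeric bounds carried by a FormDataParameterSubSchema. For brevity
// a zero bound is treated as "unset"; a real validator would track
// field presence from the source document instead.
func checkRange(p *FormDataParameterSubSchema, v float64) bool {
	if max := p.GetMaximum(); max != 0 {
		if p.GetExclusiveMaximum() {
			if v >= max {
				return false
			}
		} else if v > max {
			return false
		}
	}
	if min := p.GetMinimum(); min != 0 {
		if p.GetExclusiveMinimum() {
			if v <= min {
				return false
			}
		} else if v < min {
			return false
		}
	}
	return true
}
```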
-type Header struct {
-	Type             string           `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
-	Format           string           `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"`
-	Items            *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"`
-	CollectionFormat string           `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"`
-	Default          *Any             `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"`
-	Maximum          float64          `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"`
-	ExclusiveMaximum bool             `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"`
-	Minimum          float64          `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"`
-	ExclusiveMinimum bool             `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"`
-	MaxLength        int64            `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"`
-	MinLength        int64            `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"`
-	Pattern          string           `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"`
-	MaxItems         int64            `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"`
-	MinItems         int64            `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"`
-	UniqueItems      bool             `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"`
-	Enum             []*Any           `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"`
-	MultipleOf       float64          `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"`
-	Description      string           `protobuf:"bytes,18,opt,name=description" json:"description,omitempty"`
-	VendorExtension  []*NamedAny      `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *Header) Reset()                    { *m = Header{} }
-func (m *Header) String() string            { return proto.CompactTextString(m) }
-func (*Header) ProtoMessage()               {}
-func (*Header) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
-
-func (m *Header) GetType() string {
-	if m != nil {
-		return m.Type
-	}
-	return ""
-}
-
-func (m *Header) GetFormat() string {
-	if m != nil {
-		return m.Format
-	}
-	return ""
-}
-
-func (m *Header) GetItems() *PrimitivesItems {
-	if m != nil {
-		return m.Items
-	}
-	return nil
-}
-
-func (m *Header) GetCollectionFormat() string {
-	if m != nil {
-		return m.CollectionFormat
-	}
-	return ""
-}
-
-func (m *Header) GetDefault() *Any {
-	if m != nil {
-		return m.Default
-	}
-	return nil
-}
-
-func (m *Header) GetMaximum() float64 {
-	if m != nil {
-		return m.Maximum
-	}
-	return 0
-}
-
-func (m *Header) GetExclusiveMaximum() bool {
-	if m != nil {
-		return m.ExclusiveMaximum
-	}
-	return false
-}
-
-func (m *Header) GetMinimum() float64 {
-	if m != nil {
-		return m.Minimum
-	}
-	return 0
-}
-
-func (m *Header) GetExclusiveMinimum() bool {
-	if m != nil {
-		return m.ExclusiveMinimum
-	}
-	return false
-}
-
-func (m *Header) GetMaxLength() int64 {
-	if m != nil {
-		return m.MaxLength
-	}
-	return 0
-}
-
-func (m *Header) GetMinLength() int64 {
-	if m != nil {
-		return m.MinLength
-	}
-	return 0
-}
-
-func (m *Header) GetPattern() string {
-	if m != nil {
-		return m.Pattern
-	}
-	return ""
-}
-
-func (m *Header) GetMaxItems() int64 {
-	if m != nil {
-		return m.MaxItems
-	}
-	return 0
-}
-
-func (m *Header) GetMinItems() int64 {
-	if m != nil {
-		return m.MinItems
-	}
-	return 0
-}
-
-func (m *Header) GetUniqueItems() bool {
-	if m != nil {
-		return m.UniqueItems
-	}
-	return false
-}
-
-func (m *Header) GetEnum() []*Any {
-	if m != nil {
-		return m.Enum
-	}
-	return nil
-}
-
-func (m *Header) GetMultipleOf() float64 {
-	if m != nil {
-		return m.MultipleOf
-	}
-	return 0
-}
-
-func (m *Header) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *Header) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
-type HeaderParameterSubSchema struct {
-	// Determines whether this parameter is required or optional.
-	Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"`
-	// Determines the location of the parameter.
-	In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"`
-	// A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
-	Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
-	// The name of the parameter.
-	Name             string           `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
-	Type             string           `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"`
-	Format           string           `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"`
-	Items            *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"`
-	CollectionFormat string           `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"`
-	Default          *Any             `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"`
-	Maximum          float64          `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"`
-	ExclusiveMaximum bool             `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"`
-	Minimum          float64          `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"`
-	ExclusiveMinimum bool             `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"`
-	MaxLength        int64            `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"`
-	MinLength        int64            `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"`
-	Pattern          string           `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"`
-	MaxItems         int64            `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"`
-	MinItems         int64            `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"`
-	UniqueItems      bool             `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"`
-	Enum             []*Any           `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"`
-	MultipleOf       float64          `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"`
-	VendorExtension  []*NamedAny      `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *HeaderParameterSubSchema) Reset()                    { *m = HeaderParameterSubSchema{} }
-func (m *HeaderParameterSubSchema) String() string            { return proto.CompactTextString(m) }
-func (*HeaderParameterSubSchema) ProtoMessage()               {}
-func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
-
-func (m *HeaderParameterSubSchema) GetRequired() bool {
-	if m != nil {
-		return m.Required
-	}
-	return false
-}
-
-func (m *HeaderParameterSubSchema) GetIn() string {
-	if m != nil {
-		return m.In
-	}
-	return ""
-}
-
-func (m *HeaderParameterSubSchema) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *HeaderParameterSubSchema) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *HeaderParameterSubSchema) GetType() string {
-	if m != nil {
-		return m.Type
-	}
-	return ""
-}
-
-func (m *HeaderParameterSubSchema) GetFormat() string {
-	if m != nil {
-		return m.Format
-	}
-	return ""
-}
-
-func (m *HeaderParameterSubSchema) GetItems() *PrimitivesItems {
-	if m != nil {
-		return m.Items
-	}
-	return nil
-}
-
-func (m *HeaderParameterSubSchema) GetCollectionFormat() string {
-	if m != nil {
-		return m.CollectionFormat
-	}
-	return ""
-}
-
-func (m *HeaderParameterSubSchema) GetDefault() *Any {
-	if m != nil {
-		return m.Default
-	}
-	return nil
-}
-
-func (m *HeaderParameterSubSchema) GetMaximum() float64 {
-	if m != nil {
-		return m.Maximum
-	}
-	return 0
-}
-
-func (m *HeaderParameterSubSchema) GetExclusiveMaximum() bool {
-	if m != nil {
-		return m.ExclusiveMaximum
-	}
-	return false
-}
-
-func (m *HeaderParameterSubSchema) GetMinimum() float64 {
-	if m != nil {
-		return m.Minimum
-	}
-	return 0
-}
-
-func (m *HeaderParameterSubSchema) GetExclusiveMinimum() bool {
-	if m != nil {
-		return m.ExclusiveMinimum
-	}
-	return false
-}
-
-func (m *HeaderParameterSubSchema) GetMaxLength() int64 {
-	if m != nil {
-		return m.MaxLength
-	}
-	return 0
-}
-
-func (m *HeaderParameterSubSchema) GetMinLength() int64 {
-	if m != nil {
-		return m.MinLength
-	}
-	return 0
-}
-
-func (m *HeaderParameterSubSchema) GetPattern() string {
-	if m != nil {
-		return m.Pattern
-	}
-	return ""
-}
-
-func (m *HeaderParameterSubSchema) GetMaxItems() int64 {
-	if m != nil {
-		return m.MaxItems
-	}
-	return 0
-}
-
-func (m *HeaderParameterSubSchema) GetMinItems() int64 {
-	if m != nil {
-		return m.MinItems
-	}
-	return 0
-}
-
-func (m *HeaderParameterSubSchema) GetUniqueItems() bool {
-	if m != nil {
-		return m.UniqueItems
-	}
-	return false
-}
-
-func (m *HeaderParameterSubSchema) GetEnum() []*Any {
-	if m != nil {
-		return m.Enum
-	}
-	return nil
-}
-
-func (m *HeaderParameterSubSchema) GetMultipleOf() float64 {
-	if m != nil {
-		return m.MultipleOf
-	}
-	return 0
-}
-
-func (m *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
-type Headers struct {
-	AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
-}
-
-func (m *Headers) Reset()                    { *m = Headers{} }
-func (m *Headers) String() string            { return proto.CompactTextString(m) }
-func (*Headers) ProtoMessage()               {}
-func (*Headers) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
-
-func (m *Headers) GetAdditionalProperties() []*NamedHeader {
-	if m != nil {
-		return m.AdditionalProperties
-	}
-	return nil
-}
-
-// General information about the API.
-type Info struct {
-	// A unique and precise title of the API.
-	Title string `protobuf:"bytes,1,opt,name=title" json:"title,omitempty"`
-	// A semantic version number of the API.
-	Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
-	// A longer description of the API. Should be different from the title.  GitHub Flavored Markdown is allowed.
-	Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
-	// The terms of service for the API.
-	TermsOfService  string      `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService" json:"terms_of_service,omitempty"`
-	Contact         *Contact    `protobuf:"bytes,5,opt,name=contact" json:"contact,omitempty"`
-	License         *License    `protobuf:"bytes,6,opt,name=license" json:"license,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *Info) Reset()                    { *m = Info{} }
-func (m *Info) String() string            { return proto.CompactTextString(m) }
-func (*Info) ProtoMessage()               {}
-func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
-
-func (m *Info) GetTitle() string {
-	if m != nil {
-		return m.Title
-	}
-	return ""
-}
-
-func (m *Info) GetVersion() string {
-	if m != nil {
-		return m.Version
-	}
-	return ""
-}
-
-func (m *Info) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *Info) GetTermsOfService() string {
-	if m != nil {
-		return m.TermsOfService
-	}
-	return ""
-}
-
-func (m *Info) GetContact() *Contact {
-	if m != nil {
-		return m.Contact
-	}
-	return nil
-}
-
-func (m *Info) GetLicense() *License {
-	if m != nil {
-		return m.License
-	}
-	return nil
-}
-
-func (m *Info) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
-type ItemsItem struct {
-	Schema []*Schema `protobuf:"bytes,1,rep,name=schema" json:"schema,omitempty"`
-}
-
-func (m *ItemsItem) Reset()                    { *m = ItemsItem{} }
-func (m *ItemsItem) String() string            { return proto.CompactTextString(m) }
-func (*ItemsItem) ProtoMessage()               {}
-func (*ItemsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
-
-func (m *ItemsItem) GetSchema() []*Schema {
-	if m != nil {
-		return m.Schema
-	}
-	return nil
-}
-
-type JsonReference struct {
-	XRef        string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"`
-	Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"`
-}
-
-func (m *JsonReference) Reset()                    { *m = JsonReference{} }
-func (m *JsonReference) String() string            { return proto.CompactTextString(m) }
-func (*JsonReference) ProtoMessage()               {}
-func (*JsonReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
-
-func (m *JsonReference) GetXRef() string {
-	if m != nil {
-		return m.XRef
-	}
-	return ""
-}
-
-func (m *JsonReference) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-type License struct {
-	// The name of the license type. It's encouraged to use an OSI-compatible license.
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	// The URL pointing to the license.
-	Url             string      `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *License) Reset()                    { *m = License{} }
-func (m *License) String() string            { return proto.CompactTextString(m) }
-func (*License) ProtoMessage()               {}
-func (*License) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
-
-func (m *License) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *License) GetUrl() string {
-	if m != nil {
-		return m.Url
-	}
-	return ""
-}
-
-func (m *License) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
-// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs.
-type NamedAny struct {
-	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	// Mapped value
-	Value *Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
-}
-
-func (m *NamedAny) Reset()                    { *m = NamedAny{} }
-func (m *NamedAny) String() string            { return proto.CompactTextString(m) }
-func (*NamedAny) ProtoMessage()               {}
-func (*NamedAny) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
-
-func (m *NamedAny) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *NamedAny) GetValue() *Any {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
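As the comment above notes, the Named* wrappers model JSON objects as ordered (name, value) pairs, because protobuf map fields do not preserve key order. A small sketch of an order-preserving lookup; findExtension is a hypothetical helper:

```go
package openapi_v2 // assumed package name

// findExtension is a hypothetical helper: a linear scan over the
// ordered (name, value) pairs. Unlike a Go map, the slice preserves
// the key order of the source document exactly.
func findExtension(exts []*NamedAny, name string) (*Any, bool) {
	for _, e := range exts {
		if e.GetName() == name {
			return e.GetValue(), true
		}
	}
	return nil, false
}
```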
-// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs.
-type NamedHeader struct {
-	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	// Mapped value
-	Value *Header `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
-}
-
-func (m *NamedHeader) Reset()                    { *m = NamedHeader{} }
-func (m *NamedHeader) String() string            { return proto.CompactTextString(m) }
-func (*NamedHeader) ProtoMessage()               {}
-func (*NamedHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
-
-func (m *NamedHeader) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *NamedHeader) GetValue() *Header {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs.
-type NamedParameter struct {
-	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	// Mapped value
-	Value *Parameter `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
-}
-
-func (m *NamedParameter) Reset()                    { *m = NamedParameter{} }
-func (m *NamedParameter) String() string            { return proto.CompactTextString(m) }
-func (*NamedParameter) ProtoMessage()               {}
-func (*NamedParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
-
-func (m *NamedParameter) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *NamedParameter) GetValue() *Parameter {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs.
-type NamedPathItem struct {
-	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	// Mapped value
-	Value *PathItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
-}
-
-func (m *NamedPathItem) Reset()                    { *m = NamedPathItem{} }
-func (m *NamedPathItem) String() string            { return proto.CompactTextString(m) }
-func (*NamedPathItem) ProtoMessage()               {}
-func (*NamedPathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
-
-func (m *NamedPathItem) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *NamedPathItem) GetValue() *PathItem {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs.
-type NamedResponse struct {
-	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	// Mapped value
-	Value *Response `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
-}
-
-func (m *NamedResponse) Reset()                    { *m = NamedResponse{} }
-func (m *NamedResponse) String() string            { return proto.CompactTextString(m) }
-func (*NamedResponse) ProtoMessage()               {}
-func (*NamedResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
-
-func (m *NamedResponse) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *NamedResponse) GetValue() *Response {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs.
-type NamedResponseValue struct {
-	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	// Mapped value
-	Value *ResponseValue `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
-}
-
-func (m *NamedResponseValue) Reset()                    { *m = NamedResponseValue{} }
-func (m *NamedResponseValue) String() string            { return proto.CompactTextString(m) }
-func (*NamedResponseValue) ProtoMessage()               {}
-func (*NamedResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
-
-func (m *NamedResponseValue) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *NamedResponseValue) GetValue() *ResponseValue {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs.
-type NamedSchema struct {
-	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	// Mapped value
-	Value *Schema `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
-}
-
-func (m *NamedSchema) Reset()                    { *m = NamedSchema{} }
-func (m *NamedSchema) String() string            { return proto.CompactTextString(m) }
-func (*NamedSchema) ProtoMessage()               {}
-func (*NamedSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
-
-func (m *NamedSchema) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *NamedSchema) GetValue() *Schema {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs.
-type NamedSecurityDefinitionsItem struct {
-	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	// Mapped value
-	Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
-}
-
-func (m *NamedSecurityDefinitionsItem) Reset()                    { *m = NamedSecurityDefinitionsItem{} }
-func (m *NamedSecurityDefinitionsItem) String() string            { return proto.CompactTextString(m) }
-func (*NamedSecurityDefinitionsItem) ProtoMessage()               {}
-func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
-
-func (m *NamedSecurityDefinitionsItem) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-// Automatically-generated message used to represent maps of string as ordered (name,value) pairs.
-type NamedString struct {
-	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	// Mapped value
-	Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
-}
-
-func (m *NamedString) Reset()                    { *m = NamedString{} }
-func (m *NamedString) String() string            { return proto.CompactTextString(m) }
-func (*NamedString) ProtoMessage()               {}
-func (*NamedString) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
-
-func (m *NamedString) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *NamedString) GetValue() string {
-	if m != nil {
-		return m.Value
-	}
-	return ""
-}
-
-// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs.
-type NamedStringArray struct {
-	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	// Mapped value
-	Value *StringArray `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
-}
-
-func (m *NamedStringArray) Reset()                    { *m = NamedStringArray{} }
-func (m *NamedStringArray) String() string            { return proto.CompactTextString(m) }
-func (*NamedStringArray) ProtoMessage()               {}
-func (*NamedStringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
-
-func (m *NamedStringArray) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *NamedStringArray) GetValue() *StringArray {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-type NonBodyParameter struct {
-	// Types that are valid to be assigned to Oneof:
-	//	*NonBodyParameter_HeaderParameterSubSchema
-	//	*NonBodyParameter_FormDataParameterSubSchema
-	//	*NonBodyParameter_QueryParameterSubSchema
-	//	*NonBodyParameter_PathParameterSubSchema
-	Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"`
-}
-
-func (m *NonBodyParameter) Reset()                    { *m = NonBodyParameter{} }
-func (m *NonBodyParameter) String() string            { return proto.CompactTextString(m) }
-func (*NonBodyParameter) ProtoMessage()               {}
-func (*NonBodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
-
-type isNonBodyParameter_Oneof interface {
-	isNonBodyParameter_Oneof()
-}
-
-type NonBodyParameter_HeaderParameterSubSchema struct {
-	HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,oneof"`
-}
-type NonBodyParameter_FormDataParameterSubSchema struct {
-	FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,oneof"`
-}
-type NonBodyParameter_QueryParameterSubSchema struct {
-	QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,oneof"`
-}
-type NonBodyParameter_PathParameterSubSchema struct {
-	PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,oneof"`
-}
-
-func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof()   {}
-func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {}
-func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof()    {}
-func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof()     {}
-
-func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof {
-	if m != nil {
-		return m.Oneof
-	}
-	return nil
-}
-
-func (m *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema {
-	if x, ok := m.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok {
-		return x.HeaderParameterSubSchema
-	}
-	return nil
-}
-
-func (m *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema {
-	if x, ok := m.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok {
-		return x.FormDataParameterSubSchema
-	}
-	return nil
-}
-
-func (m *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema {
-	if x, ok := m.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok {
-		return x.QueryParameterSubSchema
-	}
-	return nil
-}
-
-func (m *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema {
-	if x, ok := m.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok {
-		return x.PathParameterSubSchema
-	}
-	return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*NonBodyParameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _NonBodyParameter_OneofMarshaler, _NonBodyParameter_OneofUnmarshaler, _NonBodyParameter_OneofSizer, []interface{}{
-		(*NonBodyParameter_HeaderParameterSubSchema)(nil),
-		(*NonBodyParameter_FormDataParameterSubSchema)(nil),
-		(*NonBodyParameter_QueryParameterSubSchema)(nil),
-		(*NonBodyParameter_PathParameterSubSchema)(nil),
-	}
-}
-
-func _NonBodyParameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*NonBodyParameter)
-	// oneof
-	switch x := m.Oneof.(type) {
-	case *NonBodyParameter_HeaderParameterSubSchema:
-		b.EncodeVarint(1<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.HeaderParameterSubSchema); err != nil {
-			return err
-		}
-	case *NonBodyParameter_FormDataParameterSubSchema:
-		b.EncodeVarint(2<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.FormDataParameterSubSchema); err != nil {
-			return err
-		}
-	case *NonBodyParameter_QueryParameterSubSchema:
-		b.EncodeVarint(3<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.QueryParameterSubSchema); err != nil {
-			return err
-		}
-	case *NonBodyParameter_PathParameterSubSchema:
-		b.EncodeVarint(4<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.PathParameterSubSchema); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("NonBodyParameter.Oneof has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _NonBodyParameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*NonBodyParameter)
-	switch tag {
-	case 1: // oneof.header_parameter_sub_schema
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(HeaderParameterSubSchema)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &NonBodyParameter_HeaderParameterSubSchema{msg}
-		return true, err
-	case 2: // oneof.form_data_parameter_sub_schema
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(FormDataParameterSubSchema)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &NonBodyParameter_FormDataParameterSubSchema{msg}
-		return true, err
-	case 3: // oneof.query_parameter_sub_schema
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(QueryParameterSubSchema)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &NonBodyParameter_QueryParameterSubSchema{msg}
-		return true, err
-	case 4: // oneof.path_parameter_sub_schema
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(PathParameterSubSchema)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &NonBodyParameter_PathParameterSubSchema{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _NonBodyParameter_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*NonBodyParameter)
-	// oneof
-	switch x := m.Oneof.(type) {
-	case *NonBodyParameter_HeaderParameterSubSchema:
-		s := proto.Size(x.HeaderParameterSubSchema)
-		n += proto.SizeVarint(1<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *NonBodyParameter_FormDataParameterSubSchema:
-		s := proto.Size(x.FormDataParameterSubSchema)
-		n += proto.SizeVarint(2<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *NonBodyParameter_QueryParameterSubSchema:
-		s := proto.Size(x.QueryParameterSubSchema)
-		n += proto.SizeVarint(3<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *NonBodyParameter_PathParameterSubSchema:
-		s := proto.Size(x.PathParameterSubSchema)
-		n += proto.SizeVarint(4<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
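Callers consume the oneof either through the typed getters (each returns nil unless its arm is set) or with a type switch over GetOneof(), mirroring the switch in _NonBodyParameter_OneofMarshaler above. A hypothetical sketch; describeParam and its labels are illustrative only:

```go
package openapi_v2 // assumed package name

// describeParam is a hypothetical helper showing the idiomatic way to
// consume the oneof: a type switch over GetOneof().
func describeParam(p *NonBodyParameter) string {
	switch x := p.GetOneof().(type) {
	case *NonBodyParameter_HeaderParameterSubSchema:
		return "header: " + x.HeaderParameterSubSchema.GetName()
	case *NonBodyParameter_FormDataParameterSubSchema:
		return "formData: " + x.FormDataParameterSubSchema.GetName()
	case *NonBodyParameter_QueryParameterSubSchema:
		return "query: " + x.QueryParameterSubSchema.GetName()
	case *NonBodyParameter_PathParameterSubSchema:
		return "path: " + x.PathParameterSubSchema.GetName()
	default:
		return "unset"
	}
}
```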
-type Oauth2AccessCodeSecurity struct {
-	Type             string        `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
-	Flow             string        `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"`
-	Scopes           *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"`
-	AuthorizationUrl string        `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"`
-	TokenUrl         string        `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"`
-	Description      string        `protobuf:"bytes,6,opt,name=description" json:"description,omitempty"`
-	VendorExtension  []*NamedAny   `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *Oauth2AccessCodeSecurity) Reset()                    { *m = Oauth2AccessCodeSecurity{} }
-func (m *Oauth2AccessCodeSecurity) String() string            { return proto.CompactTextString(m) }
-func (*Oauth2AccessCodeSecurity) ProtoMessage()               {}
-func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
-
-func (m *Oauth2AccessCodeSecurity) GetType() string {
-	if m != nil {
-		return m.Type
-	}
-	return ""
-}
-
-func (m *Oauth2AccessCodeSecurity) GetFlow() string {
-	if m != nil {
-		return m.Flow
-	}
-	return ""
-}
-
-func (m *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes {
-	if m != nil {
-		return m.Scopes
-	}
-	return nil
-}
-
-func (m *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string {
-	if m != nil {
-		return m.AuthorizationUrl
-	}
-	return ""
-}
-
-func (m *Oauth2AccessCodeSecurity) GetTokenUrl() string {
-	if m != nil {
-		return m.TokenUrl
-	}
-	return ""
-}
-
-func (m *Oauth2AccessCodeSecurity) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
-type Oauth2ApplicationSecurity struct {
-	Type            string        `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
-	Flow            string        `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"`
-	Scopes          *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"`
-	TokenUrl        string        `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"`
-	Description     string        `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"`
-	VendorExtension []*NamedAny   `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *Oauth2ApplicationSecurity) Reset()                    { *m = Oauth2ApplicationSecurity{} }
-func (m *Oauth2ApplicationSecurity) String() string            { return proto.CompactTextString(m) }
-func (*Oauth2ApplicationSecurity) ProtoMessage()               {}
-func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
-
-func (m *Oauth2ApplicationSecurity) GetType() string {
-	if m != nil {
-		return m.Type
-	}
-	return ""
-}
-
-func (m *Oauth2ApplicationSecurity) GetFlow() string {
-	if m != nil {
-		return m.Flow
-	}
-	return ""
-}
-
-func (m *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes {
-	if m != nil {
-		return m.Scopes
-	}
-	return nil
-}
-
-func (m *Oauth2ApplicationSecurity) GetTokenUrl() string {
-	if m != nil {
-		return m.TokenUrl
-	}
-	return ""
-}
-
-func (m *Oauth2ApplicationSecurity) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
-type Oauth2ImplicitSecurity struct {
-	Type             string        `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
-	Flow             string        `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"`
-	Scopes           *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"`
-	AuthorizationUrl string        `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"`
-	Description      string        `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"`
-	VendorExtension  []*NamedAny   `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *Oauth2ImplicitSecurity) Reset()                    { *m = Oauth2ImplicitSecurity{} }
-func (m *Oauth2ImplicitSecurity) String() string            { return proto.CompactTextString(m) }
-func (*Oauth2ImplicitSecurity) ProtoMessage()               {}
-func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} }
-
-func (m *Oauth2ImplicitSecurity) GetType() string {
-	if m != nil {
-		return m.Type
-	}
-	return ""
-}
-
-func (m *Oauth2ImplicitSecurity) GetFlow() string {
-	if m != nil {
-		return m.Flow
-	}
-	return ""
-}
-
-func (m *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes {
-	if m != nil {
-		return m.Scopes
-	}
-	return nil
-}
-
-func (m *Oauth2ImplicitSecurity) GetAuthorizationUrl() string {
-	if m != nil {
-		return m.AuthorizationUrl
-	}
-	return ""
-}
-
-func (m *Oauth2ImplicitSecurity) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
-type Oauth2PasswordSecurity struct {
-	Type            string        `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
-	Flow            string        `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"`
-	Scopes          *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"`
-	TokenUrl        string        `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"`
-	Description     string        `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"`
-	VendorExtension []*NamedAny   `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *Oauth2PasswordSecurity) Reset()                    { *m = Oauth2PasswordSecurity{} }
-func (m *Oauth2PasswordSecurity) String() string            { return proto.CompactTextString(m) }
-func (*Oauth2PasswordSecurity) ProtoMessage()               {}
-func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} }
-
-func (m *Oauth2PasswordSecurity) GetType() string {
-	if m != nil {
-		return m.Type
-	}
-	return ""
-}
-
-func (m *Oauth2PasswordSecurity) GetFlow() string {
-	if m != nil {
-		return m.Flow
-	}
-	return ""
-}
-
-func (m *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes {
-	if m != nil {
-		return m.Scopes
-	}
-	return nil
-}
-
-func (m *Oauth2PasswordSecurity) GetTokenUrl() string {
-	if m != nil {
-		return m.TokenUrl
-	}
-	return ""
-}
-
-func (m *Oauth2PasswordSecurity) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
-type Oauth2Scopes struct {
-	AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
-}
-
-func (m *Oauth2Scopes) Reset()                    { *m = Oauth2Scopes{} }
-func (m *Oauth2Scopes) String() string            { return proto.CompactTextString(m) }
-func (*Oauth2Scopes) ProtoMessage()               {}
-func (*Oauth2Scopes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} }
-
-func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString {
-	if m != nil {
-		return m.AdditionalProperties
-	}
-	return nil
-}
-
-type Operation struct {
-	Tags []string `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"`
-	// A brief summary of the operation.
-	Summary string `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
-	// A longer description of the operation. GitHub Flavored Markdown is allowed.
-	Description  string        `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
-	ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"`
-	// A unique identifier of the operation.
-	OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"`
-	// A list of MIME types the API can produce.
-	Produces []string `protobuf:"bytes,6,rep,name=produces" json:"produces,omitempty"`
-	// A list of MIME types the API can consume.
-	Consumes []string `protobuf:"bytes,7,rep,name=consumes" json:"consumes,omitempty"`
-	// The parameters needed to send a valid API call.
-	Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters" json:"parameters,omitempty"`
-	Responses  *Responses        `protobuf:"bytes,9,opt,name=responses" json:"responses,omitempty"`
-	// The transfer protocol of the API.
-	Schemes         []string               `protobuf:"bytes,10,rep,name=schemes" json:"schemes,omitempty"`
-	Deprecated      bool                   `protobuf:"varint,11,opt,name=deprecated" json:"deprecated,omitempty"`
-	Security        []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"`
-	VendorExtension []*NamedAny            `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *Operation) Reset()                    { *m = Operation{} }
-func (m *Operation) String() string            { return proto.CompactTextString(m) }
-func (*Operation) ProtoMessage()               {}
-func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} }
-
-func (m *Operation) GetTags() []string {
-	if m != nil {
-		return m.Tags
-	}
-	return nil
-}
-
-func (m *Operation) GetSummary() string {
-	if m != nil {
-		return m.Summary
-	}
-	return ""
-}
-
-func (m *Operation) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *Operation) GetExternalDocs() *ExternalDocs {
-	if m != nil {
-		return m.ExternalDocs
-	}
-	return nil
-}
-
-func (m *Operation) GetOperationId() string {
-	if m != nil {
-		return m.OperationId
-	}
-	return ""
-}
-
-func (m *Operation) GetProduces() []string {
-	if m != nil {
-		return m.Produces
-	}
-	return nil
-}
-
-func (m *Operation) GetConsumes() []string {
-	if m != nil {
-		return m.Consumes
-	}
-	return nil
-}
-
-func (m *Operation) GetParameters() []*ParametersItem {
-	if m != nil {
-		return m.Parameters
-	}
-	return nil
-}
-
-func (m *Operation) GetResponses() *Responses {
-	if m != nil {
-		return m.Responses
-	}
-	return nil
-}
-
-func (m *Operation) GetSchemes() []string {
-	if m != nil {
-		return m.Schemes
-	}
-	return nil
-}
-
-func (m *Operation) GetDeprecated() bool {
-	if m != nil {
-		return m.Deprecated
-	}
-	return false
-}
-
-func (m *Operation) GetSecurity() []*SecurityRequirement {
-	if m != nil {
-		return m.Security
-	}
-	return nil
-}
-
-func (m *Operation) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
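For orientation, a sketch of constructing an Operation literal with the fields defined above; buildListOp and every value in it are illustrative only:

```go
package openapi_v2 // assumed package name

// buildListOp is a hypothetical sketch; the field names come from the
// Operation struct above, the values are illustrative only.
func buildListOp() *Operation {
	op := &Operation{
		OperationId: "listDevices",
		Summary:     "List all devices",
		Produces:    []string{"application/json"},
	}
	_ = op.GetOperationId() // "listDevices", via the nil-safe getter
	return op
}
```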
-type Parameter struct {
-	// Types that are valid to be assigned to Oneof:
-	//	*Parameter_BodyParameter
-	//	*Parameter_NonBodyParameter
-	Oneof isParameter_Oneof `protobuf_oneof:"oneof"`
-}
-
-func (m *Parameter) Reset()                    { *m = Parameter{} }
-func (m *Parameter) String() string            { return proto.CompactTextString(m) }
-func (*Parameter) ProtoMessage()               {}
-func (*Parameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} }
-
-type isParameter_Oneof interface {
-	isParameter_Oneof()
-}
-
-type Parameter_BodyParameter struct {
-	BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,oneof"`
-}
-type Parameter_NonBodyParameter struct {
-	NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,oneof"`
-}
-
-func (*Parameter_BodyParameter) isParameter_Oneof()    {}
-func (*Parameter_NonBodyParameter) isParameter_Oneof() {}
-
-func (m *Parameter) GetOneof() isParameter_Oneof {
-	if m != nil {
-		return m.Oneof
-	}
-	return nil
-}
-
-func (m *Parameter) GetBodyParameter() *BodyParameter {
-	if x, ok := m.GetOneof().(*Parameter_BodyParameter); ok {
-		return x.BodyParameter
-	}
-	return nil
-}
-
-func (m *Parameter) GetNonBodyParameter() *NonBodyParameter {
-	if x, ok := m.GetOneof().(*Parameter_NonBodyParameter); ok {
-		return x.NonBodyParameter
-	}
-	return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*Parameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _Parameter_OneofMarshaler, _Parameter_OneofUnmarshaler, _Parameter_OneofSizer, []interface{}{
-		(*Parameter_BodyParameter)(nil),
-		(*Parameter_NonBodyParameter)(nil),
-	}
-}
-
-func _Parameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*Parameter)
-	// oneof
-	switch x := m.Oneof.(type) {
-	case *Parameter_BodyParameter:
-		b.EncodeVarint(1<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.BodyParameter); err != nil {
-			return err
-		}
-	case *Parameter_NonBodyParameter:
-		b.EncodeVarint(2<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.NonBodyParameter); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("Parameter.Oneof has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _Parameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*Parameter)
-	switch tag {
-	case 1: // oneof.body_parameter
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(BodyParameter)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &Parameter_BodyParameter{msg}
-		return true, err
-	case 2: // oneof.non_body_parameter
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(NonBodyParameter)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &Parameter_NonBodyParameter{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _Parameter_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*Parameter)
-	// oneof
-	switch x := m.Oneof.(type) {
-	case *Parameter_BodyParameter:
-		s := proto.Size(x.BodyParameter)
-		n += proto.SizeVarint(1<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *Parameter_NonBodyParameter:
-		s := proto.Size(x.NonBodyParameter)
-		n += proto.SizeVarint(2<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
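The marshaler and sizer above agree on the field key they emit and count: a varint holding tag<<3 | wireType. A small sketch making that arithmetic explicit; wireKey is a hypothetical helper:

```go
package openapi_v2 // assumed package name

import "fmt"

// wireKey reproduces the key computed inline by the oneof marshaler
// and sizer above: a varint of tag<<3 | wireType.
func wireKey(tag, wireType uint64) uint64 { return tag<<3 | wireType }

func ExampleWireKey() {
	// Parameter_NonBodyParameter: tag 2, length-delimited (proto.WireBytes == 2),
	// so the byte prefixing the encoded submessage is 2<<3|2.
	fmt.Println(wireKey(2, 2))
	// Output: 18
}
```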
-// One or more JSON representations for parameters
-type ParameterDefinitions struct {
-	AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
-}
-
-func (m *ParameterDefinitions) Reset()                    { *m = ParameterDefinitions{} }
-func (m *ParameterDefinitions) String() string            { return proto.CompactTextString(m) }
-func (*ParameterDefinitions) ProtoMessage()               {}
-func (*ParameterDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} }
-
-func (m *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter {
-	if m != nil {
-		return m.AdditionalProperties
-	}
-	return nil
-}
-
-type ParametersItem struct {
-	// Types that are valid to be assigned to Oneof:
-	//	*ParametersItem_Parameter
-	//	*ParametersItem_JsonReference
-	Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"`
-}
-
-func (m *ParametersItem) Reset()                    { *m = ParametersItem{} }
-func (m *ParametersItem) String() string            { return proto.CompactTextString(m) }
-func (*ParametersItem) ProtoMessage()               {}
-func (*ParametersItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} }
-
-type isParametersItem_Oneof interface {
-	isParametersItem_Oneof()
-}
-
-type ParametersItem_Parameter struct {
-	Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,oneof"`
-}
-type ParametersItem_JsonReference struct {
-	JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"`
-}
-
-func (*ParametersItem_Parameter) isParametersItem_Oneof()     {}
-func (*ParametersItem_JsonReference) isParametersItem_Oneof() {}
-
-func (m *ParametersItem) GetOneof() isParametersItem_Oneof {
-	if m != nil {
-		return m.Oneof
-	}
-	return nil
-}
-
-func (m *ParametersItem) GetParameter() *Parameter {
-	if x, ok := m.GetOneof().(*ParametersItem_Parameter); ok {
-		return x.Parameter
-	}
-	return nil
-}
-
-func (m *ParametersItem) GetJsonReference() *JsonReference {
-	if x, ok := m.GetOneof().(*ParametersItem_JsonReference); ok {
-		return x.JsonReference
-	}
-	return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*ParametersItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _ParametersItem_OneofMarshaler, _ParametersItem_OneofUnmarshaler, _ParametersItem_OneofSizer, []interface{}{
-		(*ParametersItem_Parameter)(nil),
-		(*ParametersItem_JsonReference)(nil),
-	}
-}
-
-func _ParametersItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*ParametersItem)
-	// oneof
-	switch x := m.Oneof.(type) {
-	case *ParametersItem_Parameter:
-		b.EncodeVarint(1<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Parameter); err != nil {
-			return err
-		}
-	case *ParametersItem_JsonReference:
-		b.EncodeVarint(2<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.JsonReference); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("ParametersItem.Oneof has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _ParametersItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*ParametersItem)
-	switch tag {
-	case 1: // oneof.parameter
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(Parameter)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &ParametersItem_Parameter{msg}
-		return true, err
-	case 2: // oneof.json_reference
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(JsonReference)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &ParametersItem_JsonReference{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _ParametersItem_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*ParametersItem)
-	// oneof
-	switch x := m.Oneof.(type) {
-	case *ParametersItem_Parameter:
-		s := proto.Size(x.Parameter)
-		n += proto.SizeVarint(1<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *ParametersItem_JsonReference:
-		s := proto.Size(x.JsonReference)
-		n += proto.SizeVarint(2<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
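The wrapper types above follow the classic golang/protobuf oneof convention: each branch is a one-field struct satisfying the unexported isParametersItem_Oneof interface, and selecting a branch is plain field assignment. A minimal sketch of writing and reading the oneof, assuming it lives in the same openapi.v2 package as the generated code; the empty Parameter literal is purely illustrative:

func exampleParametersItem() {
	// Select the inline-parameter branch by assigning its wrapper struct.
	item := &ParametersItem{
		Oneof: &ParametersItem_Parameter{Parameter: &Parameter{}},
	}
	// Typed getters return nil unless their branch is the one that is set.
	if p := item.GetParameter(); p != nil {
		_ = p // inline parameter
	} else if ref := item.GetJsonReference(); ref != nil {
		_ = ref // $ref to a parameter defined elsewhere
	}
}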
-type PathItem struct {
-	XRef    string     `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"`
-	Get     *Operation `protobuf:"bytes,2,opt,name=get" json:"get,omitempty"`
-	Put     *Operation `protobuf:"bytes,3,opt,name=put" json:"put,omitempty"`
-	Post    *Operation `protobuf:"bytes,4,opt,name=post" json:"post,omitempty"`
-	Delete  *Operation `protobuf:"bytes,5,opt,name=delete" json:"delete,omitempty"`
-	Options *Operation `protobuf:"bytes,6,opt,name=options" json:"options,omitempty"`
-	Head    *Operation `protobuf:"bytes,7,opt,name=head" json:"head,omitempty"`
-	Patch   *Operation `protobuf:"bytes,8,opt,name=patch" json:"patch,omitempty"`
-	// The parameters needed to send a valid API call.
-	Parameters      []*ParametersItem `protobuf:"bytes,9,rep,name=parameters" json:"parameters,omitempty"`
-	VendorExtension []*NamedAny       `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *PathItem) Reset()                    { *m = PathItem{} }
-func (m *PathItem) String() string            { return proto.CompactTextString(m) }
-func (*PathItem) ProtoMessage()               {}
-func (*PathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} }
-
-func (m *PathItem) GetXRef() string {
-	if m != nil {
-		return m.XRef
-	}
-	return ""
-}
-
-func (m *PathItem) GetGet() *Operation {
-	if m != nil {
-		return m.Get
-	}
-	return nil
-}
-
-func (m *PathItem) GetPut() *Operation {
-	if m != nil {
-		return m.Put
-	}
-	return nil
-}
-
-func (m *PathItem) GetPost() *Operation {
-	if m != nil {
-		return m.Post
-	}
-	return nil
-}
-
-func (m *PathItem) GetDelete() *Operation {
-	if m != nil {
-		return m.Delete
-	}
-	return nil
-}
-
-func (m *PathItem) GetOptions() *Operation {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-func (m *PathItem) GetHead() *Operation {
-	if m != nil {
-		return m.Head
-	}
-	return nil
-}
-
-func (m *PathItem) GetPatch() *Operation {
-	if m != nil {
-		return m.Patch
-	}
-	return nil
-}
-
-func (m *PathItem) GetParameters() []*ParametersItem {
-	if m != nil {
-		return m.Parameters
-	}
-	return nil
-}
-
-func (m *PathItem) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
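PathItem models each HTTP method as its own field, so generic iteration needs a small adapter. A sketch under the same in-package assumption; the helper name is hypothetical:

// operations collects the non-nil operations of a PathItem, keyed by the
// lower-case HTTP method names Swagger 2.0 uses.
func operations(p *PathItem) map[string]*Operation {
	ops := map[string]*Operation{
		"get": p.GetGet(), "put": p.GetPut(), "post": p.GetPost(),
		"delete": p.GetDelete(), "options": p.GetOptions(),
		"head": p.GetHead(), "patch": p.GetPatch(),
	}
	for method, op := range ops {
		if op == nil {
			delete(ops, method) // deleting during range is safe in Go
		}
	}
	return ops
}

Because every generated getter is nil-safe, the helper also tolerates a nil *PathItem and simply returns an empty map.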
-type PathParameterSubSchema struct {
-	// Determines whether this parameter is required.
-	Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"`
-	// Determines the location of the parameter.
-	In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"`
-	// A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
-	Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
-	// The name of the parameter.
-	Name             string           `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
-	Type             string           `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"`
-	Format           string           `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"`
-	Items            *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"`
-	CollectionFormat string           `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"`
-	Default          *Any             `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"`
-	Maximum          float64          `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"`
-	ExclusiveMaximum bool             `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"`
-	Minimum          float64          `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"`
-	ExclusiveMinimum bool             `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"`
-	MaxLength        int64            `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"`
-	MinLength        int64            `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"`
-	Pattern          string           `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"`
-	MaxItems         int64            `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"`
-	MinItems         int64            `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"`
-	UniqueItems      bool             `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"`
-	Enum             []*Any           `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"`
-	MultipleOf       float64          `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"`
-	VendorExtension  []*NamedAny      `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *PathParameterSubSchema) Reset()                    { *m = PathParameterSubSchema{} }
-func (m *PathParameterSubSchema) String() string            { return proto.CompactTextString(m) }
-func (*PathParameterSubSchema) ProtoMessage()               {}
-func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} }
-
-func (m *PathParameterSubSchema) GetRequired() bool {
-	if m != nil {
-		return m.Required
-	}
-	return false
-}
-
-func (m *PathParameterSubSchema) GetIn() string {
-	if m != nil {
-		return m.In
-	}
-	return ""
-}
-
-func (m *PathParameterSubSchema) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *PathParameterSubSchema) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *PathParameterSubSchema) GetType() string {
-	if m != nil {
-		return m.Type
-	}
-	return ""
-}
-
-func (m *PathParameterSubSchema) GetFormat() string {
-	if m != nil {
-		return m.Format
-	}
-	return ""
-}
-
-func (m *PathParameterSubSchema) GetItems() *PrimitivesItems {
-	if m != nil {
-		return m.Items
-	}
-	return nil
-}
-
-func (m *PathParameterSubSchema) GetCollectionFormat() string {
-	if m != nil {
-		return m.CollectionFormat
-	}
-	return ""
-}
-
-func (m *PathParameterSubSchema) GetDefault() *Any {
-	if m != nil {
-		return m.Default
-	}
-	return nil
-}
-
-func (m *PathParameterSubSchema) GetMaximum() float64 {
-	if m != nil {
-		return m.Maximum
-	}
-	return 0
-}
-
-func (m *PathParameterSubSchema) GetExclusiveMaximum() bool {
-	if m != nil {
-		return m.ExclusiveMaximum
-	}
-	return false
-}
-
-func (m *PathParameterSubSchema) GetMinimum() float64 {
-	if m != nil {
-		return m.Minimum
-	}
-	return 0
-}
-
-func (m *PathParameterSubSchema) GetExclusiveMinimum() bool {
-	if m != nil {
-		return m.ExclusiveMinimum
-	}
-	return false
-}
-
-func (m *PathParameterSubSchema) GetMaxLength() int64 {
-	if m != nil {
-		return m.MaxLength
-	}
-	return 0
-}
-
-func (m *PathParameterSubSchema) GetMinLength() int64 {
-	if m != nil {
-		return m.MinLength
-	}
-	return 0
-}
-
-func (m *PathParameterSubSchema) GetPattern() string {
-	if m != nil {
-		return m.Pattern
-	}
-	return ""
-}
-
-func (m *PathParameterSubSchema) GetMaxItems() int64 {
-	if m != nil {
-		return m.MaxItems
-	}
-	return 0
-}
-
-func (m *PathParameterSubSchema) GetMinItems() int64 {
-	if m != nil {
-		return m.MinItems
-	}
-	return 0
-}
-
-func (m *PathParameterSubSchema) GetUniqueItems() bool {
-	if m != nil {
-		return m.UniqueItems
-	}
-	return false
-}
-
-func (m *PathParameterSubSchema) GetEnum() []*Any {
-	if m != nil {
-		return m.Enum
-	}
-	return nil
-}
-
-func (m *PathParameterSubSchema) GetMultipleOf() float64 {
-	if m != nil {
-		return m.MultipleOf
-	}
-	return 0
-}
-
-func (m *PathParameterSubSchema) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
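A sketch of constructing one of these sub-schemas; the petId field is invented for illustration. Swagger 2.0 requires path parameters to be declared required, hence Required: true:

// A path parameter as it would appear under a template like /pets/{petId}.
func examplePathParameter() *PathParameterSubSchema {
	return &PathParameterSubSchema{
		Required: true, // mandatory for in:"path" parameters in Swagger 2.0
		In:       "path",
		Name:     "petId",
		Type:     "integer",
		Format:   "int64",
	}
}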
-// Relative paths to the individual endpoints. They must be relative to the 'basePath'.
-type Paths struct {
-	VendorExtension []*NamedAny      `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-	Path            []*NamedPathItem `protobuf:"bytes,2,rep,name=path" json:"path,omitempty"`
-}
-
-func (m *Paths) Reset()                    { *m = Paths{} }
-func (m *Paths) String() string            { return proto.CompactTextString(m) }
-func (*Paths) ProtoMessage()               {}
-func (*Paths) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} }
-
-func (m *Paths) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
-func (m *Paths) GetPath() []*NamedPathItem {
-	if m != nil {
-		return m.Path
-	}
-	return nil
-}
-
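A sketch that walks the whole path table, reusing the operations helper sketched above for PathItem. It assumes NamedPathItem pairs a Name with a *PathItem Value, as the other Named* wrapper messages registered at the bottom of this file do:

// listEndpoints prints every method/path pair, e.g. "get /pets/{petId}".
// fmt is already imported by this file for its error formatting.
func listEndpoints(paths *Paths) {
	for _, named := range paths.GetPath() {
		for method := range operations(named.GetValue()) {
			fmt.Printf("%s %s\n", method, named.GetName())
		}
	}
}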
-type PrimitivesItems struct {
-	Type             string           `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
-	Format           string           `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"`
-	Items            *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"`
-	CollectionFormat string           `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"`
-	Default          *Any             `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"`
-	Maximum          float64          `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"`
-	ExclusiveMaximum bool             `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"`
-	Minimum          float64          `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"`
-	ExclusiveMinimum bool             `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"`
-	MaxLength        int64            `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"`
-	MinLength        int64            `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"`
-	Pattern          string           `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"`
-	MaxItems         int64            `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"`
-	MinItems         int64            `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"`
-	UniqueItems      bool             `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"`
-	Enum             []*Any           `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"`
-	MultipleOf       float64          `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"`
-	VendorExtension  []*NamedAny      `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *PrimitivesItems) Reset()                    { *m = PrimitivesItems{} }
-func (m *PrimitivesItems) String() string            { return proto.CompactTextString(m) }
-func (*PrimitivesItems) ProtoMessage()               {}
-func (*PrimitivesItems) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} }
-
-func (m *PrimitivesItems) GetType() string {
-	if m != nil {
-		return m.Type
-	}
-	return ""
-}
-
-func (m *PrimitivesItems) GetFormat() string {
-	if m != nil {
-		return m.Format
-	}
-	return ""
-}
-
-func (m *PrimitivesItems) GetItems() *PrimitivesItems {
-	if m != nil {
-		return m.Items
-	}
-	return nil
-}
-
-func (m *PrimitivesItems) GetCollectionFormat() string {
-	if m != nil {
-		return m.CollectionFormat
-	}
-	return ""
-}
-
-func (m *PrimitivesItems) GetDefault() *Any {
-	if m != nil {
-		return m.Default
-	}
-	return nil
-}
-
-func (m *PrimitivesItems) GetMaximum() float64 {
-	if m != nil {
-		return m.Maximum
-	}
-	return 0
-}
-
-func (m *PrimitivesItems) GetExclusiveMaximum() bool {
-	if m != nil {
-		return m.ExclusiveMaximum
-	}
-	return false
-}
-
-func (m *PrimitivesItems) GetMinimum() float64 {
-	if m != nil {
-		return m.Minimum
-	}
-	return 0
-}
-
-func (m *PrimitivesItems) GetExclusiveMinimum() bool {
-	if m != nil {
-		return m.ExclusiveMinimum
-	}
-	return false
-}
-
-func (m *PrimitivesItems) GetMaxLength() int64 {
-	if m != nil {
-		return m.MaxLength
-	}
-	return 0
-}
-
-func (m *PrimitivesItems) GetMinLength() int64 {
-	if m != nil {
-		return m.MinLength
-	}
-	return 0
-}
-
-func (m *PrimitivesItems) GetPattern() string {
-	if m != nil {
-		return m.Pattern
-	}
-	return ""
-}
-
-func (m *PrimitivesItems) GetMaxItems() int64 {
-	if m != nil {
-		return m.MaxItems
-	}
-	return 0
-}
-
-func (m *PrimitivesItems) GetMinItems() int64 {
-	if m != nil {
-		return m.MinItems
-	}
-	return 0
-}
-
-func (m *PrimitivesItems) GetUniqueItems() bool {
-	if m != nil {
-		return m.UniqueItems
-	}
-	return false
-}
-
-func (m *PrimitivesItems) GetEnum() []*Any {
-	if m != nil {
-		return m.Enum
-	}
-	return nil
-}
-
-func (m *PrimitivesItems) GetMultipleOf() float64 {
-	if m != nil {
-		return m.MultipleOf
-	}
-	return 0
-}
-
-func (m *PrimitivesItems) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
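PrimitivesItems nests through its own Items field to describe arrays of arrays. A sketch computing the nesting depth; the recursion bottoms out naturally because the getter is nil-safe:

// itemsDepth returns 0 for nil and 1 + the depth of the nested items spec.
func itemsDepth(p *PrimitivesItems) int {
	if p == nil {
		return 0
	}
	return 1 + itemsDepth(p.GetItems())
}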
-type Properties struct {
-	AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
-}
-
-func (m *Properties) Reset()                    { *m = Properties{} }
-func (m *Properties) String() string            { return proto.CompactTextString(m) }
-func (*Properties) ProtoMessage()               {}
-func (*Properties) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} }
-
-func (m *Properties) GetAdditionalProperties() []*NamedSchema {
-	if m != nil {
-		return m.AdditionalProperties
-	}
-	return nil
-}
-
-type QueryParameterSubSchema struct {
-	// Determines whether this parameter is required.
-	Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"`
-	// Determines the location of the parameter.
-	In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"`
-	// A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
-	Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
-	// The name of the parameter.
-	Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
-	// Allows sending a parameter by name only or with an empty value.
-	AllowEmptyValue  bool             `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"`
-	Type             string           `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"`
-	Format           string           `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"`
-	Items            *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"`
-	CollectionFormat string           `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"`
-	Default          *Any             `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"`
-	Maximum          float64          `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"`
-	ExclusiveMaximum bool             `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"`
-	Minimum          float64          `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"`
-	ExclusiveMinimum bool             `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"`
-	MaxLength        int64            `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"`
-	MinLength        int64            `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"`
-	Pattern          string           `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"`
-	MaxItems         int64            `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"`
-	MinItems         int64            `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"`
-	UniqueItems      bool             `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"`
-	Enum             []*Any           `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"`
-	MultipleOf       float64          `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"`
-	VendorExtension  []*NamedAny      `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *QueryParameterSubSchema) Reset()                    { *m = QueryParameterSubSchema{} }
-func (m *QueryParameterSubSchema) String() string            { return proto.CompactTextString(m) }
-func (*QueryParameterSubSchema) ProtoMessage()               {}
-func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} }
-
-func (m *QueryParameterSubSchema) GetRequired() bool {
-	if m != nil {
-		return m.Required
-	}
-	return false
-}
-
-func (m *QueryParameterSubSchema) GetIn() string {
-	if m != nil {
-		return m.In
-	}
-	return ""
-}
-
-func (m *QueryParameterSubSchema) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *QueryParameterSubSchema) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *QueryParameterSubSchema) GetAllowEmptyValue() bool {
-	if m != nil {
-		return m.AllowEmptyValue
-	}
-	return false
-}
-
-func (m *QueryParameterSubSchema) GetType() string {
-	if m != nil {
-		return m.Type
-	}
-	return ""
-}
-
-func (m *QueryParameterSubSchema) GetFormat() string {
-	if m != nil {
-		return m.Format
-	}
-	return ""
-}
-
-func (m *QueryParameterSubSchema) GetItems() *PrimitivesItems {
-	if m != nil {
-		return m.Items
-	}
-	return nil
-}
-
-func (m *QueryParameterSubSchema) GetCollectionFormat() string {
-	if m != nil {
-		return m.CollectionFormat
-	}
-	return ""
-}
-
-func (m *QueryParameterSubSchema) GetDefault() *Any {
-	if m != nil {
-		return m.Default
-	}
-	return nil
-}
-
-func (m *QueryParameterSubSchema) GetMaximum() float64 {
-	if m != nil {
-		return m.Maximum
-	}
-	return 0
-}
-
-func (m *QueryParameterSubSchema) GetExclusiveMaximum() bool {
-	if m != nil {
-		return m.ExclusiveMaximum
-	}
-	return false
-}
-
-func (m *QueryParameterSubSchema) GetMinimum() float64 {
-	if m != nil {
-		return m.Minimum
-	}
-	return 0
-}
-
-func (m *QueryParameterSubSchema) GetExclusiveMinimum() bool {
-	if m != nil {
-		return m.ExclusiveMinimum
-	}
-	return false
-}
-
-func (m *QueryParameterSubSchema) GetMaxLength() int64 {
-	if m != nil {
-		return m.MaxLength
-	}
-	return 0
-}
-
-func (m *QueryParameterSubSchema) GetMinLength() int64 {
-	if m != nil {
-		return m.MinLength
-	}
-	return 0
-}
-
-func (m *QueryParameterSubSchema) GetPattern() string {
-	if m != nil {
-		return m.Pattern
-	}
-	return ""
-}
-
-func (m *QueryParameterSubSchema) GetMaxItems() int64 {
-	if m != nil {
-		return m.MaxItems
-	}
-	return 0
-}
-
-func (m *QueryParameterSubSchema) GetMinItems() int64 {
-	if m != nil {
-		return m.MinItems
-	}
-	return 0
-}
-
-func (m *QueryParameterSubSchema) GetUniqueItems() bool {
-	if m != nil {
-		return m.UniqueItems
-	}
-	return false
-}
-
-func (m *QueryParameterSubSchema) GetEnum() []*Any {
-	if m != nil {
-		return m.Enum
-	}
-	return nil
-}
-
-func (m *QueryParameterSubSchema) GetMultipleOf() float64 {
-	if m != nil {
-		return m.MultipleOf
-	}
-	return 0
-}
-
-func (m *QueryParameterSubSchema) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
-type Response struct {
-	Description     string      `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"`
-	Schema          *SchemaItem `protobuf:"bytes,2,opt,name=schema" json:"schema,omitempty"`
-	Headers         *Headers    `protobuf:"bytes,3,opt,name=headers" json:"headers,omitempty"`
-	Examples        *Examples   `protobuf:"bytes,4,opt,name=examples" json:"examples,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *Response) Reset()                    { *m = Response{} }
-func (m *Response) String() string            { return proto.CompactTextString(m) }
-func (*Response) ProtoMessage()               {}
-func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} }
-
-func (m *Response) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *Response) GetSchema() *SchemaItem {
-	if m != nil {
-		return m.Schema
-	}
-	return nil
-}
-
-func (m *Response) GetHeaders() *Headers {
-	if m != nil {
-		return m.Headers
-	}
-	return nil
-}
-
-func (m *Response) GetExamples() *Examples {
-	if m != nil {
-		return m.Examples
-	}
-	return nil
-}
-
-func (m *Response) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
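Every getter in this file guards its receiver with an `if m != nil` check, so the accessors are safe to call on nil pointers and can be chained without intermediate checks. A sketch:

// responseTitle returns "" instead of panicking even when resp, its Schema
// oneof wrapper, or the wrapper's nested Schema branch is unset.
func responseTitle(resp *Response) string {
	return resp.GetSchema().GetSchema().GetTitle()
}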
-// One or more JSON representations for responses
-type ResponseDefinitions struct {
-	AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
-}
-
-func (m *ResponseDefinitions) Reset()                    { *m = ResponseDefinitions{} }
-func (m *ResponseDefinitions) String() string            { return proto.CompactTextString(m) }
-func (*ResponseDefinitions) ProtoMessage()               {}
-func (*ResponseDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} }
-
-func (m *ResponseDefinitions) GetAdditionalProperties() []*NamedResponse {
-	if m != nil {
-		return m.AdditionalProperties
-	}
-	return nil
-}
-
-type ResponseValue struct {
-	// Types that are valid to be assigned to Oneof:
-	//	*ResponseValue_Response
-	//	*ResponseValue_JsonReference
-	Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"`
-}
-
-func (m *ResponseValue) Reset()                    { *m = ResponseValue{} }
-func (m *ResponseValue) String() string            { return proto.CompactTextString(m) }
-func (*ResponseValue) ProtoMessage()               {}
-func (*ResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} }
-
-type isResponseValue_Oneof interface {
-	isResponseValue_Oneof()
-}
-
-type ResponseValue_Response struct {
-	Response *Response `protobuf:"bytes,1,opt,name=response,oneof"`
-}
-type ResponseValue_JsonReference struct {
-	JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"`
-}
-
-func (*ResponseValue_Response) isResponseValue_Oneof()      {}
-func (*ResponseValue_JsonReference) isResponseValue_Oneof() {}
-
-func (m *ResponseValue) GetOneof() isResponseValue_Oneof {
-	if m != nil {
-		return m.Oneof
-	}
-	return nil
-}
-
-func (m *ResponseValue) GetResponse() *Response {
-	if x, ok := m.GetOneof().(*ResponseValue_Response); ok {
-		return x.Response
-	}
-	return nil
-}
-
-func (m *ResponseValue) GetJsonReference() *JsonReference {
-	if x, ok := m.GetOneof().(*ResponseValue_JsonReference); ok {
-		return x.JsonReference
-	}
-	return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*ResponseValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _ResponseValue_OneofMarshaler, _ResponseValue_OneofUnmarshaler, _ResponseValue_OneofSizer, []interface{}{
-		(*ResponseValue_Response)(nil),
-		(*ResponseValue_JsonReference)(nil),
-	}
-}
-
-func _ResponseValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*ResponseValue)
-	// oneof
-	switch x := m.Oneof.(type) {
-	case *ResponseValue_Response:
-		b.EncodeVarint(1<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Response); err != nil {
-			return err
-		}
-	case *ResponseValue_JsonReference:
-		b.EncodeVarint(2<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.JsonReference); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("ResponseValue.Oneof has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _ResponseValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*ResponseValue)
-	switch tag {
-	case 1: // oneof.response
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(Response)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &ResponseValue_Response{msg}
-		return true, err
-	case 2: // oneof.json_reference
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(JsonReference)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &ResponseValue_JsonReference{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _ResponseValue_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*ResponseValue)
-	// oneof
-	switch x := m.Oneof.(type) {
-	case *ResponseValue_Response:
-		s := proto.Size(x.Response)
-		n += proto.SizeVarint(1<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *ResponseValue_JsonReference:
-		s := proto.Size(x.JsonReference)
-		n += proto.SizeVarint(2<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
-// Response object names can be any valid HTTP status code or 'default'.
-type Responses struct {
-	ResponseCode    []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode" json:"response_code,omitempty"`
-	VendorExtension []*NamedAny           `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *Responses) Reset()                    { *m = Responses{} }
-func (m *Responses) String() string            { return proto.CompactTextString(m) }
-func (*Responses) ProtoMessage()               {}
-func (*Responses) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} }
-
-func (m *Responses) GetResponseCode() []*NamedResponseValue {
-	if m != nil {
-		return m.ResponseCode
-	}
-	return nil
-}
-
-func (m *Responses) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
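A sketch of flattening the response table into its inline responses, assuming NamedResponseValue pairs a status-code Name ("200", "default", ...) with a *ResponseValue Value, like the other Named* wrappers:

// inlineResponses keeps only responses defined inline; $ref entries would
// need a second resolution pass against the document's response definitions.
func inlineResponses(r *Responses) map[string]*Response {
	out := map[string]*Response{}
	for _, named := range r.GetResponseCode() {
		if resp := named.GetValue().GetResponse(); resp != nil {
			out[named.GetName()] = resp
		}
	}
	return out
}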
-// A deterministic version of a JSON Schema object.
-type Schema struct {
-	XRef                 string                    `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"`
-	Format               string                    `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"`
-	Title                string                    `protobuf:"bytes,3,opt,name=title" json:"title,omitempty"`
-	Description          string                    `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"`
-	Default              *Any                      `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"`
-	MultipleOf           float64                   `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"`
-	Maximum              float64                   `protobuf:"fixed64,7,opt,name=maximum" json:"maximum,omitempty"`
-	ExclusiveMaximum     bool                      `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"`
-	Minimum              float64                   `protobuf:"fixed64,9,opt,name=minimum" json:"minimum,omitempty"`
-	ExclusiveMinimum     bool                      `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"`
-	MaxLength            int64                     `protobuf:"varint,11,opt,name=max_length,json=maxLength" json:"max_length,omitempty"`
-	MinLength            int64                     `protobuf:"varint,12,opt,name=min_length,json=minLength" json:"min_length,omitempty"`
-	Pattern              string                    `protobuf:"bytes,13,opt,name=pattern" json:"pattern,omitempty"`
-	MaxItems             int64                     `protobuf:"varint,14,opt,name=max_items,json=maxItems" json:"max_items,omitempty"`
-	MinItems             int64                     `protobuf:"varint,15,opt,name=min_items,json=minItems" json:"min_items,omitempty"`
-	UniqueItems          bool                      `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"`
-	MaxProperties        int64                     `protobuf:"varint,17,opt,name=max_properties,json=maxProperties" json:"max_properties,omitempty"`
-	MinProperties        int64                     `protobuf:"varint,18,opt,name=min_properties,json=minProperties" json:"min_properties,omitempty"`
-	Required             []string                  `protobuf:"bytes,19,rep,name=required" json:"required,omitempty"`
-	Enum                 []*Any                    `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"`
-	AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
-	Type                 *TypeItem                 `protobuf:"bytes,22,opt,name=type" json:"type,omitempty"`
-	Items                *ItemsItem                `protobuf:"bytes,23,opt,name=items" json:"items,omitempty"`
-	AllOf                []*Schema                 `protobuf:"bytes,24,rep,name=all_of,json=allOf" json:"all_of,omitempty"`
-	Properties           *Properties               `protobuf:"bytes,25,opt,name=properties" json:"properties,omitempty"`
-	Discriminator        string                    `protobuf:"bytes,26,opt,name=discriminator" json:"discriminator,omitempty"`
-	ReadOnly             bool                      `protobuf:"varint,27,opt,name=read_only,json=readOnly" json:"read_only,omitempty"`
-	Xml                  *Xml                      `protobuf:"bytes,28,opt,name=xml" json:"xml,omitempty"`
-	ExternalDocs         *ExternalDocs             `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"`
-	Example              *Any                      `protobuf:"bytes,30,opt,name=example" json:"example,omitempty"`
-	VendorExtension      []*NamedAny               `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *Schema) Reset()                    { *m = Schema{} }
-func (m *Schema) String() string            { return proto.CompactTextString(m) }
-func (*Schema) ProtoMessage()               {}
-func (*Schema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} }
-
-func (m *Schema) GetXRef() string {
-	if m != nil {
-		return m.XRef
-	}
-	return ""
-}
-
-func (m *Schema) GetFormat() string {
-	if m != nil {
-		return m.Format
-	}
-	return ""
-}
-
-func (m *Schema) GetTitle() string {
-	if m != nil {
-		return m.Title
-	}
-	return ""
-}
-
-func (m *Schema) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *Schema) GetDefault() *Any {
-	if m != nil {
-		return m.Default
-	}
-	return nil
-}
-
-func (m *Schema) GetMultipleOf() float64 {
-	if m != nil {
-		return m.MultipleOf
-	}
-	return 0
-}
-
-func (m *Schema) GetMaximum() float64 {
-	if m != nil {
-		return m.Maximum
-	}
-	return 0
-}
-
-func (m *Schema) GetExclusiveMaximum() bool {
-	if m != nil {
-		return m.ExclusiveMaximum
-	}
-	return false
-}
-
-func (m *Schema) GetMinimum() float64 {
-	if m != nil {
-		return m.Minimum
-	}
-	return 0
-}
-
-func (m *Schema) GetExclusiveMinimum() bool {
-	if m != nil {
-		return m.ExclusiveMinimum
-	}
-	return false
-}
-
-func (m *Schema) GetMaxLength() int64 {
-	if m != nil {
-		return m.MaxLength
-	}
-	return 0
-}
-
-func (m *Schema) GetMinLength() int64 {
-	if m != nil {
-		return m.MinLength
-	}
-	return 0
-}
-
-func (m *Schema) GetPattern() string {
-	if m != nil {
-		return m.Pattern
-	}
-	return ""
-}
-
-func (m *Schema) GetMaxItems() int64 {
-	if m != nil {
-		return m.MaxItems
-	}
-	return 0
-}
-
-func (m *Schema) GetMinItems() int64 {
-	if m != nil {
-		return m.MinItems
-	}
-	return 0
-}
-
-func (m *Schema) GetUniqueItems() bool {
-	if m != nil {
-		return m.UniqueItems
-	}
-	return false
-}
-
-func (m *Schema) GetMaxProperties() int64 {
-	if m != nil {
-		return m.MaxProperties
-	}
-	return 0
-}
-
-func (m *Schema) GetMinProperties() int64 {
-	if m != nil {
-		return m.MinProperties
-	}
-	return 0
-}
-
-func (m *Schema) GetRequired() []string {
-	if m != nil {
-		return m.Required
-	}
-	return nil
-}
-
-func (m *Schema) GetEnum() []*Any {
-	if m != nil {
-		return m.Enum
-	}
-	return nil
-}
-
-func (m *Schema) GetAdditionalProperties() *AdditionalPropertiesItem {
-	if m != nil {
-		return m.AdditionalProperties
-	}
-	return nil
-}
-
-func (m *Schema) GetType() *TypeItem {
-	if m != nil {
-		return m.Type
-	}
-	return nil
-}
-
-func (m *Schema) GetItems() *ItemsItem {
-	if m != nil {
-		return m.Items
-	}
-	return nil
-}
-
-func (m *Schema) GetAllOf() []*Schema {
-	if m != nil {
-		return m.AllOf
-	}
-	return nil
-}
-
-func (m *Schema) GetProperties() *Properties {
-	if m != nil {
-		return m.Properties
-	}
-	return nil
-}
-
-func (m *Schema) GetDiscriminator() string {
-	if m != nil {
-		return m.Discriminator
-	}
-	return ""
-}
-
-func (m *Schema) GetReadOnly() bool {
-	if m != nil {
-		return m.ReadOnly
-	}
-	return false
-}
-
-func (m *Schema) GetXml() *Xml {
-	if m != nil {
-		return m.Xml
-	}
-	return nil
-}
-
-func (m *Schema) GetExternalDocs() *ExternalDocs {
-	if m != nil {
-		return m.ExternalDocs
-	}
-	return nil
-}
-
-func (m *Schema) GetExample() *Any {
-	if m != nil {
-		return m.Example
-	}
-	return nil
-}
-
-func (m *Schema) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
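Schema is recursive through AllOf and Properties (Items could be handled the same way). A sketch that counts reachable schemas, assuming NamedSchema pairs a Name with a *Schema Value; gnostic keeps $ref as the XRef string rather than a back-pointer, so the structure is a tree and no visited set is needed:

func countSchemas(s *Schema) int {
	if s == nil {
		return 0
	}
	n := 1
	for _, sub := range s.GetAllOf() {
		n += countSchemas(sub)
	}
	// GetProperties and GetAdditionalProperties are nil-safe, so this loop
	// simply does nothing when no properties are set.
	for _, named := range s.GetProperties().GetAdditionalProperties() {
		n += countSchemas(named.GetValue())
	}
	return n
}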
-type SchemaItem struct {
-	// Types that are valid to be assigned to Oneof:
-	//	*SchemaItem_Schema
-	//	*SchemaItem_FileSchema
-	Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"`
-}
-
-func (m *SchemaItem) Reset()                    { *m = SchemaItem{} }
-func (m *SchemaItem) String() string            { return proto.CompactTextString(m) }
-func (*SchemaItem) ProtoMessage()               {}
-func (*SchemaItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} }
-
-type isSchemaItem_Oneof interface {
-	isSchemaItem_Oneof()
-}
-
-type SchemaItem_Schema struct {
-	Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"`
-}
-type SchemaItem_FileSchema struct {
-	FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,oneof"`
-}
-
-func (*SchemaItem_Schema) isSchemaItem_Oneof()     {}
-func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {}
-
-func (m *SchemaItem) GetOneof() isSchemaItem_Oneof {
-	if m != nil {
-		return m.Oneof
-	}
-	return nil
-}
-
-func (m *SchemaItem) GetSchema() *Schema {
-	if x, ok := m.GetOneof().(*SchemaItem_Schema); ok {
-		return x.Schema
-	}
-	return nil
-}
-
-func (m *SchemaItem) GetFileSchema() *FileSchema {
-	if x, ok := m.GetOneof().(*SchemaItem_FileSchema); ok {
-		return x.FileSchema
-	}
-	return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*SchemaItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _SchemaItem_OneofMarshaler, _SchemaItem_OneofUnmarshaler, _SchemaItem_OneofSizer, []interface{}{
-		(*SchemaItem_Schema)(nil),
-		(*SchemaItem_FileSchema)(nil),
-	}
-}
-
-func _SchemaItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*SchemaItem)
-	// oneof
-	switch x := m.Oneof.(type) {
-	case *SchemaItem_Schema:
-		b.EncodeVarint(1<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Schema); err != nil {
-			return err
-		}
-	case *SchemaItem_FileSchema:
-		b.EncodeVarint(2<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.FileSchema); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("SchemaItem.Oneof has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _SchemaItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*SchemaItem)
-	switch tag {
-	case 1: // oneof.schema
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(Schema)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &SchemaItem_Schema{msg}
-		return true, err
-	case 2: // oneof.file_schema
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(FileSchema)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &SchemaItem_FileSchema{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _SchemaItem_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*SchemaItem)
-	// oneof
-	switch x := m.Oneof.(type) {
-	case *SchemaItem_Schema:
-		s := proto.Size(x.Schema)
-		n += proto.SizeVarint(1<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *SchemaItem_FileSchema:
-		s := proto.Size(x.FileSchema)
-		n += proto.SizeVarint(2<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
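The sizer above is straight wire-format arithmetic: proto.SizeVarint(1<<3|proto.WireBytes) is the size of the tag varint (1<<3|2 = 10, one byte), proto.SizeVarint(uint64(s)) the size of the length prefix, and s the payload itself. For a 10-byte Schema payload on branch 1 that works out to 1 + 1 + 10 = 12 bytes, exactly what the marshaler emits via EncodeVarint followed by the length-prefixed EncodeMessage.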
-type SecurityDefinitions struct {
-	AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
-}
-
-func (m *SecurityDefinitions) Reset()                    { *m = SecurityDefinitions{} }
-func (m *SecurityDefinitions) String() string            { return proto.CompactTextString(m) }
-func (*SecurityDefinitions) ProtoMessage()               {}
-func (*SecurityDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} }
-
-func (m *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem {
-	if m != nil {
-		return m.AdditionalProperties
-	}
-	return nil
-}
-
-type SecurityDefinitionsItem struct {
-	// Types that are valid to be assigned to Oneof:
-	//	*SecurityDefinitionsItem_BasicAuthenticationSecurity
-	//	*SecurityDefinitionsItem_ApiKeySecurity
-	//	*SecurityDefinitionsItem_Oauth2ImplicitSecurity
-	//	*SecurityDefinitionsItem_Oauth2PasswordSecurity
-	//	*SecurityDefinitionsItem_Oauth2ApplicationSecurity
-	//	*SecurityDefinitionsItem_Oauth2AccessCodeSecurity
-	Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"`
-}
-
-func (m *SecurityDefinitionsItem) Reset()                    { *m = SecurityDefinitionsItem{} }
-func (m *SecurityDefinitionsItem) String() string            { return proto.CompactTextString(m) }
-func (*SecurityDefinitionsItem) ProtoMessage()               {}
-func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} }
-
-type isSecurityDefinitionsItem_Oneof interface {
-	isSecurityDefinitionsItem_Oneof()
-}
-
-type SecurityDefinitionsItem_BasicAuthenticationSecurity struct {
-	BasicAuthenticationSecurity *BasicAuthenticationSecurity `protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,oneof"`
-}
-type SecurityDefinitionsItem_ApiKeySecurity struct {
-	ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,oneof"`
-}
-type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct {
-	Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,oneof"`
-}
-type SecurityDefinitionsItem_Oauth2PasswordSecurity struct {
-	Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,oneof"`
-}
-type SecurityDefinitionsItem_Oauth2ApplicationSecurity struct {
-	Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,oneof"`
-}
-type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct {
-	Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,oneof"`
-}
-
-func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {}
-func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof()              {}
-func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof()      {}
-func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof()      {}
-func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof()   {}
-func (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof()    {}
-
-func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof {
-	if m != nil {
-		return m.Oneof
-	}
-	return nil
-}
-
-func (m *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity {
-	if x, ok := m.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok {
-		return x.BasicAuthenticationSecurity
-	}
-	return nil
-}
-
-func (m *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity {
-	if x, ok := m.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok {
-		return x.ApiKeySecurity
-	}
-	return nil
-}
-
-func (m *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity {
-	if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok {
-		return x.Oauth2ImplicitSecurity
-	}
-	return nil
-}
-
-func (m *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity {
-	if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok {
-		return x.Oauth2PasswordSecurity
-	}
-	return nil
-}
-
-func (m *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity {
-	if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok {
-		return x.Oauth2ApplicationSecurity
-	}
-	return nil
-}
-
-func (m *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity {
-	if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok {
-		return x.Oauth2AccessCodeSecurity
-	}
-	return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*SecurityDefinitionsItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _SecurityDefinitionsItem_OneofMarshaler, _SecurityDefinitionsItem_OneofUnmarshaler, _SecurityDefinitionsItem_OneofSizer, []interface{}{
-		(*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil),
-		(*SecurityDefinitionsItem_ApiKeySecurity)(nil),
-		(*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil),
-		(*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil),
-		(*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil),
-		(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil),
-	}
-}
-
-func _SecurityDefinitionsItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*SecurityDefinitionsItem)
-	// oneof
-	switch x := m.Oneof.(type) {
-	case *SecurityDefinitionsItem_BasicAuthenticationSecurity:
-		b.EncodeVarint(1<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.BasicAuthenticationSecurity); err != nil {
-			return err
-		}
-	case *SecurityDefinitionsItem_ApiKeySecurity:
-		b.EncodeVarint(2<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.ApiKeySecurity); err != nil {
-			return err
-		}
-	case *SecurityDefinitionsItem_Oauth2ImplicitSecurity:
-		b.EncodeVarint(3<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Oauth2ImplicitSecurity); err != nil {
-			return err
-		}
-	case *SecurityDefinitionsItem_Oauth2PasswordSecurity:
-		b.EncodeVarint(4<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Oauth2PasswordSecurity); err != nil {
-			return err
-		}
-	case *SecurityDefinitionsItem_Oauth2ApplicationSecurity:
-		b.EncodeVarint(5<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Oauth2ApplicationSecurity); err != nil {
-			return err
-		}
-	case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity:
-		b.EncodeVarint(6<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Oauth2AccessCodeSecurity); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("SecurityDefinitionsItem.Oneof has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _SecurityDefinitionsItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*SecurityDefinitionsItem)
-	switch tag {
-	case 1: // oneof.basic_authentication_security
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(BasicAuthenticationSecurity)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{msg}
-		return true, err
-	case 2: // oneof.api_key_security
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(ApiKeySecurity)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{msg}
-		return true, err
-	case 3: // oneof.oauth2_implicit_security
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(Oauth2ImplicitSecurity)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{msg}
-		return true, err
-	case 4: // oneof.oauth2_password_security
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(Oauth2PasswordSecurity)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{msg}
-		return true, err
-	case 5: // oneof.oauth2_application_security
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(Oauth2ApplicationSecurity)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{msg}
-		return true, err
-	case 6: // oneof.oauth2_access_code_security
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(Oauth2AccessCodeSecurity)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _SecurityDefinitionsItem_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*SecurityDefinitionsItem)
-	// oneof
-	switch x := m.Oneof.(type) {
-	case *SecurityDefinitionsItem_BasicAuthenticationSecurity:
-		s := proto.Size(x.BasicAuthenticationSecurity)
-		n += proto.SizeVarint(1<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *SecurityDefinitionsItem_ApiKeySecurity:
-		s := proto.Size(x.ApiKeySecurity)
-		n += proto.SizeVarint(2<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *SecurityDefinitionsItem_Oauth2ImplicitSecurity:
-		s := proto.Size(x.Oauth2ImplicitSecurity)
-		n += proto.SizeVarint(3<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *SecurityDefinitionsItem_Oauth2PasswordSecurity:
-		s := proto.Size(x.Oauth2PasswordSecurity)
-		n += proto.SizeVarint(4<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *SecurityDefinitionsItem_Oauth2ApplicationSecurity:
-		s := proto.Size(x.Oauth2ApplicationSecurity)
-		n += proto.SizeVarint(5<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity:
-		s := proto.Size(x.Oauth2AccessCodeSecurity)
-		n += proto.SizeVarint(6<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
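A sketch mapping the six-way oneof back to the Swagger 2.0 security `type` strings:

func securityType(item *SecurityDefinitionsItem) string {
	switch item.GetOneof().(type) {
	case *SecurityDefinitionsItem_BasicAuthenticationSecurity:
		return "basic"
	case *SecurityDefinitionsItem_ApiKeySecurity:
		return "apiKey"
	case *SecurityDefinitionsItem_Oauth2ImplicitSecurity,
		*SecurityDefinitionsItem_Oauth2PasswordSecurity,
		*SecurityDefinitionsItem_Oauth2ApplicationSecurity,
		*SecurityDefinitionsItem_Oauth2AccessCodeSecurity:
		return "oauth2" // the four OAuth2 flows share one scheme type
	default:
		return "" // nil item or unset oneof
	}
}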
-type SecurityRequirement struct {
-	AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
-}
-
-func (m *SecurityRequirement) Reset()                    { *m = SecurityRequirement{} }
-func (m *SecurityRequirement) String() string            { return proto.CompactTextString(m) }
-func (*SecurityRequirement) ProtoMessage()               {}
-func (*SecurityRequirement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} }
-
-func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray {
-	if m != nil {
-		return m.AdditionalProperties
-	}
-	return nil
-}
-
-type StringArray struct {
-	Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"`
-}
-
-func (m *StringArray) Reset()                    { *m = StringArray{} }
-func (m *StringArray) String() string            { return proto.CompactTextString(m) }
-func (*StringArray) ProtoMessage()               {}
-func (*StringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} }
-
-func (m *StringArray) GetValue() []string {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-type Tag struct {
-	Name            string        `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Description     string        `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"`
-	ExternalDocs    *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"`
-	VendorExtension []*NamedAny   `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *Tag) Reset()                    { *m = Tag{} }
-func (m *Tag) String() string            { return proto.CompactTextString(m) }
-func (*Tag) ProtoMessage()               {}
-func (*Tag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} }
-
-func (m *Tag) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *Tag) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *Tag) GetExternalDocs() *ExternalDocs {
-	if m != nil {
-		return m.ExternalDocs
-	}
-	return nil
-}
-
-func (m *Tag) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
-type TypeItem struct {
-	Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"`
-}
-
-func (m *TypeItem) Reset()                    { *m = TypeItem{} }
-func (m *TypeItem) String() string            { return proto.CompactTextString(m) }
-func (*TypeItem) ProtoMessage()               {}
-func (*TypeItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} }
-
-func (m *TypeItem) GetValue() []string {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-// Any property starting with x- is valid.
-type VendorExtension struct {
-	AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
-}
-
-func (m *VendorExtension) Reset()                    { *m = VendorExtension{} }
-func (m *VendorExtension) String() string            { return proto.CompactTextString(m) }
-func (*VendorExtension) ProtoMessage()               {}
-func (*VendorExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} }
-
-func (m *VendorExtension) GetAdditionalProperties() []*NamedAny {
-	if m != nil {
-		return m.AdditionalProperties
-	}
-	return nil
-}
-
-type Xml struct {
-	Name            string      `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Namespace       string      `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"`
-	Prefix          string      `protobuf:"bytes,3,opt,name=prefix" json:"prefix,omitempty"`
-	Attribute       bool        `protobuf:"varint,4,opt,name=attribute" json:"attribute,omitempty"`
-	Wrapped         bool        `protobuf:"varint,5,opt,name=wrapped" json:"wrapped,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *Xml) Reset()                    { *m = Xml{} }
-func (m *Xml) String() string            { return proto.CompactTextString(m) }
-func (*Xml) ProtoMessage()               {}
-func (*Xml) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} }
-
-func (m *Xml) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *Xml) GetNamespace() string {
-	if m != nil {
-		return m.Namespace
-	}
-	return ""
-}
-
-func (m *Xml) GetPrefix() string {
-	if m != nil {
-		return m.Prefix
-	}
-	return ""
-}
-
-func (m *Xml) GetAttribute() bool {
-	if m != nil {
-		return m.Attribute
-	}
-	return false
-}
-
-func (m *Xml) GetWrapped() bool {
-	if m != nil {
-		return m.Wrapped
-	}
-	return false
-}
-
-func (m *Xml) GetVendorExtension() []*NamedAny {
-	if m != nil {
-		return m.VendorExtension
-	}
-	return nil
-}
-
-func init() {
-	proto.RegisterType((*AdditionalPropertiesItem)(nil), "openapi.v2.AdditionalPropertiesItem")
-	proto.RegisterType((*Any)(nil), "openapi.v2.Any")
-	proto.RegisterType((*ApiKeySecurity)(nil), "openapi.v2.ApiKeySecurity")
-	proto.RegisterType((*BasicAuthenticationSecurity)(nil), "openapi.v2.BasicAuthenticationSecurity")
-	proto.RegisterType((*BodyParameter)(nil), "openapi.v2.BodyParameter")
-	proto.RegisterType((*Contact)(nil), "openapi.v2.Contact")
-	proto.RegisterType((*Default)(nil), "openapi.v2.Default")
-	proto.RegisterType((*Definitions)(nil), "openapi.v2.Definitions")
-	proto.RegisterType((*Document)(nil), "openapi.v2.Document")
-	proto.RegisterType((*Examples)(nil), "openapi.v2.Examples")
-	proto.RegisterType((*ExternalDocs)(nil), "openapi.v2.ExternalDocs")
-	proto.RegisterType((*FileSchema)(nil), "openapi.v2.FileSchema")
-	proto.RegisterType((*FormDataParameterSubSchema)(nil), "openapi.v2.FormDataParameterSubSchema")
-	proto.RegisterType((*Header)(nil), "openapi.v2.Header")
-	proto.RegisterType((*HeaderParameterSubSchema)(nil), "openapi.v2.HeaderParameterSubSchema")
-	proto.RegisterType((*Headers)(nil), "openapi.v2.Headers")
-	proto.RegisterType((*Info)(nil), "openapi.v2.Info")
-	proto.RegisterType((*ItemsItem)(nil), "openapi.v2.ItemsItem")
-	proto.RegisterType((*JsonReference)(nil), "openapi.v2.JsonReference")
-	proto.RegisterType((*License)(nil), "openapi.v2.License")
-	proto.RegisterType((*NamedAny)(nil), "openapi.v2.NamedAny")
-	proto.RegisterType((*NamedHeader)(nil), "openapi.v2.NamedHeader")
-	proto.RegisterType((*NamedParameter)(nil), "openapi.v2.NamedParameter")
-	proto.RegisterType((*NamedPathItem)(nil), "openapi.v2.NamedPathItem")
-	proto.RegisterType((*NamedResponse)(nil), "openapi.v2.NamedResponse")
-	proto.RegisterType((*NamedResponseValue)(nil), "openapi.v2.NamedResponseValue")
-	proto.RegisterType((*NamedSchema)(nil), "openapi.v2.NamedSchema")
-	proto.RegisterType((*NamedSecurityDefinitionsItem)(nil), "openapi.v2.NamedSecurityDefinitionsItem")
-	proto.RegisterType((*NamedString)(nil), "openapi.v2.NamedString")
-	proto.RegisterType((*NamedStringArray)(nil), "openapi.v2.NamedStringArray")
-	proto.RegisterType((*NonBodyParameter)(nil), "openapi.v2.NonBodyParameter")
-	proto.RegisterType((*Oauth2AccessCodeSecurity)(nil), "openapi.v2.Oauth2AccessCodeSecurity")
-	proto.RegisterType((*Oauth2ApplicationSecurity)(nil), "openapi.v2.Oauth2ApplicationSecurity")
-	proto.RegisterType((*Oauth2ImplicitSecurity)(nil), "openapi.v2.Oauth2ImplicitSecurity")
-	proto.RegisterType((*Oauth2PasswordSecurity)(nil), "openapi.v2.Oauth2PasswordSecurity")
-	proto.RegisterType((*Oauth2Scopes)(nil), "openapi.v2.Oauth2Scopes")
-	proto.RegisterType((*Operation)(nil), "openapi.v2.Operation")
-	proto.RegisterType((*Parameter)(nil), "openapi.v2.Parameter")
-	proto.RegisterType((*ParameterDefinitions)(nil), "openapi.v2.ParameterDefinitions")
-	proto.RegisterType((*ParametersItem)(nil), "openapi.v2.ParametersItem")
-	proto.RegisterType((*PathItem)(nil), "openapi.v2.PathItem")
-	proto.RegisterType((*PathParameterSubSchema)(nil), "openapi.v2.PathParameterSubSchema")
-	proto.RegisterType((*Paths)(nil), "openapi.v2.Paths")
-	proto.RegisterType((*PrimitivesItems)(nil), "openapi.v2.PrimitivesItems")
-	proto.RegisterType((*Properties)(nil), "openapi.v2.Properties")
-	proto.RegisterType((*QueryParameterSubSchema)(nil), "openapi.v2.QueryParameterSubSchema")
-	proto.RegisterType((*Response)(nil), "openapi.v2.Response")
-	proto.RegisterType((*ResponseDefinitions)(nil), "openapi.v2.ResponseDefinitions")
-	proto.RegisterType((*ResponseValue)(nil), "openapi.v2.ResponseValue")
-	proto.RegisterType((*Responses)(nil), "openapi.v2.Responses")
-	proto.RegisterType((*Schema)(nil), "openapi.v2.Schema")
-	proto.RegisterType((*SchemaItem)(nil), "openapi.v2.SchemaItem")
-	proto.RegisterType((*SecurityDefinitions)(nil), "openapi.v2.SecurityDefinitions")
-	proto.RegisterType((*SecurityDefinitionsItem)(nil), "openapi.v2.SecurityDefinitionsItem")
-	proto.RegisterType((*SecurityRequirement)(nil), "openapi.v2.SecurityRequirement")
-	proto.RegisterType((*StringArray)(nil), "openapi.v2.StringArray")
-	proto.RegisterType((*Tag)(nil), "openapi.v2.Tag")
-	proto.RegisterType((*TypeItem)(nil), "openapi.v2.TypeItem")
-	proto.RegisterType((*VendorExtension)(nil), "openapi.v2.VendorExtension")
-	proto.RegisterType((*Xml)(nil), "openapi.v2.Xml")
-}
-
-func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor0) }
-
-var fileDescriptor0 = []byte{
-	// 3129 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57,
-	0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c,
-	0xe7, 0xb3, 0x9c, 0x4f, 0x29, 0x48, 0x05, 0x2a, 0x05, 0xf2, 0xab, 0xc6, 0xc4, 0x44, 0x4a, 0xcb,
-	0x0e, 0x09, 0x04, 0xba, 0xae, 0x66, 0xee, 0x48, 0x9d, 0x74, 0xf7, 0x6d, 0x77, 0xf7, 0xc8, 0x1a,
-	0x16, 0x2c, 0xa0, 0x8a, 0x35, 0x50, 0x59, 0x53, 0x15, 0x16, 0x14, 0x55, 0x59, 0xb0, 0x62, 0xc5,
-	0x1f, 0x60, 0xc7, 0x3f, 0x60, 0x0d, 0x5b, 0xaa, 0x58, 0x51, 0x3c, 0xea, 0xbe, 0xfa, 0x31, 0x7d,
-	0x7b, 0x1e, 0x96, 0x0b, 0x28, 0xd0, 0x6a, 0xe6, 0xde, 0x73, 0xee, 0xb9, 0xa7, 0x4f, 0x9f, 0xd7,
-	0x3d, 0xe7, 0x36, 0xac, 0xef, 0x79, 0xd8, 0xdd, 0xdd, 0x7f, 0x70, 0xb2, 0x73, 0x2b, 0xfa, 0xb7,
-	0xed, 0xf9, 0x24, 0x24, 0x1a, 0x10, 0x0f, 0xbb, 0xc8, 0xb3, 0xb6, 0x4f, 0x76, 0x36, 0xd6, 0x8f,
-	0x08, 0x39, 0xb2, 0xf1, 0x2d, 0x06, 0x39, 0x1c, 0x0e, 0x6e, 0x21, 0x77, 0xc4, 0xd1, 0xb6, 0x1c,
-	0xd0, 0x77, 0xfb, 0x7d, 0x2b, 0xb4, 0x88, 0x8b, 0xec, 0x7d, 0x9f, 0x78, 0xd8, 0x0f, 0x2d, 0x1c,
-	0x3c, 0x08, 0xb1, 0xa3, 0xfd, 0x1f, 0xd4, 0x82, 0xde, 0x31, 0x76, 0x90, 0x5e, 0xbc, 0x52, 0xbc,
-	0xb6, 0xb0, 0xa3, 0x6d, 0xc7, 0x34, 0xb7, 0x0f, 0x18, 0xa4, 0x5b, 0x30, 0x04, 0x8e, 0xb6, 0x01,
-	0xf5, 0x43, 0x42, 0x6c, 0x8c, 0x5c, 0xbd, 0x74, 0xa5, 0x78, 0xad, 0xd1, 0x2d, 0x18, 0x72, 0xe2,
-	0x76, 0x1d, 0xaa, 0xc4, 0xc5, 0x64, 0xb0, 0x75, 0x0f, 0xca, 0xbb, 0xee, 0x48, 0xbb, 0x01, 0xd5,
-	0x13, 0x64, 0x0f, 0xb1, 0x20, 0xbc, 0xba, 0xcd, 0x19, 0xdc, 0x96, 0x0c, 0x6e, 0xef, 0xba, 0x23,
-	0x83, 0xa3, 0x68, 0x1a, 0x54, 0x46, 0xc8, 0xb1, 0x19, 0xd1, 0xa6, 0xc1, 0xfe, 0x6f, 0x7d, 0x51,
-	0x84, 0xf6, 0xae, 0x67, 0xbd, 0x8b, 0x47, 0x07, 0xb8, 0x37, 0xf4, 0xad, 0x70, 0x44, 0xd1, 0xc2,
-	0x91, 0xc7, 0x29, 0x36, 0x0d, 0xf6, 0x9f, 0xce, 0xb9, 0xc8, 0xc1, 0x72, 0x29, 0xfd, 0xaf, 0xb5,
-	0xa1, 0x64, 0xb9, 0x7a, 0x99, 0xcd, 0x94, 0x2c, 0x57, 0xbb, 0x02, 0x0b, 0x7d, 0x1c, 0xf4, 0x7c,
-	0xcb, 0xa3, 0x32, 0xd0, 0x2b, 0x0c, 0x90, 0x9c, 0xd2, 0xbe, 0x06, 0x9d, 0x13, 0xec, 0xf6, 0x89,
-	0x6f, 0xe2, 0xd3, 0x10, 0xbb, 0x01, 0x45, 0xab, 0x5e, 0x29, 0x33, 0xbe, 0x13, 0x02, 0x79, 0x0f,
-	0x39, 0xb8, 0x4f, 0xf9, 0x5e, 0xe2, 0xd8, 0xf7, 0x24, 0xf2, 0xd6, 0x67, 0x45, 0xd8, 0xbc, 0x8d,
-	0x02, 0xab, 0xb7, 0x3b, 0x0c, 0x8f, 0xb1, 0x1b, 0x5a, 0x3d, 0x44, 0x09, 0x4f, 0x64, 0x7d, 0x8c,
-	0xad, 0xd2, 0x6c, 0x6c, 0x95, 0xe7, 0x61, 0xeb, 0x0f, 0x45, 0x68, 0xdd, 0x26, 0xfd, 0xd1, 0x3e,
-	0xf2, 0x91, 0x83, 0x43, 0xec, 0x8f, 0x6f, 0x5a, 0xcc, 0x6e, 0x3a, 0x8b, 0x44, 0x37, 0xa0, 0xe1,
-	0xe3, 0x27, 0x43, 0xcb, 0xc7, 0x7d, 0x26, 0xce, 0x86, 0x11, 0x8d, 0xb5, 0x1b, 0x91, 0x4a, 0x55,
-	0xf3, 0x54, 0x2a, 0x52, 0x28, 0xd5, 0x03, 0xd6, 0xe6, 0x79, 0xc0, 0x1f, 0x17, 0xa1, 0x7e, 0x87,
-	0xb8, 0x21, 0xea, 0x85, 0x11, 0xe3, 0xc5, 0x04, 0xe3, 0x1d, 0x28, 0x0f, 0x7d, 0xa9, 0x58, 0xf4,
-	0xaf, 0xb6, 0x0a, 0x55, 0xec, 0x20, 0xcb, 0x16, 0x4f, 0xc3, 0x07, 0x4a, 0x46, 0x2a, 0xf3, 0x30,
-	0xf2, 0x08, 0xea, 0x77, 0xf1, 0x00, 0x0d, 0xed, 0x50, 0x7b, 0x00, 0x17, 0x50, 0x64, 0x6f, 0xa6,
-	0x17, 0x19, 0x9c, 0x5e, 0x9c, 0x40, 0x70, 0x15, 0x29, 0x4c, 0x74, 0xeb, 0x3b, 0xb0, 0x70, 0x17,
-	0x0f, 0x2c, 0x97, 0x41, 0x02, 0xed, 0xe1, 0x64, 0xca, 0x17, 0x33, 0x94, 0x85, 0xb8, 0xd5, 0xc4,
-	0xff, 0x58, 0x85, 0xc6, 0x5d, 0xd2, 0x1b, 0x3a, 0xd8, 0x0d, 0x35, 0x1d, 0xea, 0xc1, 0x53, 0x74,
-	0x74, 0x84, 0x7d, 0x21, 0x3f, 0x39, 0xd4, 0x5e, 0x86, 0x8a, 0xe5, 0x0e, 0x08, 0x93, 0xe1, 0xc2,
-	0x4e, 0x27, 0xb9, 0xc7, 0x03, 0x77, 0x40, 0x0c, 0x06, 0xa5, 0xc2, 0x3f, 0x26, 0x41, 0x28, 0xa4,
-	0xca, 0xfe, 0x6b, 0x9b, 0xd0, 0x3c, 0x44, 0x01, 0x36, 0x3d, 0x14, 0x1e, 0x0b, 0xab, 0x6b, 0xd0,
-	0x89, 0x7d, 0x14, 0x1e, 0xb3, 0x0d, 0x29, 0x77, 0x38, 0x60, 0x96, 0x46, 0x37, 0xe4, 0x43, 0xaa,
-	0x5c, 0x3d, 0xe2, 0x06, 0x43, 0x0a, 0xaa, 0x31, 0x50, 0x34, 0xa6, 0x30, 0xcf, 0x27, 0xfd, 0x61,
-	0x0f, 0x07, 0x7a, 0x9d, 0xc3, 0xe4, 0x58, 0x7b, 0x0d, 0xaa, 0x74, 0xa7, 0x40, 0x6f, 0x30, 0x4e,
-	0x97, 0x93, 0x9c, 0xd2, 0x2d, 0x03, 0x83, 0xc3, 0xb5, 0xb7, 0xa9, 0x0d, 0x44, 0x52, 0xd5, 0x9b,
-	0x0c, 0x3d, 0x25, 0xbc, 0x84, 0xd0, 0x8d, 0x24, 0xae, 0xf6, 0x75, 0x00, 0x4f, 0xda, 0x52, 0xa0,
-	0x03, 0x5b, 0x79, 0x25, 0xbd, 0x91, 0x80, 0x26, 0x49, 0x24, 0xd6, 0x68, 0xef, 0x40, 0xd3, 0xc7,
-	0x81, 0x47, 0xdc, 0x00, 0x07, 0xfa, 0x02, 0x23, 0xf0, 0x62, 0x92, 0x80, 0x21, 0x80, 0xc9, 0xf5,
-	0xf1, 0x0a, 0xed, 0xab, 0xd0, 0x08, 0x84, 0x53, 0xd1, 0x17, 0xd9, 0x5b, 0x4f, 0xad, 0x96, 0x0e,
-	0xc7, 0xe0, 0xd6, 0x48, 0x5f, 0xad, 0x11, 0x2d, 0xd0, 0x0c, 0x58, 0x95, 0xff, 0xcd, 0xa4, 0x04,
-	0x5a, 0x59, 0x36, 0x24, 0xa1, 0x24, 0x1b, 0x2b, 0x41, 0x76, 0x52, 0xbb, 0x0a, 0x95, 0x10, 0x1d,
-	0x05, 0x7a, 0x9b, 0x31, 0xb3, 0x94, 0xa4, 0xf1, 0x08, 0x1d, 0x19, 0x0c, 0xa8, 0xbd, 0x03, 0x2d,
-	0x6a, 0x57, 0x3e, 0x55, 0xdb, 0x3e, 0xe9, 0x05, 0xfa, 0x12, 0xdb, 0x51, 0x4f, 0x62, 0xdf, 0x13,
-	0x08, 0x77, 0x49, 0x2f, 0x30, 0x16, 0x71, 0x62, 0xa4, 0xb4, 0xce, 0xce, 0x3c, 0xd6, 0xf9, 0x18,
-	0x1a, 0xf7, 0x4e, 0x91, 0xe3, 0xd9, 0x38, 0x78, 0x9e, 0xe6, 0xf9, 0xa3, 0x22, 0x2c, 0x26, 0xd9,
-	0x9e, 0xc1, 0xbb, 0x66, 0x1d, 0xd2, 0x99, 0x9d, 0xfc, 0x3f, 0x4a, 0x00, 0xf7, 0x2d, 0x1b, 0x73,
-	0x63, 0xd7, 0xd6, 0xa0, 0x36, 0x20, 0xbe, 0x83, 0x42, 0xb1, 0xbd, 0x18, 0x51, 0xc7, 0x17, 0x5a,
-	0xa1, 0x2d, 0x1d, 0x3b, 0x1f, 0x8c, 0x73, 0x5c, 0xce, 0x72, 0x7c, 0x1d, 0xea, 0x7d, 0xee, 0xd9,
-	0x98, 0x0d, 0x8f, 0xbd, 0x63, 0xca, 0x91, 0x84, 0xa7, 0xc2, 0x02, 0x37, 0xea, 0x38, 0x2c, 0xc8,
-	0x08, 0x58, 0x4b, 0x44, 0xc0, 0x4d, 0x6a, 0x0b, 0xa8, 0x6f, 0x12, 0xd7, 0x1e, 0xe9, 0x75, 0x19,
-	0x47, 0x50, 0x7f, 0xcf, 0xb5, 0x47, 0x59, 0x9d, 0x69, 0xcc, 0xa5, 0x33, 0xd7, 0xa1, 0x8e, 0xf9,
-	0x2b, 0x17, 0x06, 0x9e, 0x65, 0x5b, 0xc0, 0x95, 0x6f, 0x00, 0xe6, 0x79, 0x03, 0x5f, 0xd4, 0x60,
-	0xe3, 0x3e, 0xf1, 0x9d, 0xbb, 0x28, 0x44, 0x91, 0x03, 0x38, 0x18, 0x1e, 0x1e, 0xc8, 0xb4, 0x29,
-	0x16, 0x4b, 0x71, 0x2c, 0x5a, 0xf2, 0xc8, 0x5a, 0xca, 0xcb, 0x55, 0xca, 0xf9, 0xf1, 0xb9, 0x92,
-	0x08, 0x73, 0x37, 0x60, 0x19, 0xd9, 0x36, 0x79, 0x6a, 0x62, 0xc7, 0x0b, 0x47, 0x26, 0x4f, 0xbc,
-	0xaa, 0x6c, 0xab, 0x25, 0x06, 0xb8, 0x47, 0xe7, 0x3f, 0x90, 0xc9, 0x56, 0xe6, 0x45, 0xc4, 0x3a,
-	0x53, 0x4f, 0xe9, 0xcc, 0xff, 0x43, 0xd5, 0x0a, 0xb1, 0x23, 0x65, 0xbf, 0x99, 0xf2, 0x74, 0xbe,
-	0xe5, 0x58, 0xa1, 0x75, 0xc2, 0x33, 0xc9, 0xc0, 0xe0, 0x98, 0xda, 0xeb, 0xb0, 0xdc, 0x23, 0xb6,
-	0x8d, 0x7b, 0x94, 0x59, 0x53, 0x50, 0x6d, 0x32, 0xaa, 0x9d, 0x18, 0x70, 0x9f, 0xd3, 0x4f, 0xe8,
-	0x16, 0x4c, 0xd1, 0x2d, 0x1d, 0xea, 0x0e, 0x3a, 0xb5, 0x9c, 0xa1, 0xc3, 0xbc, 0x66, 0xd1, 0x90,
-	0x43, 0xba, 0x23, 0x3e, 0xed, 0xd9, 0xc3, 0xc0, 0x3a, 0xc1, 0xa6, 0xc4, 0x59, 0x64, 0x0f, 0xdf,
-	0x89, 0x00, 0xdf, 0x14, 0xc8, 0x94, 0x8c, 0xe5, 0x32, 0x94, 0x96, 0x20, 0xc3, 0x87, 0x63, 0x64,
-	0x04, 0x4e, 0x7b, 0x9c, 0x8c, 0x40, 0x7e, 0x01, 0xc0, 0x41, 0xa7, 0xa6, 0x8d, 0xdd, 0xa3, 0xf0,
-	0x98, 0x79, 0xb3, 0xb2, 0xd1, 0x74, 0xd0, 0xe9, 0x43, 0x36, 0xc1, 0xc0, 0x96, 0x2b, 0xc1, 0x1d,
-	0x01, 0xb6, 0x5c, 0x01, 0xd6, 0xa1, 0xee, 0xa1, 0x90, 0x2a, 0xab, 0xbe, 0xcc, 0x83, 0xad, 0x18,
-	0x52, 0x8b, 0xa0, 0x74, 0xb9, 0xd0, 0x35, 0xb6, 0xae, 0xe1, 0xa0, 0x53, 0x26, 0x61, 0x06, 0xb4,
-	0x5c, 0x01, 0x5c, 0x11, 0x40, 0xcb, 0xe5, 0xc0, 0x97, 0x60, 0x71, 0xe8, 0x5a, 0x4f, 0x86, 0x58,
-	0xc0, 0x57, 0x19, 0xe7, 0x0b, 0x7c, 0x8e, 0xa3, 0x5c, 0x85, 0x0a, 0x76, 0x87, 0x8e, 0x7e, 0x21,
-	0xeb, 0xaa, 0xa9, 0xa8, 0x19, 0x50, 0x7b, 0x11, 0x16, 0x9c, 0xa1, 0x1d, 0x5a, 0x9e, 0x8d, 0x4d,
-	0x32, 0xd0, 0xd7, 0x98, 0x90, 0x40, 0x4e, 0xed, 0x0d, 0x94, 0xd6, 0x72, 0x71, 0x2e, 0x6b, 0xa9,
-	0x42, 0xad, 0x8b, 0x51, 0x1f, 0xfb, 0xca, 0xb4, 0x38, 0xd6, 0xc5, 0x92, 0x5a, 0x17, 0xcb, 0x67,
-	0xd3, 0xc5, 0xca, 0x74, 0x5d, 0xac, 0xce, 0xae, 0x8b, 0xb5, 0x19, 0x74, 0xb1, 0x3e, 0x5d, 0x17,
-	0x1b, 0x33, 0xe8, 0x62, 0x73, 0x26, 0x5d, 0x84, 0xc9, 0xba, 0xb8, 0x30, 0x41, 0x17, 0x17, 0x27,
-	0xe8, 0x62, 0x6b, 0x92, 0x2e, 0xb6, 0xa7, 0xe8, 0xe2, 0x52, 0xbe, 0x2e, 0x76, 0xe6, 0xd0, 0xc5,
-	0xe5, 0x8c, 0x2e, 0x8e, 0x79, 0x4b, 0x6d, 0xb6, 0x23, 0xd4, 0xca, 0x3c, 0xda, 0xfa, 0xb7, 0x2a,
-	0xe8, 0x5c, 0x5b, 0xff, 0x2d, 0x9e, 0x5d, 0x5a, 0x48, 0x55, 0x69, 0x21, 0x35, 0xb5, 0x85, 0xd4,
-	0xcf, 0x66, 0x21, 0x8d, 0xe9, 0x16, 0xd2, 0x9c, 0xdd, 0x42, 0x60, 0x06, 0x0b, 0x59, 0x98, 0x6e,
-	0x21, 0x8b, 0x33, 0x58, 0x48, 0x6b, 0x26, 0x0b, 0x69, 0x4f, 0xb6, 0x90, 0xa5, 0x09, 0x16, 0xd2,
-	0x99, 0x60, 0x21, 0xcb, 0x93, 0x2c, 0x44, 0x9b, 0x62, 0x21, 0x2b, 0xf9, 0x16, 0xb2, 0x3a, 0x87,
-	0x85, 0x5c, 0x98, 0xc9, 0x5b, 0xaf, 0xcd, 0xa3, 0xff, 0xdf, 0x82, 0x3a, 0x57, 0xff, 0x67, 0x38,
-	0x7e, 0xf2, 0x85, 0x39, 0xc9, 0xf3, 0xe7, 0x25, 0xa8, 0xd0, 0x03, 0x64, 0x9c, 0x98, 0x16, 0x93,
-	0x89, 0xa9, 0x0e, 0xf5, 0x13, 0xec, 0x07, 0x71, 0x65, 0x44, 0x0e, 0x67, 0x30, 0xa4, 0x6b, 0xd0,
-	0x09, 0xb1, 0xef, 0x04, 0x26, 0x19, 0x98, 0x01, 0xf6, 0x4f, 0xac, 0x9e, 0x34, 0xaa, 0x36, 0x9b,
-	0xdf, 0x1b, 0x1c, 0xf0, 0x59, 0xed, 0x26, 0xd4, 0x7b, 0xbc, 0x7c, 0x20, 0x9c, 0xfe, 0x4a, 0xf2,
-	0x21, 0x44, 0x65, 0xc1, 0x90, 0x38, 0x14, 0xdd, 0xb6, 0x7a, 0xd8, 0x0d, 0x78, 0xfa, 0x34, 0x86,
-	0xfe, 0x90, 0x83, 0x0c, 0x89, 0xa3, 0x14, 0x7e, 0x7d, 0x1e, 0xe1, 0xbf, 0x05, 0x4d, 0xa6, 0x0c,
-	0xac, 0x56, 0x77, 0x23, 0x51, 0xab, 0x2b, 0x4f, 0x2e, 0xac, 0x6c, 0xdd, 0x85, 0xd6, 0x37, 0x02,
-	0xe2, 0x1a, 0x78, 0x80, 0x7d, 0xec, 0xf6, 0xb0, 0xb6, 0x0c, 0x15, 0xd3, 0xc7, 0x03, 0x21, 0xe3,
-	0xb2, 0x81, 0x07, 0xd3, 0xeb, 0x4f, 0x5b, 0x1e, 0xd4, 0xc5, 0x33, 0xcd, 0x58, 0x5c, 0x39, 0xf3,
-	0x59, 0xe6, 0x1e, 0x34, 0x24, 0x50, 0xb9, 0xe5, 0x2b, 0xb2, 0xaa, 0x58, 0x52, 0x3b, 0x20, 0x0e,
-	0xdd, 0x7a, 0x17, 0x16, 0x12, 0x0a, 0xa8, 0xa4, 0x74, 0x2d, 0x4d, 0x29, 0x25, 0x4c, 0xa1, 0xb7,
-	0x82, 0xd8, 0xfb, 0xd0, 0x66, 0xc4, 0xe2, 0x22, 0x9a, 0x8a, 0xde, 0xeb, 0x69, 0x7a, 0x17, 0x94,
-	0x45, 0x01, 0x49, 0x72, 0x0f, 0x5a, 0x82, 0x64, 0x78, 0xcc, 0xde, 0xad, 0x8a, 0xe2, 0x8d, 0x34,
-	0xc5, 0xd5, 0xf1, 0x7a, 0x06, 0x5d, 0x38, 0x4e, 0x50, 0x56, 0x0f, 0xe6, 0x26, 0x28, 0x17, 0x4a,
-	0x82, 0x1f, 0x81, 0x96, 0x22, 0x18, 0x9d, 0x1d, 0x32, 0x54, 0x6f, 0xa5, 0xa9, 0xae, 0xab, 0xa8,
-	0xb2, 0xd5, 0xe3, 0x2f, 0x47, 0xc4, 0xd0, 0x79, 0x5f, 0x8e, 0xd0, 0x74, 0x41, 0xcc, 0x81, 0x4b,
-	0x9c, 0x58, 0xb6, 0x34, 0x91, 0x2b, 0xd8, 0xb7, 0xd3, 0xd4, 0xaf, 0x4e, 0xa9, 0x7b, 0x24, 0xe5,
-	0xfc, 0x96, 0xe4, 0x3d, 0xf4, 0x2d, 0xf7, 0x48, 0x49, 0x7d, 0x35, 0x49, 0xbd, 0x29, 0x17, 0x3e,
-	0x86, 0x4e, 0x62, 0xe1, 0xae, 0xef, 0x23, 0xb5, 0x82, 0xdf, 0x4c, 0xf3, 0x96, 0xf2, 0xa9, 0x89,
-	0xb5, 0x92, 0xec, 0x6f, 0xca, 0xd0, 0x79, 0x8f, 0xb8, 0xe9, 0x1a, 0x2f, 0x86, 0xcd, 0x63, 0xa6,
-	0xc1, 0x66, 0x54, 0x77, 0x32, 0x83, 0xe1, 0xa1, 0x99, 0xaa, 0xf4, 0xbf, 0x9c, 0x55, 0xf8, 0x6c,
-	0x82, 0xd3, 0x2d, 0x18, 0xfa, 0x71, 0x5e, 0xf2, 0x63, 0xc3, 0x65, 0x9a, 0x30, 0x98, 0x7d, 0x14,
-	0x22, 0xf5, 0x4e, 0xfc, 0x19, 0x5e, 0x4d, 0xee, 0x94, 0x7f, 0x4c, 0xee, 0x16, 0x8c, 0x8d, 0x41,
-	0xfe, 0x21, 0xfa, 0x10, 0x36, 0x9e, 0x0c, 0xb1, 0x3f, 0x52, 0xef, 0x54, 0xce, 0xbe, 0xc9, 0xf7,
-	0x29, 0xb6, 0x72, 0x9b, 0x8b, 0x4f, 0xd4, 0x20, 0xcd, 0x84, 0x75, 0x0f, 0x85, 0xc7, 0xea, 0x2d,
-	0x78, 0xf1, 0x63, 0x6b, 0xdc, 0x0a, 0x95, 0x3b, 0xac, 0x79, 0x4a, 0x48, 0xdc, 0x24, 0xf9, 0xbc,
-	0x04, 0xfa, 0x1e, 0x1a, 0x86, 0xc7, 0x3b, 0xbb, 0xbd, 0x1e, 0x0e, 0x82, 0x3b, 0xa4, 0x8f, 0xa7,
-	0xf5, 0x39, 0x06, 0x36, 0x79, 0x2a, 0xab, 0xf2, 0xf4, 0xbf, 0xf6, 0x06, 0x0d, 0x08, 0xc4, 0xc3,
-	0xf2, 0x48, 0x94, 0x2a, 0x8d, 0x70, 0xea, 0x07, 0x0c, 0x6e, 0x08, 0x3c, 0x9a, 0x35, 0xd1, 0x69,
-	0xe2, 0x5b, 0xdf, 0x67, 0xfd, 0x09, 0x93, 0xfa, 0x6f, 0x71, 0x20, 0x4a, 0x01, 0x1e, 0xfb, 0x36,
-	0x4d, 0x60, 0x42, 0xf2, 0x29, 0xe6, 0x48, 0x3c, 0xff, 0x6c, 0xb0, 0x09, 0x0a, 0x1c, 0x0b, 0x1e,
-	0xb5, 0xd9, 0x32, 0xef, 0xb9, 0x82, 0xdf, 0x5f, 0x8a, 0xb0, 0x2e, 0x64, 0xe4, 0x79, 0xf6, 0x2c,
-	0x1d, 0x95, 0xe7, 0x23, 0xa4, 0xd4, 0x73, 0x57, 0x26, 0x3f, 0x77, 0x75, 0xb6, 0xe7, 0x9e, 0xab,
-	0xa7, 0xf1, 0xc3, 0x12, 0xac, 0x71, 0xc6, 0x1e, 0x38, 0xf4, 0xb9, 0xad, 0xf0, 0x3f, 0x4d, 0x33,
-	0xfe, 0x05, 0x42, 0xf8, 0x73, 0x51, 0x0a, 0x61, 0x1f, 0x05, 0xc1, 0x53, 0xe2, 0xf7, 0xff, 0x07,
-	0xde, 0xfc, 0xc7, 0xb0, 0x98, 0xe4, 0xeb, 0x19, 0xfa, 0x3d, 0x2c, 0x42, 0xe4, 0x24, 0xdc, 0x3f,
-	0xaf, 0x40, 0x73, 0xcf, 0xc3, 0x3e, 0x92, 0x87, 0x4d, 0x56, 0xb7, 0x2f, 0xb2, 0x3a, 0x2d, 0x2f,
-	0xd3, 0xeb, 0x50, 0x0f, 0x86, 0x8e, 0x83, 0xfc, 0x91, 0xcc, 0xb9, 0xc5, 0x70, 0x86, 0x9c, 0x3b,
-	0x53, 0xae, 0xad, 0xcc, 0x55, 0xae, 0x7d, 0x09, 0x16, 0x89, 0xe4, 0xcd, 0xb4, 0xfa, 0x52, 0xbc,
-	0xd1, 0xdc, 0x83, 0x7e, 0xaa, 0xf7, 0x53, 0x1b, 0xeb, 0xfd, 0x24, 0x7b, 0x46, 0xf5, 0xb1, 0x9e,
-	0xd1, 0x57, 0x52, 0x3d, 0x9b, 0x06, 0x13, 0xdd, 0x86, 0x32, 0x3d, 0xe3, 0xa1, 0x3e, 0xd9, 0xad,
-	0x79, 0x33, 0xd9, 0xad, 0x69, 0x66, 0x33, 0x3b, 0x99, 0xe0, 0xa4, 0x7a, 0x34, 0x89, 0xd6, 0x16,
-	0xa4, 0x5b, 0x5b, 0x97, 0x01, 0xfa, 0xd8, 0xf3, 0x71, 0x0f, 0x85, 0xb8, 0x2f, 0x4e, 0xbd, 0x89,
-	0x99, 0xb3, 0x75, 0x77, 0x54, 0xea, 0xd7, 0x9a, 0x47, 0xfd, 0x7e, 0x59, 0x84, 0x66, 0x9c, 0x45,
-	0xdc, 0x86, 0xf6, 0x21, 0xe9, 0x27, 0xe2, 0xad, 0x48, 0x1c, 0x52, 0x09, 0x5e, 0x2a, 0xf1, 0xe8,
-	0x16, 0x8c, 0xd6, 0x61, 0x2a, 0x13, 0x79, 0x08, 0x9a, 0x4b, 0x5c, 0x73, 0x8c, 0x0e, 0x4f, 0x0b,
-	0x2e, 0xa5, 0x98, 0x1a, 0xcb, 0x61, 0xba, 0x05, 0xa3, 0xe3, 0x8e, 0xcd, 0xc5, 0xd1, 0xf3, 0x08,
-	0x56, 0x55, 0x7d, 0x36, 0x6d, 0x6f, 0xb2, 0xbd, 0x6c, 0x64, 0xc4, 0x10, 0x27, 0xe6, 0x6a, 0x93,
-	0xf9, 0xac, 0x08, 0xed, 0xb4, 0x76, 0x68, 0x5f, 0x82, 0xe6, 0xb8, 0x44, 0xd4, 0xb9, 0x7e, 0xb7,
-	0x60, 0xc4, 0x98, 0x54, 0x9a, 0x9f, 0x04, 0xc4, 0xa5, 0x67, 0x30, 0x7e, 0x22, 0x53, 0xa5, 0xcb,
-	0xa9, 0x23, 0x1b, 0x95, 0xe6, 0x27, 0xc9, 0x89, 0xf8, 0xf9, 0x7f, 0x5f, 0x86, 0x46, 0x74, 0x74,
-	0x50, 0x9c, 0xec, 0x5e, 0x83, 0xf2, 0x11, 0x0e, 0x55, 0x27, 0x91, 0xc8, 0xfe, 0x0d, 0x8a, 0x41,
-	0x11, 0xbd, 0x61, 0x28, 0xfc, 0x63, 0x1e, 0xa2, 0x37, 0x0c, 0xb5, 0xeb, 0x50, 0xf1, 0x48, 0x20,
-	0x3b, 0x40, 0x39, 0x98, 0x0c, 0x45, 0xbb, 0x09, 0xb5, 0x3e, 0xb6, 0x71, 0x88, 0xc5, 0x89, 0x3a,
-	0x07, 0x59, 0x20, 0x69, 0xb7, 0xa0, 0x4e, 0x3c, 0xde, 0x86, 0xac, 0x4d, 0xc2, 0x97, 0x58, 0x94,
-	0x15, 0x9a, 0x92, 0x8a, 0x22, 0x57, 0x1e, 0x2b, 0x14, 0x85, 0x9e, 0xc9, 0x3c, 0x14, 0xf6, 0x8e,
-	0x45, 0xfb, 0x22, 0x07, 0x97, 0xe3, 0x8c, 0xb9, 0x89, 0xe6, 0x5c, 0x6e, 0xe2, 0xcc, 0x1d, 0xa4,
-	0xbf, 0x56, 0x61, 0x4d, 0x9d, 0x4d, 0x9e, 0xd7, 0x18, 0xcf, 0x6b, 0x8c, 0xff, 0xed, 0x35, 0xc6,
-	0xa7, 0x50, 0x65, 0x17, 0x34, 0x94, 0x94, 0x8a, 0x73, 0x50, 0xd2, 0x6e, 0x42, 0x85, 0xdd, 0x36,
-	0x29, 0xb1, 0x45, 0xeb, 0x0a, 0x87, 0x2f, 0xea, 0x26, 0x0c, 0x6d, 0xeb, 0x67, 0x55, 0x58, 0x1a,
-	0xd3, 0xda, 0xf3, 0x9e, 0xd4, 0x79, 0x4f, 0xea, 0x4c, 0x3d, 0x29, 0x95, 0x0e, 0x6b, 0xf3, 0x58,
-	0xc3, 0xb7, 0x01, 0xe2, 0x14, 0xe4, 0x39, 0xdf, 0xf9, 0xfa, 0x55, 0x0d, 0x2e, 0xe6, 0x14, 0x46,
-	0xce, 0xaf, 0x29, 0x9c, 0x5f, 0x53, 0x38, 0xbf, 0xa6, 0x10, 0x9b, 0xe1, 0xdf, 0x8b, 0xd0, 0x88,
-	0xca, 0xe9, 0xd3, 0x2f, 0x76, 0x6d, 0x47, 0xdd, 0x19, 0x9e, 0x76, 0xaf, 0x65, 0x6b, 0xd6, 0x2c,
-	0xf0, 0xc8, 0xab, 0xaf, 0x37, 0xa1, 0xce, 0x2b, 0xab, 0x32, 0x78, 0xac, 0x64, 0x0b, 0xb2, 0x81,
-	0x21, 0x71, 0xb4, 0x37, 0xa0, 0x21, 0xae, 0x2b, 0xc9, 0x93, 0xf5, 0x6a, 0xfa, 0x64, 0xcd, 0x61,
-	0x46, 0x84, 0x75, 0xf6, 0x3b, 0xcd, 0x18, 0x56, 0x14, 0x97, 0x11, 0xb5, 0xf7, 0x26, 0x3b, 0xa4,
-	0x6c, 0xcc, 0x8d, 0x5a, 0x0b, 0x6a, 0x97, 0xf4, 0x93, 0x22, 0xb4, 0xd2, 0x5d, 0x86, 0x1d, 0xea,
-	0x88, 0xf8, 0x44, 0x74, 0x7b, 0x5c, 0x71, 0xe6, 0xee, 0x16, 0x8c, 0x08, 0xef, 0xf9, 0x9e, 0xaf,
-	0x7e, 0x5a, 0x84, 0x66, 0x74, 0xb2, 0xd7, 0xee, 0x40, 0x4b, 0x6e, 0x63, 0xf6, 0x48, 0x1f, 0x8b,
-	0x07, 0xbd, 0x9c, 0xfb, 0xa0, 0xbc, 0xdb, 0xb1, 0x28, 0x17, 0xdd, 0x21, 0x7d, 0x75, 0x2b, 0xb0,
-	0x34, 0xcf, 0xdb, 0xf8, 0x75, 0x13, 0x6a, 0xc2, 0x51, 0x2b, 0x4e, 0x7c, 0x79, 0x09, 0x4a, 0xd4,
-	0x5b, 0x2d, 0x4f, 0xb8, 0xf4, 0x57, 0x99, 0x78, 0xe9, 0x6f, 0x5a, 0xe2, 0x31, 0x66, 0x89, 0xb5,
-	0x8c, 0x25, 0x26, 0x5c, 0x62, 0x7d, 0x06, 0x97, 0xd8, 0x98, 0xee, 0x12, 0x9b, 0x33, 0xb8, 0x44,
-	0x98, 0xc9, 0x25, 0x2e, 0x4c, 0x76, 0x89, 0x8b, 0x13, 0x5c, 0x62, 0x6b, 0x82, 0x4b, 0x6c, 0x4f,
-	0x72, 0x89, 0x4b, 0x53, 0x5c, 0x62, 0x27, 0xeb, 0x12, 0x5f, 0x81, 0x36, 0x25, 0x9e, 0x30, 0x36,
-	0x7e, 0x12, 0x68, 0x39, 0xe8, 0x34, 0x91, 0x2b, 0x50, 0x34, 0xcb, 0x4d, 0xa2, 0x69, 0x02, 0xcd,
-	0x72, 0x13, 0x68, 0xc9, 0x40, 0xbf, 0x32, 0x76, 0x4d, 0x73, 0xa6, 0x13, 0xc1, 0x47, 0x79, 0x2e,
-	0xe0, 0x42, 0xb6, 0xb5, 0x94, 0xf7, 0xe9, 0x89, 0xda, 0x1b, 0x68, 0xd7, 0x44, 0xd8, 0x5f, 0xcb,
-	0xda, 0xfd, 0xa3, 0x91, 0x87, 0x79, 0xee, 0xce, 0x92, 0x81, 0xd7, 0x65, 0xd0, 0xbf, 0x98, 0x3d,
-	0xdc, 0x47, 0x4d, 0x73, 0x19, 0xee, 0xaf, 0x43, 0x0d, 0xd9, 0x36, 0xd5, 0x4f, 0x3d, 0xb7, 0x77,
-	0x5e, 0x45, 0xb6, 0xbd, 0x37, 0xd0, 0xbe, 0x0c, 0x90, 0x78, 0xa2, 0xf5, 0xac, 0x33, 0x8f, 0xb9,
-	0x35, 0x12, 0x98, 0xda, 0xcb, 0xd0, 0xea, 0x5b, 0xd4, 0x82, 0x1c, 0xcb, 0x45, 0x21, 0xf1, 0xf5,
-	0x0d, 0xa6, 0x20, 0xe9, 0xc9, 0xf4, 0x95, 0xd7, 0xcd, 0xb1, 0x2b, 0xaf, 0x2f, 0x41, 0xf9, 0xd4,
-	0xb1, 0xf5, 0x4b, 0x59, 0x8b, 0xfb, 0xd0, 0xb1, 0x0d, 0x0a, 0xcb, 0x96, 0x59, 0x5f, 0x78, 0xd6,
-	0x5b, 0xb1, 0x97, 0x9f, 0xe1, 0x56, 0xec, 0x8b, 0xf3, 0x78, 0xac, 0x1f, 0x00, 0xc4, 0x71, 0x6f,
-	0xce, 0x2f, 0x8d, 0xde, 0x86, 0x85, 0x81, 0x65, 0x63, 0x33, 0x3f, 0xa4, 0xc6, 0x37, 0x9e, 0xbb,
-	0x05, 0x03, 0x06, 0xd1, 0x28, 0xf6, 0xe2, 0x21, 0xac, 0x28, 0xba, 0xb9, 0xda, 0x77, 0x27, 0xc7,
-	0xaf, 0x6b, 0xd9, 0x84, 0x3a, 0xa7, 0x25, 0xac, 0x0e, 0x67, 0x7f, 0xaa, 0xc0, 0xc5, 0xbc, 0x66,
-	0xb4, 0x03, 0x2f, 0x1c, 0xa2, 0xc0, 0xea, 0x99, 0x28, 0xf5, 0x95, 0x90, 0x19, 0xd5, 0x7c, 0xb9,
-	0x68, 0x5e, 0x4b, 0x55, 0x58, 0xf3, 0xbf, 0x2a, 0xea, 0x16, 0x8c, 0xcd, 0xc3, 0x09, 0x1f, 0x1d,
-	0xdd, 0x87, 0x0e, 0xf2, 0x2c, 0xf3, 0x53, 0x3c, 0x8a, 0x77, 0xe0, 0x92, 0x4c, 0xd5, 0xb5, 0xd2,
-	0x5f, 0x59, 0x75, 0x0b, 0x46, 0x1b, 0xa5, 0xbf, 0xbb, 0xfa, 0x1e, 0xe8, 0x84, 0xb5, 0x25, 0x4c,
-	0x4b, 0x34, 0xa4, 0x62, 0x7a, 0xe5, 0x6c, 0x57, 0x54, 0xdd, 0xbb, 0xea, 0x16, 0x8c, 0x35, 0xa2,
-	0xee, 0x6a, 0xc5, 0xf4, 0x3d, 0xd1, 0xeb, 0x89, 0xe9, 0x57, 0xf2, 0xe8, 0x8f, 0xb7, 0x85, 0x62,
-	0xfa, 0x99, 0x86, 0xd1, 0x11, 0x6c, 0x0a, 0xfa, 0x28, 0x6e, 0x24, 0xc6, 0x5b, 0xf0, 0x00, 0xf7,
-	0x4a, 0x76, 0x0b, 0x45, 0xdb, 0xb1, 0x5b, 0x30, 0xd6, 0x49, 0x6e, 0x4f, 0x12, 0xc7, 0x1b, 0xb1,
-	0xae, 0x2e, 0x4b, 0x17, 0xe2, 0x8d, 0x6a, 0x59, 0xef, 0x98, 0xd7, 0x03, 0xee, 0x16, 0x0c, 0x21,
-	0x93, 0x2c, 0x2c, 0xd6, 0xf0, 0xe3, 0x58, 0xc3, 0x13, 0x2d, 0x01, 0xed, 0xfd, 0xc9, 0x1a, 0x7e,
-	0x29, 0xa7, 0x6d, 0xc4, 0x2f, 0x16, 0xa8, 0xb5, 0xfa, 0x2a, 0x2c, 0x24, 0x6f, 0x2e, 0xac, 0xc6,
-	0x1f, 0xf7, 0x95, 0xe3, 0x3b, 0x0e, 0xbf, 0x2d, 0x42, 0xf9, 0x11, 0x52, 0xdf, 0x8a, 0x98, 0xfe,
-	0xb1, 0x5b, 0xc6, 0xb3, 0x95, 0xcf, 0xfc, 0x8d, 0xc8, 0x5c, 0x5f, 0x70, 0x5d, 0x81, 0x86, 0x8c,
-	0x30, 0x39, 0xcf, 0xf7, 0x31, 0x2c, 0x7d, 0x30, 0x56, 0x6f, 0x7a, 0x8e, 0x1f, 0x93, 0xfc, 0xae,
-	0x08, 0xe5, 0x0f, 0x1d, 0x5b, 0x29, 0xbd, 0x4b, 0xd0, 0xa4, 0xbf, 0x81, 0x87, 0x7a, 0xf2, 0x5e,
-	0x49, 0x3c, 0x41, 0x93, 0x3f, 0xcf, 0xc7, 0x03, 0xeb, 0x54, 0x64, 0x79, 0x62, 0x44, 0x57, 0xa1,
-	0x30, 0xf4, 0xad, 0xc3, 0x61, 0x88, 0xc5, 0x67, 0x7a, 0xf1, 0x04, 0x4d, 0x65, 0x9e, 0xfa, 0xc8,
-	0xf3, 0x70, 0x5f, 0x1c, 0xc1, 0xe5, 0xf0, 0xcc, 0x7d, 0xcc, 0xdb, 0xaf, 0x42, 0x9b, 0xf8, 0x47,
-	0x12, 0xd7, 0x3c, 0xd9, 0xb9, 0xbd, 0x28, 0xbe, 0x5d, 0xdd, 0xf7, 0x49, 0x48, 0xf6, 0x8b, 0xbf,
-	0x28, 0x95, 0xf7, 0x76, 0x0f, 0x0e, 0x6b, 0xec, 0x63, 0xd0, 0x37, 0xff, 0x19, 0x00, 0x00, 0xff,
-	0xff, 0xd4, 0x0a, 0xef, 0xca, 0xe4, 0x3a, 0x00, 0x00,
-}
diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto
deleted file mode 100644
index 557c880..0000000
--- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto
+++ /dev/null
@@ -1,663 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// THIS FILE IS AUTOMATICALLY GENERATED.
-
-syntax = "proto3";
-
-package openapi.v2;
-
-import "google/protobuf/any.proto";
-
-// This option lets the proto compiler generate Java code inside the package
-// name (see below) instead of inside an outer class. It creates a simpler
-// developer experience by reducing one level of name nesting and being
-// consistent with most programming languages that don't support outer classes.
-option java_multiple_files = true;
-
-// The Java outer classname should be the filename in UpperCamelCase. This
-// class is only used to hold proto descriptor, so developers don't need to
-// work with it directly.
-option java_outer_classname = "OpenAPIProto";
-
-// The Java package name must be proto package name with proper prefix.
-option java_package = "org.openapi_v2";
-
-// A reasonable prefix for the Objective-C symbols generated from the package.
-// It should at a minimum be 3 characters long, all uppercase, and convention
-// is to use an abbreviation of the package name. Something short, but
-// hopefully unique enough to not conflict with things that may come along in
-// the future. 'GPB' is reserved for the protocol buffer implementation itself.
-option objc_class_prefix = "OAS";
-
-message AdditionalPropertiesItem {
-  oneof oneof {
-    Schema schema = 1;
-    bool boolean = 2;
-  }
-}
-
-message Any {
-  google.protobuf.Any value = 1;
-  string yaml = 2;
-}
-
-message ApiKeySecurity {
-  string type = 1;
-  string name = 2;
-  string in = 3;
-  string description = 4;
-  repeated NamedAny vendor_extension = 5;
-}
-
-message BasicAuthenticationSecurity {
-  string type = 1;
-  string description = 2;
-  repeated NamedAny vendor_extension = 3;
-}
-
-message BodyParameter {
-  // A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
-  string description = 1;
-  // The name of the parameter.
-  string name = 2;
-  // Determines the location of the parameter.
-  string in = 3;
-  // Determines whether or not this parameter is required or optional.
-  bool required = 4;
-  Schema schema = 5;
-  repeated NamedAny vendor_extension = 6;
-}
-
-// Contact information for the owners of the API.
-message Contact {
-  // The identifying name of the contact person/organization.
-  string name = 1;
-  // The URL pointing to the contact information.
-  string url = 2;
-  // The email address of the contact person/organization.
-  string email = 3;
-  repeated NamedAny vendor_extension = 4;
-}
-
-message Default {
-  repeated NamedAny additional_properties = 1;
-}
-
-// One or more JSON objects describing the schemas being consumed and produced by the API.
-message Definitions {
-  repeated NamedSchema additional_properties = 1;
-}
-
-message Document {
-  // The Swagger version of this document.
-  string swagger = 1;
-  Info info = 2;
-  // The host (name or ip) of the API. Example: 'swagger.io'
-  string host = 3;
-  // The base path to the API. Example: '/api'.
-  string base_path = 4;
-  // The transfer protocol of the API.
-  repeated string schemes = 5;
-  // A list of MIME types accepted by the API.
-  repeated string consumes = 6;
-  // A list of MIME types the API can produce.
-  repeated string produces = 7;
-  Paths paths = 8;
-  Definitions definitions = 9;
-  ParameterDefinitions parameters = 10;
-  ResponseDefinitions responses = 11;
-  repeated SecurityRequirement security = 12;
-  SecurityDefinitions security_definitions = 13;
-  repeated Tag tags = 14;
-  ExternalDocs external_docs = 15;
-  repeated NamedAny vendor_extension = 16;
-}
-
-message Examples {
-  repeated NamedAny additional_properties = 1;
-}
-
-// information about external documentation
-message ExternalDocs {
-  string description = 1;
-  string url = 2;
-  repeated NamedAny vendor_extension = 3;
-}
-
-// A deterministic version of a JSON Schema object.
-message FileSchema {
-  string format = 1;
-  string title = 2;
-  string description = 3;
-  Any default = 4;
-  repeated string required = 5;
-  string type = 6;
-  bool read_only = 7;
-  ExternalDocs external_docs = 8;
-  Any example = 9;
-  repeated NamedAny vendor_extension = 10;
-}
-
-message FormDataParameterSubSchema {
-  // Determines whether or not this parameter is required or optional.
-  bool required = 1;
-  // Determines the location of the parameter.
-  string in = 2;
-  // A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
-  string description = 3;
-  // The name of the parameter.
-  string name = 4;
-  // allows sending a parameter by name only or with an empty value.
-  bool allow_empty_value = 5;
-  string type = 6;
-  string format = 7;
-  PrimitivesItems items = 8;
-  string collection_format = 9;
-  Any default = 10;
-  double maximum = 11;
-  bool exclusive_maximum = 12;
-  double minimum = 13;
-  bool exclusive_minimum = 14;
-  int64 max_length = 15;
-  int64 min_length = 16;
-  string pattern = 17;
-  int64 max_items = 18;
-  int64 min_items = 19;
-  bool unique_items = 20;
-  repeated Any enum = 21;
-  double multiple_of = 22;
-  repeated NamedAny vendor_extension = 23;
-}
-
-message Header {
-  string type = 1;
-  string format = 2;
-  PrimitivesItems items = 3;
-  string collection_format = 4;
-  Any default = 5;
-  double maximum = 6;
-  bool exclusive_maximum = 7;
-  double minimum = 8;
-  bool exclusive_minimum = 9;
-  int64 max_length = 10;
-  int64 min_length = 11;
-  string pattern = 12;
-  int64 max_items = 13;
-  int64 min_items = 14;
-  bool unique_items = 15;
-  repeated Any enum = 16;
-  double multiple_of = 17;
-  string description = 18;
-  repeated NamedAny vendor_extension = 19;
-}
-
-message HeaderParameterSubSchema {
-  // Determines whether or not this parameter is required or optional.
-  bool required = 1;
-  // Determines the location of the parameter.
-  string in = 2;
-  // A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
-  string description = 3;
-  // The name of the parameter.
-  string name = 4;
-  string type = 5;
-  string format = 6;
-  PrimitivesItems items = 7;
-  string collection_format = 8;
-  Any default = 9;
-  double maximum = 10;
-  bool exclusive_maximum = 11;
-  double minimum = 12;
-  bool exclusive_minimum = 13;
-  int64 max_length = 14;
-  int64 min_length = 15;
-  string pattern = 16;
-  int64 max_items = 17;
-  int64 min_items = 18;
-  bool unique_items = 19;
-  repeated Any enum = 20;
-  double multiple_of = 21;
-  repeated NamedAny vendor_extension = 22;
-}
-
-message Headers {
-  repeated NamedHeader additional_properties = 1;
-}
-
-// General information about the API.
-message Info {
-  // A unique and precise title of the API.
-  string title = 1;
-  // A semantic version number of the API.
-  string version = 2;
-  // A longer description of the API. Should be different from the title.  GitHub Flavored Markdown is allowed.
-  string description = 3;
-  // The terms of service for the API.
-  string terms_of_service = 4;
-  Contact contact = 5;
-  License license = 6;
-  repeated NamedAny vendor_extension = 7;
-}
-
-message ItemsItem {
-  repeated Schema schema = 1;
-}
-
-message JsonReference {
-  string _ref = 1;
-  string description = 2;
-}
-
-message License {
-  // The name of the license type. It's encouraged to use an OSI compatible license.
-  string name = 1;
-  // The URL pointing to the license.
-  string url = 2;
-  repeated NamedAny vendor_extension = 3;
-}
-
-// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs.
-message NamedAny {
-  // Map key
-  string name = 1;
-  // Mapped value
-  Any value = 2;
-}
-
-// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs.
-message NamedHeader {
-  // Map key
-  string name = 1;
-  // Mapped value
-  Header value = 2;
-}
-
-// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs.
-message NamedParameter {
-  // Map key
-  string name = 1;
-  // Mapped value
-  Parameter value = 2;
-}
-
-// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs.
-message NamedPathItem {
-  // Map key
-  string name = 1;
-  // Mapped value
-  PathItem value = 2;
-}
-
-// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs.
-message NamedResponse {
-  // Map key
-  string name = 1;
-  // Mapped value
-  Response value = 2;
-}
-
-// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs.
-message NamedResponseValue {
-  // Map key
-  string name = 1;
-  // Mapped value
-  ResponseValue value = 2;
-}
-
-// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs.
-message NamedSchema {
-  // Map key
-  string name = 1;
-  // Mapped value
-  Schema value = 2;
-}
-
-// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs.
-message NamedSecurityDefinitionsItem {
-  // Map key
-  string name = 1;
-  // Mapped value
-  SecurityDefinitionsItem value = 2;
-}
-
-// Automatically-generated message used to represent maps of string as ordered (name,value) pairs.
-message NamedString {
-  // Map key
-  string name = 1;
-  // Mapped value
-  string value = 2;
-}
-
-// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs.
-message NamedStringArray {
-  // Map key
-  string name = 1;
-  // Mapped value
-  StringArray value = 2;
-}
-
-message NonBodyParameter {
-  oneof oneof {
-    HeaderParameterSubSchema header_parameter_sub_schema = 1;
-    FormDataParameterSubSchema form_data_parameter_sub_schema = 2;
-    QueryParameterSubSchema query_parameter_sub_schema = 3;
-    PathParameterSubSchema path_parameter_sub_schema = 4;
-  }
-}
-
-message Oauth2AccessCodeSecurity {
-  string type = 1;
-  string flow = 2;
-  Oauth2Scopes scopes = 3;
-  string authorization_url = 4;
-  string token_url = 5;
-  string description = 6;
-  repeated NamedAny vendor_extension = 7;
-}
-
-message Oauth2ApplicationSecurity {
-  string type = 1;
-  string flow = 2;
-  Oauth2Scopes scopes = 3;
-  string token_url = 4;
-  string description = 5;
-  repeated NamedAny vendor_extension = 6;
-}
-
-message Oauth2ImplicitSecurity {
-  string type = 1;
-  string flow = 2;
-  Oauth2Scopes scopes = 3;
-  string authorization_url = 4;
-  string description = 5;
-  repeated NamedAny vendor_extension = 6;
-}
-
-message Oauth2PasswordSecurity {
-  string type = 1;
-  string flow = 2;
-  Oauth2Scopes scopes = 3;
-  string token_url = 4;
-  string description = 5;
-  repeated NamedAny vendor_extension = 6;
-}
-
-message Oauth2Scopes {
-  repeated NamedString additional_properties = 1;
-}
-
-message Operation {
-  repeated string tags = 1;
-  // A brief summary of the operation.
-  string summary = 2;
-  // A longer description of the operation, GitHub Flavored Markdown is allowed.
-  string description = 3;
-  ExternalDocs external_docs = 4;
-  // A unique identifier of the operation.
-  string operation_id = 5;
-  // A list of MIME types the API can produce.
-  repeated string produces = 6;
-  // A list of MIME types the API can consume.
-  repeated string consumes = 7;
-  // The parameters needed to send a valid API call.
-  repeated ParametersItem parameters = 8;
-  Responses responses = 9;
-  // The transfer protocol of the API.
-  repeated string schemes = 10;
-  bool deprecated = 11;
-  repeated SecurityRequirement security = 12;
-  repeated NamedAny vendor_extension = 13;
-}
-
-message Parameter {
-  oneof oneof {
-    BodyParameter body_parameter = 1;
-    NonBodyParameter non_body_parameter = 2;
-  }
-}
-
-// One or more JSON representations for parameters
-message ParameterDefinitions {
-  repeated NamedParameter additional_properties = 1;
-}
-
-message ParametersItem {
-  oneof oneof {
-    Parameter parameter = 1;
-    JsonReference json_reference = 2;
-  }
-}
-
-message PathItem {
-  string _ref = 1;
-  Operation get = 2;
-  Operation put = 3;
-  Operation post = 4;
-  Operation delete = 5;
-  Operation options = 6;
-  Operation head = 7;
-  Operation patch = 8;
-  // The parameters needed to send a valid API call.
-  repeated ParametersItem parameters = 9;
-  repeated NamedAny vendor_extension = 10;
-}
-
-message PathParameterSubSchema {
-  // Determines whether or not this parameter is required or optional.
-  bool required = 1;
-  // Determines the location of the parameter.
-  string in = 2;
-  // A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
-  string description = 3;
-  // The name of the parameter.
-  string name = 4;
-  string type = 5;
-  string format = 6;
-  PrimitivesItems items = 7;
-  string collection_format = 8;
-  Any default = 9;
-  double maximum = 10;
-  bool exclusive_maximum = 11;
-  double minimum = 12;
-  bool exclusive_minimum = 13;
-  int64 max_length = 14;
-  int64 min_length = 15;
-  string pattern = 16;
-  int64 max_items = 17;
-  int64 min_items = 18;
-  bool unique_items = 19;
-  repeated Any enum = 20;
-  double multiple_of = 21;
-  repeated NamedAny vendor_extension = 22;
-}
-
-// Relative paths to the individual endpoints. They must be relative to the 'basePath'.
-message Paths {
-  repeated NamedAny vendor_extension = 1;
-  repeated NamedPathItem path = 2;
-}
-
-message PrimitivesItems {
-  string type = 1;
-  string format = 2;
-  PrimitivesItems items = 3;
-  string collection_format = 4;
-  Any default = 5;
-  double maximum = 6;
-  bool exclusive_maximum = 7;
-  double minimum = 8;
-  bool exclusive_minimum = 9;
-  int64 max_length = 10;
-  int64 min_length = 11;
-  string pattern = 12;
-  int64 max_items = 13;
-  int64 min_items = 14;
-  bool unique_items = 15;
-  repeated Any enum = 16;
-  double multiple_of = 17;
-  repeated NamedAny vendor_extension = 18;
-}
-
-message Properties {
-  repeated NamedSchema additional_properties = 1;
-}
-
-message QueryParameterSubSchema {
-  // Determines whether or not this parameter is required or optional.
-  bool required = 1;
-  // Determines the location of the parameter.
-  string in = 2;
-  // A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
-  string description = 3;
-  // The name of the parameter.
-  string name = 4;
-  // allows sending a parameter by name only or with an empty value.
-  bool allow_empty_value = 5;
-  string type = 6;
-  string format = 7;
-  PrimitivesItems items = 8;
-  string collection_format = 9;
-  Any default = 10;
-  double maximum = 11;
-  bool exclusive_maximum = 12;
-  double minimum = 13;
-  bool exclusive_minimum = 14;
-  int64 max_length = 15;
-  int64 min_length = 16;
-  string pattern = 17;
-  int64 max_items = 18;
-  int64 min_items = 19;
-  bool unique_items = 20;
-  repeated Any enum = 21;
-  double multiple_of = 22;
-  repeated NamedAny vendor_extension = 23;
-}
-
-message Response {
-  string description = 1;
-  SchemaItem schema = 2;
-  Headers headers = 3;
-  Examples examples = 4;
-  repeated NamedAny vendor_extension = 5;
-}
-
-// One or more JSON representations for responses
-message ResponseDefinitions {
-  repeated NamedResponse additional_properties = 1;
-}
-
-message ResponseValue {
-  oneof oneof {
-    Response response = 1;
-    JsonReference json_reference = 2;
-  }
-}
-
-// Response object names can either be any valid HTTP status code or 'default'.
-message Responses {
-  repeated NamedResponseValue response_code = 1;
-  repeated NamedAny vendor_extension = 2;
-}
-
-// A deterministic version of a JSON Schema object.
-message Schema {
-  string _ref = 1;
-  string format = 2;
-  string title = 3;
-  string description = 4;
-  Any default = 5;
-  double multiple_of = 6;
-  double maximum = 7;
-  bool exclusive_maximum = 8;
-  double minimum = 9;
-  bool exclusive_minimum = 10;
-  int64 max_length = 11;
-  int64 min_length = 12;
-  string pattern = 13;
-  int64 max_items = 14;
-  int64 min_items = 15;
-  bool unique_items = 16;
-  int64 max_properties = 17;
-  int64 min_properties = 18;
-  repeated string required = 19;
-  repeated Any enum = 20;
-  AdditionalPropertiesItem additional_properties = 21;
-  TypeItem type = 22;
-  ItemsItem items = 23;
-  repeated Schema all_of = 24;
-  Properties properties = 25;
-  string discriminator = 26;
-  bool read_only = 27;
-  Xml xml = 28;
-  ExternalDocs external_docs = 29;
-  Any example = 30;
-  repeated NamedAny vendor_extension = 31;
-}
-
-message SchemaItem {
-  oneof oneof {
-    Schema schema = 1;
-    FileSchema file_schema = 2;
-  }
-}
-
-message SecurityDefinitions {
-  repeated NamedSecurityDefinitionsItem additional_properties = 1;
-}
-
-message SecurityDefinitionsItem {
-  oneof oneof {
-    BasicAuthenticationSecurity basic_authentication_security = 1;
-    ApiKeySecurity api_key_security = 2;
-    Oauth2ImplicitSecurity oauth2_implicit_security = 3;
-    Oauth2PasswordSecurity oauth2_password_security = 4;
-    Oauth2ApplicationSecurity oauth2_application_security = 5;
-    Oauth2AccessCodeSecurity oauth2_access_code_security = 6;
-  }
-}
-
-message SecurityRequirement {
-  repeated NamedStringArray additional_properties = 1;
-}
-
-message StringArray {
-  repeated string value = 1;
-}
-
-message Tag {
-  string name = 1;
-  string description = 2;
-  ExternalDocs external_docs = 3;
-  repeated NamedAny vendor_extension = 4;
-}
-
-message TypeItem {
-  repeated string value = 1;
-}
-
-// Any property starting with x- is valid.
-message VendorExtension {
-  repeated NamedAny additional_properties = 1;
-}
-
-message Xml {
-  string name = 1;
-  string namespace = 2;
-  string prefix = 3;
-  bool attribute = 4;
-  bool wrapped = 5;
-  repeated NamedAny vendor_extension = 6;
-}
-
diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md
deleted file mode 100644
index 836fb32..0000000
--- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# OpenAPI v2 Protocol Buffer Models
-
-This directory contains a Protocol Buffer-language model
-and related code for supporting OpenAPI v2.
-
-Gnostic applications and plugins can use OpenAPIv2.proto
-to generate Protocol Buffer support code for their preferred languages.
-
-OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI 
-descriptions into the Protocol Buffer-based data structures
-generated from OpenAPIv2.proto.
-
-OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic 
-compiler generator, and OpenAPIv2.pb.go is generated by 
-protoc, the Protocol Buffer compiler, and protoc-gen-go, the
-Protocol Buffer Go code generation plugin.
diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json
deleted file mode 100644
index 2815a26..0000000
--- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json
+++ /dev/null
@@ -1,1610 +0,0 @@
-{
-  "title": "A JSON Schema for Swagger 2.0 API.",
-  "id": "http://swagger.io/v2/schema.json#",
-  "$schema": "http://json-schema.org/draft-04/schema#",
-  "type": "object",
-  "required": [
-    "swagger",
-    "info",
-    "paths"
-  ],
-  "additionalProperties": false,
-  "patternProperties": {
-    "^x-": {
-      "$ref": "#/definitions/vendorExtension"
-    }
-  },
-  "properties": {
-    "swagger": {
-      "type": "string",
-      "enum": [
-        "2.0"
-      ],
-      "description": "The Swagger version of this document."
-    },
-    "info": {
-      "$ref": "#/definitions/info"
-    },
-    "host": {
-      "type": "string",
-      "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$",
-      "description": "The host (name or ip) of the API. Example: 'swagger.io'"
-    },
-    "basePath": {
-      "type": "string",
-      "pattern": "^/",
-      "description": "The base path to the API. Example: '/api'."
-    },
-    "schemes": {
-      "$ref": "#/definitions/schemesList"
-    },
-    "consumes": {
-      "description": "A list of MIME types accepted by the API.",
-      "allOf": [
-        {
-          "$ref": "#/definitions/mediaTypeList"
-        }
-      ]
-    },
-    "produces": {
-      "description": "A list of MIME types the API can produce.",
-      "allOf": [
-        {
-          "$ref": "#/definitions/mediaTypeList"
-        }
-      ]
-    },
-    "paths": {
-      "$ref": "#/definitions/paths"
-    },
-    "definitions": {
-      "$ref": "#/definitions/definitions"
-    },
-    "parameters": {
-      "$ref": "#/definitions/parameterDefinitions"
-    },
-    "responses": {
-      "$ref": "#/definitions/responseDefinitions"
-    },
-    "security": {
-      "$ref": "#/definitions/security"
-    },
-    "securityDefinitions": {
-      "$ref": "#/definitions/securityDefinitions"
-    },
-    "tags": {
-      "type": "array",
-      "items": {
-        "$ref": "#/definitions/tag"
-      },
-      "uniqueItems": true
-    },
-    "externalDocs": {
-      "$ref": "#/definitions/externalDocs"
-    }
-  },
-  "definitions": {
-    "info": {
-      "type": "object",
-      "description": "General information about the API.",
-      "required": [
-        "version",
-        "title"
-      ],
-      "additionalProperties": false,
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      },
-      "properties": {
-        "title": {
-          "type": "string",
-          "description": "A unique and precise title of the API."
-        },
-        "version": {
-          "type": "string",
-          "description": "A semantic version number of the API."
-        },
-        "description": {
-          "type": "string",
-          "description": "A longer description of the API. Should be different from the title.  GitHub Flavored Markdown is allowed."
-        },
-        "termsOfService": {
-          "type": "string",
-          "description": "The terms of service for the API."
-        },
-        "contact": {
-          "$ref": "#/definitions/contact"
-        },
-        "license": {
-          "$ref": "#/definitions/license"
-        }
-      }
-    },
-    "contact": {
-      "type": "object",
-      "description": "Contact information for the owners of the API.",
-      "additionalProperties": false,
-      "properties": {
-        "name": {
-          "type": "string",
-          "description": "The identifying name of the contact person/organization."
-        },
-        "url": {
-          "type": "string",
-          "description": "The URL pointing to the contact information.",
-          "format": "uri"
-        },
-        "email": {
-          "type": "string",
-          "description": "The email address of the contact person/organization.",
-          "format": "email"
-        }
-      },
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      }
-    },
-    "license": {
-      "type": "object",
-      "required": [
-        "name"
-      ],
-      "additionalProperties": false,
-      "properties": {
-        "name": {
-          "type": "string",
-          "description": "The name of the license type. It's encouraged to use an OSI compatible license."
-        },
-        "url": {
-          "type": "string",
-          "description": "The URL pointing to the license.",
-          "format": "uri"
-        }
-      },
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      }
-    },
-    "paths": {
-      "type": "object",
-      "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.",
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        },
-        "^/": {
-          "$ref": "#/definitions/pathItem"
-        }
-      },
-      "additionalProperties": false
-    },
-    "definitions": {
-      "type": "object",
-      "additionalProperties": {
-        "$ref": "#/definitions/schema"
-      },
-      "description": "One or more JSON objects describing the schemas being consumed and produced by the API."
-    },
-    "parameterDefinitions": {
-      "type": "object",
-      "additionalProperties": {
-        "$ref": "#/definitions/parameter"
-      },
-      "description": "One or more JSON representations for parameters"
-    },
-    "responseDefinitions": {
-      "type": "object",
-      "additionalProperties": {
-        "$ref": "#/definitions/response"
-      },
-      "description": "One or more JSON representations for parameters"
-    },
-    "externalDocs": {
-      "type": "object",
-      "additionalProperties": false,
-      "description": "information about external documentation",
-      "required": [
-        "url"
-      ],
-      "properties": {
-        "description": {
-          "type": "string"
-        },
-        "url": {
-          "type": "string",
-          "format": "uri"
-        }
-      },
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      }
-    },
-    "examples": {
-      "type": "object",
-      "additionalProperties": true
-    },
-    "mimeType": {
-      "type": "string",
-      "description": "The MIME type of the HTTP message."
-    },
-    "operation": {
-      "type": "object",
-      "required": [
-        "responses"
-      ],
-      "additionalProperties": false,
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      },
-      "properties": {
-        "tags": {
-          "type": "array",
-          "items": {
-            "type": "string"
-          },
-          "uniqueItems": true
-        },
-        "summary": {
-          "type": "string",
-          "description": "A brief summary of the operation."
-        },
-        "description": {
-          "type": "string",
-          "description": "A longer description of the operation, GitHub Flavored Markdown is allowed."
-        },
-        "externalDocs": {
-          "$ref": "#/definitions/externalDocs"
-        },
-        "operationId": {
-          "type": "string",
-          "description": "A unique identifier of the operation."
-        },
-        "produces": {
-          "description": "A list of MIME types the API can produce.",
-          "allOf": [
-            {
-              "$ref": "#/definitions/mediaTypeList"
-            }
-          ]
-        },
-        "consumes": {
-          "description": "A list of MIME types the API can consume.",
-          "allOf": [
-            {
-              "$ref": "#/definitions/mediaTypeList"
-            }
-          ]
-        },
-        "parameters": {
-          "$ref": "#/definitions/parametersList"
-        },
-        "responses": {
-          "$ref": "#/definitions/responses"
-        },
-        "schemes": {
-          "$ref": "#/definitions/schemesList"
-        },
-        "deprecated": {
-          "type": "boolean",
-          "default": false
-        },
-        "security": {
-          "$ref": "#/definitions/security"
-        }
-      }
-    },
-    "pathItem": {
-      "type": "object",
-      "additionalProperties": false,
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      },
-      "properties": {
-        "$ref": {
-          "type": "string"
-        },
-        "get": {
-          "$ref": "#/definitions/operation"
-        },
-        "put": {
-          "$ref": "#/definitions/operation"
-        },
-        "post": {
-          "$ref": "#/definitions/operation"
-        },
-        "delete": {
-          "$ref": "#/definitions/operation"
-        },
-        "options": {
-          "$ref": "#/definitions/operation"
-        },
-        "head": {
-          "$ref": "#/definitions/operation"
-        },
-        "patch": {
-          "$ref": "#/definitions/operation"
-        },
-        "parameters": {
-          "$ref": "#/definitions/parametersList"
-        }
-      }
-    },
-    "responses": {
-      "type": "object",
-      "description": "Response objects names can either be any valid HTTP status code or 'default'.",
-      "minProperties": 1,
-      "additionalProperties": false,
-      "patternProperties": {
-        "^([0-9]{3})$|^(default)$": {
-          "$ref": "#/definitions/responseValue"
-        },
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      },
-      "not": {
-        "type": "object",
-        "additionalProperties": false,
-        "patternProperties": {
-          "^x-": {
-            "$ref": "#/definitions/vendorExtension"
-          }
-        }
-      }
-    },
-    "responseValue": {
-      "oneOf": [
-        {
-          "$ref": "#/definitions/response"
-        },
-        {
-          "$ref": "#/definitions/jsonReference"
-        }
-      ]
-    },
-    "response": {
-      "type": "object",
-      "required": [
-        "description"
-      ],
-      "properties": {
-        "description": {
-          "type": "string"
-        },
-        "schema": {
-          "oneOf": [
-            {
-              "$ref": "#/definitions/schema"
-            },
-            {
-              "$ref": "#/definitions/fileSchema"
-            }
-          ]
-        },
-        "headers": {
-          "$ref": "#/definitions/headers"
-        },
-        "examples": {
-          "$ref": "#/definitions/examples"
-        }
-      },
-      "additionalProperties": false,
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      }
-    },
-    "headers": {
-      "type": "object",
-      "additionalProperties": {
-        "$ref": "#/definitions/header"
-      }
-    },
-    "header": {
-      "type": "object",
-      "additionalProperties": false,
-      "required": [
-        "type"
-      ],
-      "properties": {
-        "type": {
-          "type": "string",
-          "enum": [
-            "string",
-            "number",
-            "integer",
-            "boolean",
-            "array"
-          ]
-        },
-        "format": {
-          "type": "string"
-        },
-        "items": {
-          "$ref": "#/definitions/primitivesItems"
-        },
-        "collectionFormat": {
-          "$ref": "#/definitions/collectionFormat"
-        },
-        "default": {
-          "$ref": "#/definitions/default"
-        },
-        "maximum": {
-          "$ref": "#/definitions/maximum"
-        },
-        "exclusiveMaximum": {
-          "$ref": "#/definitions/exclusiveMaximum"
-        },
-        "minimum": {
-          "$ref": "#/definitions/minimum"
-        },
-        "exclusiveMinimum": {
-          "$ref": "#/definitions/exclusiveMinimum"
-        },
-        "maxLength": {
-          "$ref": "#/definitions/maxLength"
-        },
-        "minLength": {
-          "$ref": "#/definitions/minLength"
-        },
-        "pattern": {
-          "$ref": "#/definitions/pattern"
-        },
-        "maxItems": {
-          "$ref": "#/definitions/maxItems"
-        },
-        "minItems": {
-          "$ref": "#/definitions/minItems"
-        },
-        "uniqueItems": {
-          "$ref": "#/definitions/uniqueItems"
-        },
-        "enum": {
-          "$ref": "#/definitions/enum"
-        },
-        "multipleOf": {
-          "$ref": "#/definitions/multipleOf"
-        },
-        "description": {
-          "type": "string"
-        }
-      },
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      }
-    },
-    "vendorExtension": {
-      "description": "Any property starting with x- is valid.",
-      "additionalProperties": true,
-      "additionalItems": true
-    },
-    "bodyParameter": {
-      "type": "object",
-      "required": [
-        "name",
-        "in",
-        "schema"
-      ],
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      },
-      "properties": {
-        "description": {
-          "type": "string",
-          "description": "A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed."
-        },
-        "name": {
-          "type": "string",
-          "description": "The name of the parameter."
-        },
-        "in": {
-          "type": "string",
-          "description": "Determines the location of the parameter.",
-          "enum": [
-            "body"
-          ]
-        },
-        "required": {
-          "type": "boolean",
-          "description": "Determines whether or not this parameter is required or optional.",
-          "default": false
-        },
-        "schema": {
-          "$ref": "#/definitions/schema"
-        }
-      },
-      "additionalProperties": false
-    },
-    "headerParameterSubSchema": {
-      "additionalProperties": false,
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      },
-      "properties": {
-        "required": {
-          "type": "boolean",
-          "description": "Determines whether or not this parameter is required or optional.",
-          "default": false
-        },
-        "in": {
-          "type": "string",
-          "description": "Determines the location of the parameter.",
-          "enum": [
-            "header"
-          ]
-        },
-        "description": {
-          "type": "string",
-          "description": "A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed."
-        },
-        "name": {
-          "type": "string",
-          "description": "The name of the parameter."
-        },
-        "type": {
-          "type": "string",
-          "enum": [
-            "string",
-            "number",
-            "boolean",
-            "integer",
-            "array"
-          ]
-        },
-        "format": {
-          "type": "string"
-        },
-        "items": {
-          "$ref": "#/definitions/primitivesItems"
-        },
-        "collectionFormat": {
-          "$ref": "#/definitions/collectionFormat"
-        },
-        "default": {
-          "$ref": "#/definitions/default"
-        },
-        "maximum": {
-          "$ref": "#/definitions/maximum"
-        },
-        "exclusiveMaximum": {
-          "$ref": "#/definitions/exclusiveMaximum"
-        },
-        "minimum": {
-          "$ref": "#/definitions/minimum"
-        },
-        "exclusiveMinimum": {
-          "$ref": "#/definitions/exclusiveMinimum"
-        },
-        "maxLength": {
-          "$ref": "#/definitions/maxLength"
-        },
-        "minLength": {
-          "$ref": "#/definitions/minLength"
-        },
-        "pattern": {
-          "$ref": "#/definitions/pattern"
-        },
-        "maxItems": {
-          "$ref": "#/definitions/maxItems"
-        },
-        "minItems": {
-          "$ref": "#/definitions/minItems"
-        },
-        "uniqueItems": {
-          "$ref": "#/definitions/uniqueItems"
-        },
-        "enum": {
-          "$ref": "#/definitions/enum"
-        },
-        "multipleOf": {
-          "$ref": "#/definitions/multipleOf"
-        }
-      }
-    },
-    "queryParameterSubSchema": {
-      "additionalProperties": false,
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      },
-      "properties": {
-        "required": {
-          "type": "boolean",
-          "description": "Determines whether or not this parameter is required or optional.",
-          "default": false
-        },
-        "in": {
-          "type": "string",
-          "description": "Determines the location of the parameter.",
-          "enum": [
-            "query"
-          ]
-        },
-        "description": {
-          "type": "string",
-          "description": "A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed."
-        },
-        "name": {
-          "type": "string",
-          "description": "The name of the parameter."
-        },
-        "allowEmptyValue": {
-          "type": "boolean",
-          "default": false,
-          "description": "allows sending a parameter by name only or with an empty value."
-        },
-        "type": {
-          "type": "string",
-          "enum": [
-            "string",
-            "number",
-            "boolean",
-            "integer",
-            "array"
-          ]
-        },
-        "format": {
-          "type": "string"
-        },
-        "items": {
-          "$ref": "#/definitions/primitivesItems"
-        },
-        "collectionFormat": {
-          "$ref": "#/definitions/collectionFormatWithMulti"
-        },
-        "default": {
-          "$ref": "#/definitions/default"
-        },
-        "maximum": {
-          "$ref": "#/definitions/maximum"
-        },
-        "exclusiveMaximum": {
-          "$ref": "#/definitions/exclusiveMaximum"
-        },
-        "minimum": {
-          "$ref": "#/definitions/minimum"
-        },
-        "exclusiveMinimum": {
-          "$ref": "#/definitions/exclusiveMinimum"
-        },
-        "maxLength": {
-          "$ref": "#/definitions/maxLength"
-        },
-        "minLength": {
-          "$ref": "#/definitions/minLength"
-        },
-        "pattern": {
-          "$ref": "#/definitions/pattern"
-        },
-        "maxItems": {
-          "$ref": "#/definitions/maxItems"
-        },
-        "minItems": {
-          "$ref": "#/definitions/minItems"
-        },
-        "uniqueItems": {
-          "$ref": "#/definitions/uniqueItems"
-        },
-        "enum": {
-          "$ref": "#/definitions/enum"
-        },
-        "multipleOf": {
-          "$ref": "#/definitions/multipleOf"
-        }
-      }
-    },
-    "formDataParameterSubSchema": {
-      "additionalProperties": false,
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      },
-      "properties": {
-        "required": {
-          "type": "boolean",
-          "description": "Determines whether or not this parameter is required or optional.",
-          "default": false
-        },
-        "in": {
-          "type": "string",
-          "description": "Determines the location of the parameter.",
-          "enum": [
-            "formData"
-          ]
-        },
-        "description": {
-          "type": "string",
-          "description": "A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed."
-        },
-        "name": {
-          "type": "string",
-          "description": "The name of the parameter."
-        },
-        "allowEmptyValue": {
-          "type": "boolean",
-          "default": false,
-          "description": "allows sending a parameter by name only or with an empty value."
-        },
-        "type": {
-          "type": "string",
-          "enum": [
-            "string",
-            "number",
-            "boolean",
-            "integer",
-            "array",
-            "file"
-          ]
-        },
-        "format": {
-          "type": "string"
-        },
-        "items": {
-          "$ref": "#/definitions/primitivesItems"
-        },
-        "collectionFormat": {
-          "$ref": "#/definitions/collectionFormatWithMulti"
-        },
-        "default": {
-          "$ref": "#/definitions/default"
-        },
-        "maximum": {
-          "$ref": "#/definitions/maximum"
-        },
-        "exclusiveMaximum": {
-          "$ref": "#/definitions/exclusiveMaximum"
-        },
-        "minimum": {
-          "$ref": "#/definitions/minimum"
-        },
-        "exclusiveMinimum": {
-          "$ref": "#/definitions/exclusiveMinimum"
-        },
-        "maxLength": {
-          "$ref": "#/definitions/maxLength"
-        },
-        "minLength": {
-          "$ref": "#/definitions/minLength"
-        },
-        "pattern": {
-          "$ref": "#/definitions/pattern"
-        },
-        "maxItems": {
-          "$ref": "#/definitions/maxItems"
-        },
-        "minItems": {
-          "$ref": "#/definitions/minItems"
-        },
-        "uniqueItems": {
-          "$ref": "#/definitions/uniqueItems"
-        },
-        "enum": {
-          "$ref": "#/definitions/enum"
-        },
-        "multipleOf": {
-          "$ref": "#/definitions/multipleOf"
-        }
-      }
-    },
-    "pathParameterSubSchema": {
-      "additionalProperties": false,
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      },
-      "required": [
-        "required"
-      ],
-      "properties": {
-        "required": {
-          "type": "boolean",
-          "enum": [
-            true
-          ],
-          "description": "Determines whether or not this parameter is required or optional."
-        },
-        "in": {
-          "type": "string",
-          "description": "Determines the location of the parameter.",
-          "enum": [
-            "path"
-          ]
-        },
-        "description": {
-          "type": "string",
-          "description": "A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed."
-        },
-        "name": {
-          "type": "string",
-          "description": "The name of the parameter."
-        },
-        "type": {
-          "type": "string",
-          "enum": [
-            "string",
-            "number",
-            "boolean",
-            "integer",
-            "array"
-          ]
-        },
-        "format": {
-          "type": "string"
-        },
-        "items": {
-          "$ref": "#/definitions/primitivesItems"
-        },
-        "collectionFormat": {
-          "$ref": "#/definitions/collectionFormat"
-        },
-        "default": {
-          "$ref": "#/definitions/default"
-        },
-        "maximum": {
-          "$ref": "#/definitions/maximum"
-        },
-        "exclusiveMaximum": {
-          "$ref": "#/definitions/exclusiveMaximum"
-        },
-        "minimum": {
-          "$ref": "#/definitions/minimum"
-        },
-        "exclusiveMinimum": {
-          "$ref": "#/definitions/exclusiveMinimum"
-        },
-        "maxLength": {
-          "$ref": "#/definitions/maxLength"
-        },
-        "minLength": {
-          "$ref": "#/definitions/minLength"
-        },
-        "pattern": {
-          "$ref": "#/definitions/pattern"
-        },
-        "maxItems": {
-          "$ref": "#/definitions/maxItems"
-        },
-        "minItems": {
-          "$ref": "#/definitions/minItems"
-        },
-        "uniqueItems": {
-          "$ref": "#/definitions/uniqueItems"
-        },
-        "enum": {
-          "$ref": "#/definitions/enum"
-        },
-        "multipleOf": {
-          "$ref": "#/definitions/multipleOf"
-        }
-      }
-    },
-    "nonBodyParameter": {
-      "type": "object",
-      "required": [
-        "name",
-        "in",
-        "type"
-      ],
-      "oneOf": [
-        {
-          "$ref": "#/definitions/headerParameterSubSchema"
-        },
-        {
-          "$ref": "#/definitions/formDataParameterSubSchema"
-        },
-        {
-          "$ref": "#/definitions/queryParameterSubSchema"
-        },
-        {
-          "$ref": "#/definitions/pathParameterSubSchema"
-        }
-      ]
-    },
-    "parameter": {
-      "oneOf": [
-        {
-          "$ref": "#/definitions/bodyParameter"
-        },
-        {
-          "$ref": "#/definitions/nonBodyParameter"
-        }
-      ]
-    },
-    "schema": {
-      "type": "object",
-      "description": "A deterministic version of a JSON Schema object.",
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      },
-      "properties": {
-        "$ref": {
-          "type": "string"
-        },
-        "format": {
-          "type": "string"
-        },
-        "title": {
-          "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
-        },
-        "description": {
-          "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
-        },
-        "default": {
-          "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
-        },
-        "multipleOf": {
-          "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf"
-        },
-        "maximum": {
-          "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum"
-        },
-        "exclusiveMaximum": {
-          "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum"
-        },
-        "minimum": {
-          "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum"
-        },
-        "exclusiveMinimum": {
-          "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum"
-        },
-        "maxLength": {
-          "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
-        },
-        "minLength": {
-          "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
-        },
-        "pattern": {
-          "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern"
-        },
-        "maxItems": {
-          "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
-        },
-        "minItems": {
-          "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
-        },
-        "uniqueItems": {
-          "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems"
-        },
-        "maxProperties": {
-          "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
-        },
-        "minProperties": {
-          "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
-        },
-        "required": {
-          "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray"
-        },
-        "enum": {
-          "$ref": "http://json-schema.org/draft-04/schema#/properties/enum"
-        },
-        "additionalProperties": {
-          "oneOf": [
-            {
-              "$ref": "#/definitions/schema"
-            },
-            {
-              "type": "boolean"
-            }
-          ],
-          "default": {}
-        },
-        "type": {
-          "$ref": "http://json-schema.org/draft-04/schema#/properties/type"
-        },
-        "items": {
-          "anyOf": [
-            {
-              "$ref": "#/definitions/schema"
-            },
-            {
-              "type": "array",
-              "minItems": 1,
-              "items": {
-                "$ref": "#/definitions/schema"
-              }
-            }
-          ],
-          "default": {}
-        },
-        "allOf": {
-          "type": "array",
-          "minItems": 1,
-          "items": {
-            "$ref": "#/definitions/schema"
-          }
-        },
-        "properties": {
-          "type": "object",
-          "additionalProperties": {
-            "$ref": "#/definitions/schema"
-          },
-          "default": {}
-        },
-        "discriminator": {
-          "type": "string"
-        },
-        "readOnly": {
-          "type": "boolean",
-          "default": false
-        },
-        "xml": {
-          "$ref": "#/definitions/xml"
-        },
-        "externalDocs": {
-          "$ref": "#/definitions/externalDocs"
-        },
-        "example": {}
-      },
-      "additionalProperties": false
-    },
-    "fileSchema": {
-      "type": "object",
-      "description": "A deterministic version of a JSON Schema object.",
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      },
-      "required": [
-        "type"
-      ],
-      "properties": {
-        "format": {
-          "type": "string"
-        },
-        "title": {
-          "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
-        },
-        "description": {
-          "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
-        },
-        "default": {
-          "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
-        },
-        "required": {
-          "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray"
-        },
-        "type": {
-          "type": "string",
-          "enum": [
-            "file"
-          ]
-        },
-        "readOnly": {
-          "type": "boolean",
-          "default": false
-        },
-        "externalDocs": {
-          "$ref": "#/definitions/externalDocs"
-        },
-        "example": {}
-      },
-      "additionalProperties": false
-    },
-    "primitivesItems": {
-      "type": "object",
-      "additionalProperties": false,
-      "properties": {
-        "type": {
-          "type": "string",
-          "enum": [
-            "string",
-            "number",
-            "integer",
-            "boolean",
-            "array"
-          ]
-        },
-        "format": {
-          "type": "string"
-        },
-        "items": {
-          "$ref": "#/definitions/primitivesItems"
-        },
-        "collectionFormat": {
-          "$ref": "#/definitions/collectionFormat"
-        },
-        "default": {
-          "$ref": "#/definitions/default"
-        },
-        "maximum": {
-          "$ref": "#/definitions/maximum"
-        },
-        "exclusiveMaximum": {
-          "$ref": "#/definitions/exclusiveMaximum"
-        },
-        "minimum": {
-          "$ref": "#/definitions/minimum"
-        },
-        "exclusiveMinimum": {
-          "$ref": "#/definitions/exclusiveMinimum"
-        },
-        "maxLength": {
-          "$ref": "#/definitions/maxLength"
-        },
-        "minLength": {
-          "$ref": "#/definitions/minLength"
-        },
-        "pattern": {
-          "$ref": "#/definitions/pattern"
-        },
-        "maxItems": {
-          "$ref": "#/definitions/maxItems"
-        },
-        "minItems": {
-          "$ref": "#/definitions/minItems"
-        },
-        "uniqueItems": {
-          "$ref": "#/definitions/uniqueItems"
-        },
-        "enum": {
-          "$ref": "#/definitions/enum"
-        },
-        "multipleOf": {
-          "$ref": "#/definitions/multipleOf"
-        }
-      },
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      }
-    },
-    "security": {
-      "type": "array",
-      "items": {
-        "$ref": "#/definitions/securityRequirement"
-      },
-      "uniqueItems": true
-    },
-    "securityRequirement": {
-      "type": "object",
-      "additionalProperties": {
-        "type": "array",
-        "items": {
-          "type": "string"
-        },
-        "uniqueItems": true
-      }
-    },
-    "xml": {
-      "type": "object",
-      "additionalProperties": false,
-      "properties": {
-        "name": {
-          "type": "string"
-        },
-        "namespace": {
-          "type": "string"
-        },
-        "prefix": {
-          "type": "string"
-        },
-        "attribute": {
-          "type": "boolean",
-          "default": false
-        },
-        "wrapped": {
-          "type": "boolean",
-          "default": false
-        }
-      },
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      }
-    },
-    "tag": {
-      "type": "object",
-      "additionalProperties": false,
-      "required": [
-        "name"
-      ],
-      "properties": {
-        "name": {
-          "type": "string"
-        },
-        "description": {
-          "type": "string"
-        },
-        "externalDocs": {
-          "$ref": "#/definitions/externalDocs"
-        }
-      },
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      }
-    },
-    "securityDefinitions": {
-      "type": "object",
-      "additionalProperties": {
-        "oneOf": [
-          {
-            "$ref": "#/definitions/basicAuthenticationSecurity"
-          },
-          {
-            "$ref": "#/definitions/apiKeySecurity"
-          },
-          {
-            "$ref": "#/definitions/oauth2ImplicitSecurity"
-          },
-          {
-            "$ref": "#/definitions/oauth2PasswordSecurity"
-          },
-          {
-            "$ref": "#/definitions/oauth2ApplicationSecurity"
-          },
-          {
-            "$ref": "#/definitions/oauth2AccessCodeSecurity"
-          }
-        ]
-      }
-    },
-    "basicAuthenticationSecurity": {
-      "type": "object",
-      "additionalProperties": false,
-      "required": [
-        "type"
-      ],
-      "properties": {
-        "type": {
-          "type": "string",
-          "enum": [
-            "basic"
-          ]
-        },
-        "description": {
-          "type": "string"
-        }
-      },
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      }
-    },
-    "apiKeySecurity": {
-      "type": "object",
-      "additionalProperties": false,
-      "required": [
-        "type",
-        "name",
-        "in"
-      ],
-      "properties": {
-        "type": {
-          "type": "string",
-          "enum": [
-            "apiKey"
-          ]
-        },
-        "name": {
-          "type": "string"
-        },
-        "in": {
-          "type": "string",
-          "enum": [
-            "header",
-            "query"
-          ]
-        },
-        "description": {
-          "type": "string"
-        }
-      },
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      }
-    },
-    "oauth2ImplicitSecurity": {
-      "type": "object",
-      "additionalProperties": false,
-      "required": [
-        "type",
-        "flow",
-        "authorizationUrl"
-      ],
-      "properties": {
-        "type": {
-          "type": "string",
-          "enum": [
-            "oauth2"
-          ]
-        },
-        "flow": {
-          "type": "string",
-          "enum": [
-            "implicit"
-          ]
-        },
-        "scopes": {
-          "$ref": "#/definitions/oauth2Scopes"
-        },
-        "authorizationUrl": {
-          "type": "string",
-          "format": "uri"
-        },
-        "description": {
-          "type": "string"
-        }
-      },
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      }
-    },
-    "oauth2PasswordSecurity": {
-      "type": "object",
-      "additionalProperties": false,
-      "required": [
-        "type",
-        "flow",
-        "tokenUrl"
-      ],
-      "properties": {
-        "type": {
-          "type": "string",
-          "enum": [
-            "oauth2"
-          ]
-        },
-        "flow": {
-          "type": "string",
-          "enum": [
-            "password"
-          ]
-        },
-        "scopes": {
-          "$ref": "#/definitions/oauth2Scopes"
-        },
-        "tokenUrl": {
-          "type": "string",
-          "format": "uri"
-        },
-        "description": {
-          "type": "string"
-        }
-      },
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      }
-    },
-    "oauth2ApplicationSecurity": {
-      "type": "object",
-      "additionalProperties": false,
-      "required": [
-        "type",
-        "flow",
-        "tokenUrl"
-      ],
-      "properties": {
-        "type": {
-          "type": "string",
-          "enum": [
-            "oauth2"
-          ]
-        },
-        "flow": {
-          "type": "string",
-          "enum": [
-            "application"
-          ]
-        },
-        "scopes": {
-          "$ref": "#/definitions/oauth2Scopes"
-        },
-        "tokenUrl": {
-          "type": "string",
-          "format": "uri"
-        },
-        "description": {
-          "type": "string"
-        }
-      },
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      }
-    },
-    "oauth2AccessCodeSecurity": {
-      "type": "object",
-      "additionalProperties": false,
-      "required": [
-        "type",
-        "flow",
-        "authorizationUrl",
-        "tokenUrl"
-      ],
-      "properties": {
-        "type": {
-          "type": "string",
-          "enum": [
-            "oauth2"
-          ]
-        },
-        "flow": {
-          "type": "string",
-          "enum": [
-            "accessCode"
-          ]
-        },
-        "scopes": {
-          "$ref": "#/definitions/oauth2Scopes"
-        },
-        "authorizationUrl": {
-          "type": "string",
-          "format": "uri"
-        },
-        "tokenUrl": {
-          "type": "string",
-          "format": "uri"
-        },
-        "description": {
-          "type": "string"
-        }
-      },
-      "patternProperties": {
-        "^x-": {
-          "$ref": "#/definitions/vendorExtension"
-        }
-      }
-    },
-    "oauth2Scopes": {
-      "type": "object",
-      "additionalProperties": {
-        "type": "string"
-      }
-    },
-    "mediaTypeList": {
-      "type": "array",
-      "items": {
-        "$ref": "#/definitions/mimeType"
-      },
-      "uniqueItems": true
-    },
-    "parametersList": {
-      "type": "array",
-      "description": "The parameters needed to send a valid API call.",
-      "additionalItems": false,
-      "items": {
-        "oneOf": [
-          {
-            "$ref": "#/definitions/parameter"
-          },
-          {
-            "$ref": "#/definitions/jsonReference"
-          }
-        ]
-      },
-      "uniqueItems": true
-    },
-    "schemesList": {
-      "type": "array",
-      "description": "The transfer protocol of the API.",
-      "items": {
-        "type": "string",
-        "enum": [
-          "http",
-          "https",
-          "ws",
-          "wss"
-        ]
-      },
-      "uniqueItems": true
-    },
-    "collectionFormat": {
-      "type": "string",
-      "enum": [
-        "csv",
-        "ssv",
-        "tsv",
-        "pipes"
-      ],
-      "default": "csv"
-    },
-    "collectionFormatWithMulti": {
-      "type": "string",
-      "enum": [
-        "csv",
-        "ssv",
-        "tsv",
-        "pipes",
-        "multi"
-      ],
-      "default": "csv"
-    },
-    "title": {
-      "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
-    },
-    "description": {
-      "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
-    },
-    "default": {
-      "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
-    },
-    "multipleOf": {
-      "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf"
-    },
-    "maximum": {
-      "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum"
-    },
-    "exclusiveMaximum": {
-      "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum"
-    },
-    "minimum": {
-      "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum"
-    },
-    "exclusiveMinimum": {
-      "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum"
-    },
-    "maxLength": {
-      "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
-    },
-    "minLength": {
-      "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
-    },
-    "pattern": {
-      "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern"
-    },
-    "maxItems": {
-      "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
-    },
-    "minItems": {
-      "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
-    },
-    "uniqueItems": {
-      "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems"
-    },
-    "enum": {
-      "$ref": "http://json-schema.org/draft-04/schema#/properties/enum"
-    },
-    "jsonReference": {
-      "type": "object",
-      "required": [
-        "$ref"
-      ],
-      "additionalProperties": false,
-      "properties": {
-        "$ref": {
-          "type": "string"
-        },
-        "description": {
-          "type": "string"
-        }
-      }
-    }
-  }
-}
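Note: for readers unfamiliar with the schema deleted above, a minimal parameter object that satisfies nonBodyParameter (via queryParameterSubSchema) would look like the following. The field values are hypothetical, not taken from this change:

{
  "name": "limit",
  "in": "query",
  "description": "Maximum number of results to return.",
  "required": false,
  "type": "integer",
  "format": "int32"
}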
diff --git a/vendor/github.com/googleapis/gnostic/compiler/README.md b/vendor/github.com/googleapis/gnostic/compiler/README.md
deleted file mode 100644
index 848b16c..0000000
--- a/vendor/github.com/googleapis/gnostic/compiler/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Compiler support code
-
-This directory contains compiler support code used by Gnostic and Gnostic extensions.
\ No newline at end of file
diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/googleapis/gnostic/compiler/context.go
deleted file mode 100644
index a64c1b7..0000000
--- a/vendor/github.com/googleapis/gnostic/compiler/context.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package compiler
-
-// Context contains state of the compiler as it traverses a document.
-type Context struct {
-	Parent            *Context
-	Name              string
-	ExtensionHandlers *[]ExtensionHandler
-}
-
-// NewContextWithExtensions returns a new object representing the compiler state
-func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context {
-	return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers}
-}
-
-// NewContext returns a new object representing the compiler state
-func NewContext(name string, parent *Context) *Context {
-	if parent != nil {
-		return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers}
-	}
-	return &Context{Name: name, Parent: parent, ExtensionHandlers: nil}
-}
-
-// Description returns a text description of the compiler state
-func (context *Context) Description() string {
-	if context.Parent != nil {
-		return context.Parent.Description() + "." + context.Name
-	}
-	return context.Name
-}
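Note: the Context type removed above exists to build a dotted path for error reporting. A minimal usage sketch, assuming the package is still importable from its upstream location:

package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	// Contexts chain parent-to-child; Description joins the names with dots.
	root := compiler.NewContext("document", nil)
	child := compiler.NewContext("paths", root)
	fmt.Println(child.Description()) // prints: document.paths
}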
diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/googleapis/gnostic/compiler/error.go
deleted file mode 100644
index d8672c1..0000000
--- a/vendor/github.com/googleapis/gnostic/compiler/error.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package compiler
-
-// Error represents compiler errors and their location in the document.
-type Error struct {
-	Context *Context
-	Message string
-}
-
-// NewError creates an Error.
-func NewError(context *Context, message string) *Error {
-	return &Error{Context: context, Message: message}
-}
-
-// Error returns the string value of an Error.
-func (err *Error) Error() string {
-	if err.Context == nil {
-		return "ERROR " + err.Message
-	}
-	return "ERROR " + err.Context.Description() + " " + err.Message
-}
-
-// ErrorGroup is a container for groups of Error values.
-type ErrorGroup struct {
-	Errors []error
-}
-
-// NewErrorGroupOrNil returns nil for an empty slice, the single error for a one-element slice, and an ErrorGroup otherwise.
-func NewErrorGroupOrNil(errors []error) error {
-	if len(errors) == 0 {
-		return nil
-	} else if len(errors) == 1 {
-		return errors[0]
-	} else {
-		return &ErrorGroup{Errors: errors}
-	}
-}
-
-func (group *ErrorGroup) Error() string {
-	result := ""
-	for i, err := range group.Errors {
-		if i > 0 {
-			result += "\n"
-		}
-		result += err.Error()
-	}
-	return result
-}
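Note: NewErrorGroupOrNil, removed above, is the aggregation point for compiler errors. A minimal sketch of its collapsing behavior, using the upstream import path:

package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	// Zero errors collapse to nil, one error is returned as-is,
	// and two or more are wrapped in an ErrorGroup.
	errs := []error{
		compiler.NewError(nil, "first problem"),
		compiler.NewError(nil, "second problem"),
	}
	if err := compiler.NewErrorGroupOrNil(errs); err != nil {
		fmt.Println(err) // "ERROR first problem" and "ERROR second problem", one per line
	}
}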
diff --git a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go
deleted file mode 100644
index 1f85b65..0000000
--- a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package compiler
-
-import (
-	"bytes"
-	"fmt"
-	"os/exec"
-
-	"strings"
-
-	"errors"
-
-	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/ptypes/any"
-	ext_plugin "github.com/googleapis/gnostic/extensions"
-	yaml "gopkg.in/yaml.v2"
-)
-
-// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions.
-type ExtensionHandler struct {
-	Name string
-}
-
-// HandleExtension calls a binary extension handler.
-func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) {
-	handled := false
-	var errFromPlugin error
-	var outFromPlugin *any.Any
-
-	if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 {
-		for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) {
-			outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName)
-			if outFromPlugin == nil {
-				continue
-			} else {
-				handled = true
-				break
-			}
-		}
-	}
-	return handled, outFromPlugin, errFromPlugin
-}
-
-func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) {
-	if extensionHandlers.Name != "" {
-		binary, _ := yaml.Marshal(in)
-
-		request := &ext_plugin.ExtensionHandlerRequest{}
-
-		version := &ext_plugin.Version{}
-		version.Major = 0
-		version.Minor = 1
-		version.Patch = 0
-		request.CompilerVersion = version
-
-		request.Wrapper = &ext_plugin.Wrapper{}
-
-		request.Wrapper.Version = "v2"
-		request.Wrapper.Yaml = string(binary)
-		request.Wrapper.ExtensionName = extensionName
-
-		requestBytes, _ := proto.Marshal(request)
-		cmd := exec.Command(extensionHandlers.Name)
-		cmd.Stdin = bytes.NewReader(requestBytes)
-		output, err := cmd.Output()
-
-		if err != nil {
-			fmt.Printf("Error: %+v\n", err)
-			return nil, err
-		}
-		response := &ext_plugin.ExtensionHandlerResponse{}
-		err = proto.Unmarshal(output, response)
-		if err != nil {
-			fmt.Printf("Error: %+v\n", err)
-			fmt.Printf("%s\n", string(output))
-			return nil, err
-		}
-		if !response.Handled {
-			return nil, nil
-		}
-		if len(response.Error) != 0 {
-			message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ","))
-			return nil, errors.New(message)
-		}
-		return response.Value, nil
-	}
-	return nil, nil
-}
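Note: HandleExtension, removed above, shells out to a named handler binary, passing the extension value as YAML and expecting a serialized Any back. A usage sketch; the handler binary name here is hypothetical:

package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	// Attach one handler binary to the compilation context. HandleExtension
	// tries each registered handler until one returns a non-nil value.
	handlers := []compiler.ExtensionHandler{{Name: "gnostic-x-sample"}}
	context := compiler.NewContextWithExtensions("document", nil, &handlers)
	handled, value, err := compiler.HandleExtension(context, map[string]string{"key": "value"}, "x-sample")
	fmt.Println(handled, value, err)
}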
diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/googleapis/gnostic/compiler/helpers.go
deleted file mode 100644
index 76df635..0000000
--- a/vendor/github.com/googleapis/gnostic/compiler/helpers.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package compiler
-
-import (
-	"fmt"
-	"gopkg.in/yaml.v2"
-	"regexp"
-	"sort"
-	"strconv"
-)
-
-// compiler helper functions, usually called from generated code
-
-// UnpackMap gets a yaml.MapSlice if possible.
-func UnpackMap(in interface{}) (yaml.MapSlice, bool) {
-	m, ok := in.(yaml.MapSlice)
-	if ok {
-		return m, true
-	}
-	// do we have an empty array?
-	a, ok := in.([]interface{})
-	if ok && len(a) == 0 {
-		// if so, return an empty map
-		return yaml.MapSlice{}, true
-	}
-	return nil, false
-}
-
-// SortedKeysForMap returns the sorted keys of a yaml.MapSlice.
-func SortedKeysForMap(m yaml.MapSlice) []string {
-	keys := make([]string, 0)
-	for _, item := range m {
-		keys = append(keys, item.Key.(string))
-	}
-	sort.Strings(keys)
-	return keys
-}
-
-// MapHasKey returns true if a yaml.MapSlice contains a specified key.
-func MapHasKey(m yaml.MapSlice, key string) bool {
-	for _, item := range m {
-		itemKey, ok := item.Key.(string)
-		if ok && key == itemKey {
-			return true
-		}
-	}
-	return false
-}
-
-// MapValueForKey gets the value of a map value for a specified key.
-func MapValueForKey(m yaml.MapSlice, key string) interface{} {
-	for _, item := range m {
-		itemKey, ok := item.Key.(string)
-		if ok && key == itemKey {
-			return item.Value
-		}
-	}
-	return nil
-}
-
-// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible.
-func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string {
-	stringArray := make([]string, 0)
-	for _, item := range interfaceArray {
-		v, ok := item.(string)
-		if ok {
-			stringArray = append(stringArray, v)
-		}
-	}
-	return stringArray
-}
-
-// MissingKeysInMap identifies which keys from a list of required keys are not in a map.
-func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string {
-	missingKeys := make([]string, 0)
-	for _, k := range requiredKeys {
-		if !MapHasKey(m, k) {
-			missingKeys = append(missingKeys, k)
-		}
-	}
-	return missingKeys
-}
-
-// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns.
-func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string {
-	invalidKeys := make([]string, 0)
-	for _, item := range m {
-		itemKey, ok := item.Key.(string)
-		if ok {
-			key := itemKey
-			found := false
-			// does the key match an allowed key?
-			for _, allowedKey := range allowedKeys {
-				if key == allowedKey {
-					found = true
-					break
-				}
-			}
-			if !found {
-				// does the key match an allowed pattern?
-				for _, allowedPattern := range allowedPatterns {
-					if allowedPattern.MatchString(key) {
-						found = true
-						break
-					}
-				}
-				if !found {
-					invalidKeys = append(invalidKeys, key)
-				}
-			}
-		}
-	}
-	return invalidKeys
-}
-
-// DescribeMap describes a map (for debugging purposes).
-func DescribeMap(in interface{}, indent string) string {
-	description := ""
-	m, ok := in.(map[string]interface{})
-	if ok {
-		keys := make([]string, 0)
-		for k := range m {
-			keys = append(keys, k)
-		}
-		sort.Strings(keys)
-		for _, k := range keys {
-			v := m[k]
-			description += fmt.Sprintf("%s%s:\n", indent, k)
-			description += DescribeMap(v, indent+"  ")
-		}
-		return description
-	}
-	a, ok := in.([]interface{})
-	if ok {
-		for i, v := range a {
-			description += fmt.Sprintf("%s%d:\n", indent, i)
-			description += DescribeMap(v, indent+"  ")
-		}
-		return description
-	}
-	description += fmt.Sprintf("%s%+v\n", indent, in)
-	return description
-}
-
-// PluralProperties returns "property" or "properties" according to count.
-func PluralProperties(count int) string {
-	if count == 1 {
-		return "property"
-	}
-	return "properties"
-}
-
-// StringArrayContainsValue returns true if a string array contains a specified value.
-func StringArrayContainsValue(array []string, value string) bool {
-	for _, item := range array {
-		if item == value {
-			return true
-		}
-	}
-	return false
-}
-
-// StringArrayContainsValues returns true if a string array contains all of a list of specified values.
-func StringArrayContainsValues(array []string, values []string) bool {
-	for _, value := range values {
-		if !StringArrayContainsValue(array, value) {
-			return false
-		}
-	}
-	return true
-}
-
-// StringValue returns the string value of an item.
-func StringValue(item interface{}) (value string, ok bool) {
-	value, ok = item.(string)
-	if ok {
-		return value, ok
-	}
-	intValue, ok := item.(int)
-	if ok {
-		return strconv.Itoa(intValue), true
-	}
-	return "", false
-}
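Note: the helpers removed above are thin wrappers around yaml.MapSlice, which preserves key order. A minimal sketch of the common calls:

package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	var node yaml.MapSlice
	_ = yaml.Unmarshal([]byte("name: limit\nin: query"), &node)

	if m, ok := compiler.UnpackMap(node); ok {
		fmt.Println(compiler.MapHasKey(m, "name"))    // true
		fmt.Println(compiler.MapValueForKey(m, "in")) // query
		fmt.Println(compiler.SortedKeysForMap(m))     // [in name]
	}
}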
diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go
deleted file mode 100644
index 9713a21..0000000
--- a/vendor/github.com/googleapis/gnostic/compiler/main.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package compiler provides support functions to generated compiler code.
-package compiler
diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go
deleted file mode 100644
index c954a2d..0000000
--- a/vendor/github.com/googleapis/gnostic/compiler/reader.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package compiler
-
-import (
-	"errors"
-	"fmt"
-	"gopkg.in/yaml.v2"
-	"io/ioutil"
-	"log"
-	"net/http"
-	"net/url"
-	"path/filepath"
-	"strings"
-)
-
-var fileCache map[string][]byte
-var infoCache map[string]interface{}
-var count int64
-
-var verboseReader = false
-
-func initializeFileCache() {
-	if fileCache == nil {
-		fileCache = make(map[string][]byte, 0)
-	}
-}
-
-func initializeInfoCache() {
-	if infoCache == nil {
-		infoCache = make(map[string]interface{}, 0)
-	}
-}
-
-// FetchFile gets a specified file from the local filesystem or a remote location.
-func FetchFile(fileurl string) ([]byte, error) {
-	initializeFileCache()
-	bytes, ok := fileCache[fileurl]
-	if ok {
-		if verboseReader {
-			log.Printf("Cache hit %s", fileurl)
-		}
-		return bytes, nil
-	}
-	if verboseReader {
-		log.Printf("Fetching %s", fileurl)
-	}
-	response, err := http.Get(fileurl)
-	if err != nil {
-		return nil, err
-	}
-	defer response.Body.Close()
-	if response.StatusCode != 200 {
-		return nil, fmt.Errorf("error downloading %s: %s", fileurl, response.Status)
-	}
-	bytes, err = ioutil.ReadAll(response.Body)
-	if err == nil {
-		fileCache[fileurl] = bytes
-	}
-	return bytes, err
-}
-
-// ReadBytesForFile reads the bytes of a file.
-func ReadBytesForFile(filename string) ([]byte, error) {
-	// is the filename a url?
-	fileurl, _ := url.Parse(filename)
-	if fileurl.Scheme != "" {
-		// yes, fetch it
-		bytes, err := FetchFile(filename)
-		if err != nil {
-			return nil, err
-		}
-		return bytes, nil
-	}
-	// no, it's a local filename
-	bytes, err := ioutil.ReadFile(filename)
-	if err != nil {
-		return nil, err
-	}
-	return bytes, nil
-}
-
-// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice.
-func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) {
-	initializeInfoCache()
-	cachedInfo, ok := infoCache[filename]
-	if ok {
-		if verboseReader {
-			log.Printf("Cache hit info for file %s", filename)
-		}
-		return cachedInfo, nil
-	}
-	if verboseReader {
-		log.Printf("Reading info for file %s", filename)
-	}
-	var info yaml.MapSlice
-	err := yaml.Unmarshal(bytes, &info)
-	if err != nil {
-		return nil, err
-	}
-	if len(filename) > 0 {
-		infoCache[filename] = info
-	}
-	return info, nil
-}
-
-// ReadInfoForRef reads a file and returns the fragment needed to resolve a $ref.
-func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
-	initializeInfoCache()
-	{
-		info, ok := infoCache[ref]
-		if ok {
-			if verboseReader {
-				log.Printf("Cache hit for ref %s#%s", basefile, ref)
-			}
-			return info, nil
-		}
-	}
-	if verboseReader {
-		log.Printf("Reading info for ref %s#%s", basefile, ref)
-	}
-	count = count + 1
-	basedir, _ := filepath.Split(basefile)
-	parts := strings.Split(ref, "#")
-	var filename string
-	if parts[0] != "" {
-		filename = basedir + parts[0]
-	} else {
-		filename = basefile
-	}
-	bytes, err := ReadBytesForFile(filename)
-	if err != nil {
-		return nil, err
-	}
-	info, err := ReadInfoFromBytes(filename, bytes)
-	if err != nil {
-		log.Printf("File error: %v\n", err)
-	} else {
-		if len(parts) > 1 {
-			path := strings.Split(parts[1], "/")
-			for i, key := range path {
-				if i > 0 {
-					m, ok := info.(yaml.MapSlice)
-					if ok {
-						found := false
-						for _, section := range m {
-							if section.Key == key {
-								info = section.Value
-								found = true
-							}
-						}
-						if !found {
-							infoCache[ref] = nil
-							return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref))
-						}
-					}
-				}
-			}
-		}
-	}
-	infoCache[ref] = info
-	return info, nil
-}
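Note: ReadInfoForRef, removed above, resolves a $ref fragment against a base file, caching both raw file bytes and parsed info. A usage sketch; "swagger.json" is a placeholder path, not a file in this change:

package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	// Resolves the named fragment; the base file may be local or fetched over HTTP.
	info, err := compiler.ReadInfoForRef("swagger.json", "#/definitions/parameter")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%+v\n", info)
}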
diff --git a/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh
deleted file mode 100755
index 68d02a0..0000000
--- a/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-go get github.com/golang/protobuf/protoc-gen-go
-
-protoc \
---go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. *.proto 
-
diff --git a/vendor/github.com/googleapis/gnostic/extensions/README.md b/vendor/github.com/googleapis/gnostic/extensions/README.md
deleted file mode 100644
index ff1c2eb..0000000
--- a/vendor/github.com/googleapis/gnostic/extensions/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Extensions
-
-This directory contains support code for building Gnostic extensions and associated examples.
-
-Extensions are used to compile vendor or specification extensions into protocol buffer structures.
diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go
deleted file mode 100644
index 749ff78..0000000
--- a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go
+++ /dev/null
@@ -1,218 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: extension.proto
-
-/*
-Package openapiextension_v1 is a generated protocol buffer package.
-
-It is generated from these files:
-	extension.proto
-
-It has these top-level messages:
-	Version
-	ExtensionHandlerRequest
-	ExtensionHandlerResponse
-	Wrapper
-*/
-package openapiextension_v1
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import google_protobuf "github.com/golang/protobuf/ptypes/any"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-// The version number of OpenAPI compiler.
-type Version struct {
-	Major int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
-	Minor int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
-	Patch int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
-	// A suffix for alpha, beta, or rc releases, e.g., "alpha-1", "rc2". It should
-	// be empty for mainline stable releases.
-	Suffix string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"`
-}
-
-func (m *Version) Reset()                    { *m = Version{} }
-func (m *Version) String() string            { return proto.CompactTextString(m) }
-func (*Version) ProtoMessage()               {}
-func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-
-func (m *Version) GetMajor() int32 {
-	if m != nil {
-		return m.Major
-	}
-	return 0
-}
-
-func (m *Version) GetMinor() int32 {
-	if m != nil {
-		return m.Minor
-	}
-	return 0
-}
-
-func (m *Version) GetPatch() int32 {
-	if m != nil {
-		return m.Patch
-	}
-	return 0
-}
-
-func (m *Version) GetSuffix() string {
-	if m != nil {
-		return m.Suffix
-	}
-	return ""
-}
-
-// An encoded Request is written to the ExtensionHandler's stdin.
-type ExtensionHandlerRequest struct {
-	// The OpenAPI descriptions that were explicitly listed on the command line.
-	// The specifications will appear in the order they are specified to gnostic.
-	Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"`
-	// The version number of openapi compiler.
-	CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"`
-}
-
-func (m *ExtensionHandlerRequest) Reset()                    { *m = ExtensionHandlerRequest{} }
-func (m *ExtensionHandlerRequest) String() string            { return proto.CompactTextString(m) }
-func (*ExtensionHandlerRequest) ProtoMessage()               {}
-func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
-
-func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper {
-	if m != nil {
-		return m.Wrapper
-	}
-	return nil
-}
-
-func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version {
-	if m != nil {
-		return m.CompilerVersion
-	}
-	return nil
-}
-
-// The extension writes an encoded ExtensionHandlerResponse to stdout.
-type ExtensionHandlerResponse struct {
-	// true if the extension is handled by the extension handler; false otherwise
-	Handled bool `protobuf:"varint,1,opt,name=handled" json:"handled,omitempty"`
-	// Error message.  If non-empty, the extension handling failed.
-	// The extension handler process should exit with status code zero
-	// even if it reports an error in this way.
-	//
-	// This should be used to indicate errors which prevent the extension from
-	// operating as intended.  Errors which indicate a problem in gnostic
-	// itself -- such as the input Document being unparseable -- should be
-	// reported by writing a message to stderr and exiting with a non-zero
-	// status code.
-	Error []string `protobuf:"bytes,2,rep,name=error" json:"error,omitempty"`
-	// text output
-	Value *google_protobuf.Any `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
-}
-
-func (m *ExtensionHandlerResponse) Reset()                    { *m = ExtensionHandlerResponse{} }
-func (m *ExtensionHandlerResponse) String() string            { return proto.CompactTextString(m) }
-func (*ExtensionHandlerResponse) ProtoMessage()               {}
-func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
-
-func (m *ExtensionHandlerResponse) GetHandled() bool {
-	if m != nil {
-		return m.Handled
-	}
-	return false
-}
-
-func (m *ExtensionHandlerResponse) GetError() []string {
-	if m != nil {
-		return m.Error
-	}
-	return nil
-}
-
-func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-type Wrapper struct {
-	// version of the OpenAPI specification in which this extension was written.
-	Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
-	// Name of the extension
-	ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName" json:"extension_name,omitempty"`
-	// Must be valid YAML for the proto
-	Yaml string `protobuf:"bytes,3,opt,name=yaml" json:"yaml,omitempty"`
-}
-
-func (m *Wrapper) Reset()                    { *m = Wrapper{} }
-func (m *Wrapper) String() string            { return proto.CompactTextString(m) }
-func (*Wrapper) ProtoMessage()               {}
-func (*Wrapper) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
-
-func (m *Wrapper) GetVersion() string {
-	if m != nil {
-		return m.Version
-	}
-	return ""
-}
-
-func (m *Wrapper) GetExtensionName() string {
-	if m != nil {
-		return m.ExtensionName
-	}
-	return ""
-}
-
-func (m *Wrapper) GetYaml() string {
-	if m != nil {
-		return m.Yaml
-	}
-	return ""
-}
-
-func init() {
-	proto.RegisterType((*Version)(nil), "openapiextension.v1.Version")
-	proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest")
-	proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse")
-	proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper")
-}
-
-func init() { proto.RegisterFile("extension.proto", fileDescriptor0) }
-
-var fileDescriptor0 = []byte{
-	// 357 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xc3, 0x40,
-	0x18, 0x84, 0x49, 0xbf, 0x62, 0x56, 0x6c, 0x65, 0x2d, 0x1a, 0xc5, 0x43, 0x09, 0x08, 0x45, 0x64,
-	0x4b, 0x15, 0xbc, 0xb7, 0x50, 0xd4, 0x8b, 0x2d, 0x7b, 0xa8, 0x37, 0xcb, 0x36, 0x7d, 0x9b, 0x46,
-	0x92, 0xdd, 0x75, 0xf3, 0x61, 0xfb, 0x57, 0x3c, 0xfa, 0x4b, 0x25, 0xbb, 0x49, 0x3d, 0xa8, 0xb7,
-	0xcc, 0xc3, 0x24, 0xef, 0xcc, 0x04, 0x75, 0x60, 0x9b, 0x02, 0x4f, 0x42, 0xc1, 0x89, 0x54, 0x22,
-	0x15, 0xf8, 0x44, 0x48, 0xe0, 0x4c, 0x86, 0x3f, 0x3c, 0x1f, 0x5e, 0x9c, 0x07, 0x42, 0x04, 0x11,
-	0x0c, 0xb4, 0x65, 0x99, 0xad, 0x07, 0x8c, 0xef, 0x8c, 0xdf, 0xf3, 0x91, 0x3d, 0x07, 0x55, 0x18,
-	0x71, 0x17, 0x35, 0x63, 0xf6, 0x26, 0x94, 0x6b, 0xf5, 0xac, 0x7e, 0x93, 0x1a, 0xa1, 0x69, 0xc8,
-	0x85, 0x72, 0x6b, 0x25, 0x2d, 0x44, 0x41, 0x25, 0x4b, 0xfd, 0x8d, 0x5b, 0x37, 0x54, 0x0b, 0x7c,
-	0x8a, 0x5a, 0x49, 0xb6, 0x5e, 0x87, 0x5b, 0xb7, 0xd1, 0xb3, 0xfa, 0x0e, 0x2d, 0x95, 0xf7, 0x69,
-	0xa1, 0xb3, 0x49, 0x15, 0xe8, 0x91, 0xf1, 0x55, 0x04, 0x8a, 0xc2, 0x7b, 0x06, 0x49, 0x8a, 0xef,
-	0x91, 0xfd, 0xa1, 0x98, 0x94, 0x60, 0xee, 0x1e, 0xde, 0x5e, 0x92, 0x3f, 0x2a, 0x90, 0x17, 0xe3,
-	0xa1, 0x95, 0x19, 0x3f, 0xa0, 0x63, 0x5f, 0xc4, 0x32, 0x8c, 0x40, 0x2d, 0x72, 0xd3, 0x40, 0x87,
-	0xf9, 0xef, 0x03, 0x65, 0x4b, 0xda, 0xa9, 0xde, 0x2a, 0x81, 0x97, 0x23, 0xf7, 0x77, 0xb6, 0x44,
-	0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd1, 0x68, 0xa5, 0xc3, 0x1d, 0xd0, 0x4a, 0x16, 0x03, 0x80,
-	0x52, 0x7a, 0x96, 0x7a, 0xdf, 0xa1, 0x46, 0xe0, 0x6b, 0xd4, 0xcc, 0x59, 0x94, 0x41, 0x99, 0xa4,
-	0x4b, 0xcc, 0xf0, 0xa4, 0x1a, 0x9e, 0x8c, 0xf8, 0x8e, 0x1a, 0x8b, 0xf7, 0x8a, 0xec, 0xb2, 0x54,
-	0x71, 0xa6, 0xaa, 0x60, 0xe9, 0xe1, 0x2a, 0x89, 0xaf, 0x50, 0x7b, 0xdf, 0x62, 0xc1, 0x59, 0x0c,
-	0xfa, 0x37, 0x38, 0xf4, 0x68, 0x4f, 0x9f, 0x59, 0x0c, 0x18, 0xa3, 0xc6, 0x8e, 0xc5, 0x91, 0x3e,
-	0xeb, 0x50, 0xfd, 0x3c, 0xbe, 0x41, 0x6d, 0xa1, 0x02, 0x12, 0x70, 0x91, 0xa4, 0xa1, 0x4f, 0xf2,
-	0xe1, 0x18, 0x4f, 0x25, 0xf0, 0xd1, 0xec, 0x69, 0x5f, 0x77, 0x3e, 0x9c, 0x59, 0x5f, 0xb5, 0xfa,
-	0x74, 0x34, 0x59, 0xb6, 0x74, 0xc4, 0xbb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84, 0x5c, 0x6b,
-	0x80, 0x51, 0x02, 0x00, 0x00,
-}
diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto
deleted file mode 100644
index 04856f9..0000000
--- a/vendor/github.com/googleapis/gnostic/extensions/extension.proto
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-import "google/protobuf/any.proto";
-package openapiextension.v1;
-
-// This option lets the proto compiler generate Java code inside the package
-// name (see below) instead of inside an outer class. It creates a simpler
-// developer experience by reducing one-level of name nesting and be
-// consistent with most programming languages that don't support outer classes.
-option java_multiple_files = true;
-
-// The Java outer classname should be the filename in UpperCamelCase. This
-// class is only used to hold proto descriptor, so developers don't need to
-// work with it directly.
-option java_outer_classname = "OpenAPIExtensionV1";
-
-// The Java package name must be proto package name with proper prefix.
-option java_package = "org.gnostic.v1";
-
-// A reasonable prefix for the Objective-C symbols generated from the package.
-// It should at a minimum be 3 characters long, all uppercase, and convention
-// is to use an abbreviation of the package name. Something short, but
-// hopefully unique enough to not conflict with things that may come along in
-// the future. 'GPB' is reserved for the protocol buffer implementation itself.
-//
-option objc_class_prefix = "OAE"; // "OpenAPI Extension"
-
-// The version number of OpenAPI compiler.
-message Version {
-  int32 major = 1;
-  int32 minor = 2;
-  int32 patch = 3;
-  // A suffix for alpha, beta, or rc releases, e.g., "alpha-1", "rc2". It should
-  // be empty for mainline stable releases.
-  string suffix = 4;
-}
-
-// An encoded Request is written to the ExtensionHandler's stdin.
-message ExtensionHandlerRequest {
-
-  // The OpenAPI descriptions that were explicitly listed on the command line.
-  // The specifications will appear in the order they are specified to gnostic.
-  Wrapper wrapper = 1;
-
-  // The version number of openapi compiler.
-  Version compiler_version = 3;
-}
-
-// The extension writes an encoded ExtensionHandlerResponse to stdout.
-message ExtensionHandlerResponse {
-
-  // true if the extension is handled by the extension handler; false otherwise
-  bool handled = 1;
-
-  // Error message.  If non-empty, the extension handling failed.
-  // The extension handler process should exit with status code zero
-  // even if it reports an error in this way.
-  //
-  // This should be used to indicate errors which prevent the extension from
-  // operating as intended.  Errors which indicate a problem in gnostic
-  // itself -- such as the input Document being unparseable -- should be
-  // reported by writing a message to stderr and exiting with a non-zero
-  // status code.
-  repeated string error = 2;
-
-  // text output
-  google.protobuf.Any value = 3;
-}
-
-message Wrapper {
-  // version of the OpenAPI specification in which this extension was written.
-  string version = 1;
-
-  // Name of the extension
-  string extension_name = 2;
-
-  // Must be valid YAML for the proto
-  string yaml = 3;
-}
diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/googleapis/gnostic/extensions/extensions.go
deleted file mode 100644
index 94a8e62..0000000
--- a/vendor/github.com/googleapis/gnostic/extensions/extensions.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openapiextension_v1
-
-import (
-	"fmt"
-	"io/ioutil"
-	"os"
-
-	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/ptypes"
-)
-
-type documentHandler func(version string, extensionName string, document string)
-type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error)
-
-func forInputYamlFromOpenapic(handler documentHandler) {
-	data, err := ioutil.ReadAll(os.Stdin)
-	if err != nil {
-		fmt.Println("File error:", err.Error())
-		os.Exit(1)
-	}
-	if len(data) == 0 {
-		fmt.Println("No input data.")
-		os.Exit(1)
-	}
-	request := &ExtensionHandlerRequest{}
-	err = proto.Unmarshal(data, request)
-	if err != nil {
-		fmt.Println("Input error:", err.Error())
-		os.Exit(1)
-	}
-	handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml)
-}
-
-// ProcessExtension calls the handler for a specified extension.
-func ProcessExtension(handleExtension extensionHandler) {
-	response := &ExtensionHandlerResponse{}
-	forInputYamlFromOpenapic(
-		func(version string, extensionName string, yamlInput string) {
-			var newObject proto.Message
-			var err error
-
-			handled, newObject, err := handleExtension(extensionName, yamlInput)
-			if !handled {
-				responseBytes, _ := proto.Marshal(response)
-				os.Stdout.Write(responseBytes)
-				os.Exit(0)
-			}
-
-			// If we reach here, then the extension is handled
-			response.Handled = true
-			if err != nil {
-				response.Error = append(response.Error, err.Error())
-				responseBytes, _ := proto.Marshal(response)
-				os.Stdout.Write(responseBytes)
-				os.Exit(0)
-			}
-			response.Value, err = ptypes.MarshalAny(newObject)
-			if err != nil {
-				response.Error = append(response.Error, err.Error())
-				responseBytes, _ := proto.Marshal(response)
-				os.Stdout.Write(responseBytes)
-				os.Exit(0)
-			}
-		})
-
-	responseBytes, _ := proto.Marshal(response)
-	os.Stdout.Write(responseBytes)
-}
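
For context on the package deleted above, here is a hedged sketch of how an extension handler was typically wired up with it; the extension name "x-sample" and the returned Version message are placeholders, not anything gnostic itself ships.

package main

import (
	"github.com/golang/protobuf/proto"

	openapiextension_v1 "github.com/googleapis/gnostic/extensions"
)

func main() {
	// ProcessExtension reads an ExtensionHandlerRequest from stdin and writes
	// an ExtensionHandlerResponse to stdout, per extension.proto above.
	openapiextension_v1.ProcessExtension(func(name string, yamlInput string) (bool, proto.Message, error) {
		if name != "x-sample" { // hypothetical extension name
			return false, nil, nil // not handled; gnostic reports Handled=false
		}
		// A real handler would decode yamlInput into a generated proto
		// message; Version stands in here purely as a placeholder payload.
		return true, &openapiextension_v1.Version{Major: 1}, nil
	})
}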
diff --git a/vendor/github.com/gregjones/httpcache/.travis.yml b/vendor/github.com/gregjones/httpcache/.travis.yml
deleted file mode 100644
index 597bc99..0000000
--- a/vendor/github.com/gregjones/httpcache/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-sudo: false
-language: go
-matrix:
-  allow_failures:
-    - go: master
-  fast_finish: true
-  include:
-    - go: 1.10.x
-    - go: 1.11.x
-      env: GOFMT=1
-    - go: master
-install:
-  - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
-script:
-  - go get -t -v ./...
-  - if test -n "${GOFMT}"; then gofmt -w -s . && git diff --exit-code; fi
-  - go tool vet .
-  - go test -v -race ./...
diff --git a/vendor/github.com/gregjones/httpcache/LICENSE.txt b/vendor/github.com/gregjones/httpcache/LICENSE.txt
deleted file mode 100644
index 81316be..0000000
--- a/vendor/github.com/gregjones/httpcache/LICENSE.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Copyright © 2012 Greg Jones (greg.jones@gmail.com)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md
deleted file mode 100644
index 09c9e7c..0000000
--- a/vendor/github.com/gregjones/httpcache/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-httpcache
-=========
-
-[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache)
-
-Package httpcache provides a http.RoundTripper implementation that works as a mostly [RFC 7234](https://tools.ietf.org/html/rfc7234) compliant cache for http responses.
-
-It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy).
-
-Cache Backends
---------------
-
-- The built-in 'memory' cache stores responses in an in-memory map.
-- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library.
-- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers.
-- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage.
-- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb).
-- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries.
-- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache.
-- [`github.com/birkelund/boltdbcache`](https://github.com/birkelund/boltdbcache) provides a BoltDB implementation (based on the [bbolt](https://github.com/coreos/bbolt) fork).
-
-License
--------
-
--	[MIT License](LICENSE.txt)
diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go
deleted file mode 100644
index 42e3129..0000000
--- a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package
-// to supplement an in-memory map with persistent storage
-//
-package diskcache
-
-import (
-	"bytes"
-	"crypto/md5"
-	"encoding/hex"
-	"github.com/peterbourgon/diskv"
-	"io"
-)
-
-// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage
-type Cache struct {
-	d *diskv.Diskv
-}
-
-// Get returns the response corresponding to key if present
-func (c *Cache) Get(key string) (resp []byte, ok bool) {
-	key = keyToFilename(key)
-	resp, err := c.d.Read(key)
-	if err != nil {
-		return []byte{}, false
-	}
-	return resp, true
-}
-
-// Set saves a response to the cache as key
-func (c *Cache) Set(key string, resp []byte) {
-	key = keyToFilename(key)
-	c.d.WriteStream(key, bytes.NewReader(resp), true)
-}
-
-// Delete removes the response with key from the cache
-func (c *Cache) Delete(key string) {
-	key = keyToFilename(key)
-	c.d.Erase(key)
-}
-
-func keyToFilename(key string) string {
-	h := md5.New()
-	io.WriteString(h, key)
-	return hex.EncodeToString(h.Sum(nil))
-}
-
-// New returns a new Cache that will store files in basePath
-func New(basePath string) *Cache {
-	return &Cache{
-		d: diskv.New(diskv.Options{
-			BasePath:     basePath,
-			CacheSizeMax: 100 * 1024 * 1024, // 100MB
-		}),
-	}
-}
-
-// NewWithDiskv returns a new Cache using the provided Diskv as underlying
-// storage.
-func NewWithDiskv(d *diskv.Diskv) *Cache {
-	return &Cache{d}
-}
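
Likewise for the disk cache removed here, a minimal usage sketch (the base path is a placeholder):

package main

import (
	"fmt"

	"github.com/gregjones/httpcache/diskcache"
)

func main() {
	c := diskcache.New("/tmp/demo-cache") // hypothetical path; 100MB cap by default
	c.Set("greeting", []byte("hello"))    // keys are MD5-hashed into filenames
	if v, ok := c.Get("greeting"); ok {
		fmt.Println(string(v)) // "hello"
	}
	c.Delete("greeting")
}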
diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go
deleted file mode 100644
index b41a63d..0000000
--- a/vendor/github.com/gregjones/httpcache/httpcache.go
+++ /dev/null
@@ -1,551 +0,0 @@
-// Package httpcache provides a http.RoundTripper implementation that works as a
-// mostly RFC-compliant cache for http responses.
-//
-// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
-// and not for a shared proxy).
-//
-package httpcache
-
-import (
-	"bufio"
-	"bytes"
-	"errors"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"net/http/httputil"
-	"strings"
-	"sync"
-	"time"
-)
-
-const (
-	stale = iota
-	fresh
-	transparent
-	// XFromCache is the header added to responses that are returned from the cache
-	XFromCache = "X-From-Cache"
-)
-
-// A Cache interface is used by the Transport to store and retrieve responses.
-type Cache interface {
-	// Get returns the []byte representation of a cached response and a bool
-	// set to true if the value isn't empty
-	Get(key string) (responseBytes []byte, ok bool)
-	// Set stores the []byte representation of a response against a key
-	Set(key string, responseBytes []byte)
-	// Delete removes the value associated with the key
-	Delete(key string)
-}
-
-// cacheKey returns the cache key for req.
-func cacheKey(req *http.Request) string {
-	if req.Method == http.MethodGet {
-		return req.URL.String()
-	} else {
-		return req.Method + " " + req.URL.String()
-	}
-}
-
-// CachedResponse returns the cached http.Response for req if present, and nil
-// otherwise.
-func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
-	cachedVal, ok := c.Get(cacheKey(req))
-	if !ok {
-		return
-	}
-
-	b := bytes.NewBuffer(cachedVal)
-	return http.ReadResponse(bufio.NewReader(b), req)
-}
-
-// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
-type MemoryCache struct {
-	mu    sync.RWMutex
-	items map[string][]byte
-}
-
-// Get returns the []byte representation of the response and true if present, false if not
-func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
-	c.mu.RLock()
-	resp, ok = c.items[key]
-	c.mu.RUnlock()
-	return resp, ok
-}
-
-// Set saves response resp to the cache with key
-func (c *MemoryCache) Set(key string, resp []byte) {
-	c.mu.Lock()
-	c.items[key] = resp
-	c.mu.Unlock()
-}
-
-// Delete removes key from the cache
-func (c *MemoryCache) Delete(key string) {
-	c.mu.Lock()
-	delete(c.items, key)
-	c.mu.Unlock()
-}
-
-// NewMemoryCache returns a new Cache that will store items in an in-memory map
-func NewMemoryCache() *MemoryCache {
-	c := &MemoryCache{items: map[string][]byte{}}
-	return c
-}
-
-// Transport is an implementation of http.RoundTripper that will return values from a cache
-// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
-// to repeated requests allowing servers to return 304 / Not Modified
-type Transport struct {
-	// The RoundTripper interface actually used to make requests
-	// If nil, http.DefaultTransport is used
-	Transport http.RoundTripper
-	Cache     Cache
-	// If true, responses returned from the cache will be given an extra header, X-From-Cache
-	MarkCachedResponses bool
-}
-
-// NewTransport returns a new Transport with the
-// provided Cache implementation and MarkCachedResponses set to true
-func NewTransport(c Cache) *Transport {
-	return &Transport{Cache: c, MarkCachedResponses: true}
-}
-
-// Client returns an *http.Client that caches responses.
-func (t *Transport) Client() *http.Client {
-	return &http.Client{Transport: t}
-}
-
-// varyMatches will return false unless all of the cached values for the headers listed in Vary
-// match the new request
-func varyMatches(cachedResp *http.Response, req *http.Request) bool {
-	for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
-		header = http.CanonicalHeaderKey(header)
-		if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
-			return false
-		}
-	}
-	return true
-}
-
-// RoundTrip takes a Request and returns a Response
-//
-// If there is a fresh Response already in cache, then it will be returned without connecting to
-// the server.
-//
-// If there is a stale Response, then any validators it contains will be set on the new request
-// to give the server a chance to respond with NotModified. If this happens, then the cached Response
-// will be returned.
-func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
-	cacheKey := cacheKey(req)
-	cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == ""
-	var cachedResp *http.Response
-	if cacheable {
-		cachedResp, err = CachedResponse(t.Cache, req)
-	} else {
-		// Need to invalidate an existing value
-		t.Cache.Delete(cacheKey)
-	}
-
-	transport := t.Transport
-	if transport == nil {
-		transport = http.DefaultTransport
-	}
-
-	if cacheable && cachedResp != nil && err == nil {
-		if t.MarkCachedResponses {
-			cachedResp.Header.Set(XFromCache, "1")
-		}
-
-		if varyMatches(cachedResp, req) {
-			// Can only use cached value if the new request doesn't Vary significantly
-			freshness := getFreshness(cachedResp.Header, req.Header)
-			if freshness == fresh {
-				return cachedResp, nil
-			}
-
-			if freshness == stale {
-				var req2 *http.Request
-				// Add validators if caller hasn't already done so
-				etag := cachedResp.Header.Get("etag")
-				if etag != "" && req.Header.Get("etag") == "" {
-					req2 = cloneRequest(req)
-					req2.Header.Set("if-none-match", etag)
-				}
-				lastModified := cachedResp.Header.Get("last-modified")
-				if lastModified != "" && req.Header.Get("last-modified") == "" {
-					if req2 == nil {
-						req2 = cloneRequest(req)
-					}
-					req2.Header.Set("if-modified-since", lastModified)
-				}
-				if req2 != nil {
-					req = req2
-				}
-			}
-		}
-
-		resp, err = transport.RoundTrip(req)
-		if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified {
-			// Replace the 304 response with the one from cache, but update with some new headers
-			endToEndHeaders := getEndToEndHeaders(resp.Header)
-			for _, header := range endToEndHeaders {
-				cachedResp.Header[header] = resp.Header[header]
-			}
-			resp = cachedResp
-		} else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) &&
-			req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) {
-			// In case of transport failure and stale-if-error activated, returns cached content
-			// when available
-			return cachedResp, nil
-		} else {
-			if err != nil || resp.StatusCode != http.StatusOK {
-				t.Cache.Delete(cacheKey)
-			}
-			if err != nil {
-				return nil, err
-			}
-		}
-	} else {
-		reqCacheControl := parseCacheControl(req.Header)
-		if _, ok := reqCacheControl["only-if-cached"]; ok {
-			resp = newGatewayTimeoutResponse(req)
-		} else {
-			resp, err = transport.RoundTrip(req)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-
-	if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) {
-		for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") {
-			varyKey = http.CanonicalHeaderKey(varyKey)
-			fakeHeader := "X-Varied-" + varyKey
-			reqValue := req.Header.Get(varyKey)
-			if reqValue != "" {
-				resp.Header.Set(fakeHeader, reqValue)
-			}
-		}
-		switch req.Method {
-		case "GET":
-			// Delay caching until EOF is reached.
-			resp.Body = &cachingReadCloser{
-				R: resp.Body,
-				OnEOF: func(r io.Reader) {
-					resp := *resp
-					resp.Body = ioutil.NopCloser(r)
-					respBytes, err := httputil.DumpResponse(&resp, true)
-					if err == nil {
-						t.Cache.Set(cacheKey, respBytes)
-					}
-				},
-			}
-		default:
-			respBytes, err := httputil.DumpResponse(resp, true)
-			if err == nil {
-				t.Cache.Set(cacheKey, respBytes)
-			}
-		}
-	} else {
-		t.Cache.Delete(cacheKey)
-	}
-	return resp, nil
-}
-
-// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
-var ErrNoDateHeader = errors.New("no Date header")
-
-// Date parses and returns the value of the Date header.
-func Date(respHeaders http.Header) (date time.Time, err error) {
-	dateHeader := respHeaders.Get("date")
-	if dateHeader == "" {
-		err = ErrNoDateHeader
-		return
-	}
-
-	return time.Parse(time.RFC1123, dateHeader)
-}
-
-type realClock struct{}
-
-func (c *realClock) since(d time.Time) time.Duration {
-	return time.Since(d)
-}
-
-type timer interface {
-	since(d time.Time) time.Duration
-}
-
-var clock timer = &realClock{}
-
-// getFreshness will return one of fresh/stale/transparent based on the cache-control
-// values of the request and the response
-//
-// fresh indicates the response can be returned
-// stale indicates that the response needs validating before it is returned
-// transparent indicates the response should not be used to fulfil the request
-//
-// Because this is only a private cache, 'public' and 'private' in cache-control aren't
-// significant. Similarly, s-maxage isn't used.
-func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
-	respCacheControl := parseCacheControl(respHeaders)
-	reqCacheControl := parseCacheControl(reqHeaders)
-	if _, ok := reqCacheControl["no-cache"]; ok {
-		return transparent
-	}
-	if _, ok := respCacheControl["no-cache"]; ok {
-		return stale
-	}
-	if _, ok := reqCacheControl["only-if-cached"]; ok {
-		return fresh
-	}
-
-	date, err := Date(respHeaders)
-	if err != nil {
-		return stale
-	}
-	currentAge := clock.since(date)
-
-	var lifetime time.Duration
-	var zeroDuration time.Duration
-
-	// If a response includes both an Expires header and a max-age directive,
-	// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
-	if maxAge, ok := respCacheControl["max-age"]; ok {
-		lifetime, err = time.ParseDuration(maxAge + "s")
-		if err != nil {
-			lifetime = zeroDuration
-		}
-	} else {
-		expiresHeader := respHeaders.Get("Expires")
-		if expiresHeader != "" {
-			expires, err := time.Parse(time.RFC1123, expiresHeader)
-			if err != nil {
-				lifetime = zeroDuration
-			} else {
-				lifetime = expires.Sub(date)
-			}
-		}
-	}
-
-	if maxAge, ok := reqCacheControl["max-age"]; ok {
-		// the client is willing to accept a response whose age is no greater than the specified time in seconds
-		lifetime, err = time.ParseDuration(maxAge + "s")
-		if err != nil {
-			lifetime = zeroDuration
-		}
-	}
-	if minfresh, ok := reqCacheControl["min-fresh"]; ok {
-		//  the client wants a response that will still be fresh for at least the specified number of seconds.
-		minfreshDuration, err := time.ParseDuration(minfresh + "s")
-		if err == nil {
-			currentAge = time.Duration(currentAge + minfreshDuration)
-		}
-	}
-
-	if maxstale, ok := reqCacheControl["max-stale"]; ok {
-		// Indicates that the client is willing to accept a response that has exceeded its expiration time.
-		// If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
-		// its expiration time by no more than the specified number of seconds.
-		// If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
-		//
-		// Responses served only because of a max-stale value are supposed to have a Warning header added to them,
-	// but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
-		// return-value available here.
-		if maxstale == "" {
-			return fresh
-		}
-		maxstaleDuration, err := time.ParseDuration(maxstale + "s")
-		if err == nil {
-			currentAge = time.Duration(currentAge - maxstaleDuration)
-		}
-	}
-
-	if lifetime > currentAge {
-		return fresh
-	}
-
-	return stale
-}
-
-// Returns true if either the request or the response includes the stale-if-error
-// cache control extension: https://tools.ietf.org/html/rfc5861
-func canStaleOnError(respHeaders, reqHeaders http.Header) bool {
-	respCacheControl := parseCacheControl(respHeaders)
-	reqCacheControl := parseCacheControl(reqHeaders)
-
-	var err error
-	lifetime := time.Duration(-1)
-
-	if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok {
-		if staleMaxAge != "" {
-			lifetime, err = time.ParseDuration(staleMaxAge + "s")
-			if err != nil {
-				return false
-			}
-		} else {
-			return true
-		}
-	}
-	if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok {
-		if staleMaxAge != "" {
-			lifetime, err = time.ParseDuration(staleMaxAge + "s")
-			if err != nil {
-				return false
-			}
-		} else {
-			return true
-		}
-	}
-
-	if lifetime >= 0 {
-		date, err := Date(respHeaders)
-		if err != nil {
-			return false
-		}
-		currentAge := clock.since(date)
-		if lifetime > currentAge {
-			return true
-		}
-	}
-
-	return false
-}
-
-func getEndToEndHeaders(respHeaders http.Header) []string {
-	// These headers are always hop-by-hop
-	hopByHopHeaders := map[string]struct{}{
-		"Connection":          {},
-		"Keep-Alive":          {},
-		"Proxy-Authenticate":  {},
-		"Proxy-Authorization": {},
-		"Te":                  {},
-		"Trailers":            {},
-		"Transfer-Encoding":   {},
-		"Upgrade":             {},
-	}
-
-	for _, extra := range strings.Split(respHeaders.Get("connection"), ",") {
-		// any header listed in connection, if present, is also considered hop-by-hop
-		if strings.Trim(extra, " ") != "" {
-			hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{}
-		}
-	}
-	endToEndHeaders := []string{}
-	for respHeader := range respHeaders {
-		if _, ok := hopByHopHeaders[respHeader]; !ok {
-			endToEndHeaders = append(endToEndHeaders, respHeader)
-		}
-	}
-	return endToEndHeaders
-}
-
-func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) {
-	if _, ok := respCacheControl["no-store"]; ok {
-		return false
-	}
-	if _, ok := reqCacheControl["no-store"]; ok {
-		return false
-	}
-	return true
-}
-
-func newGatewayTimeoutResponse(req *http.Request) *http.Response {
-	var braw bytes.Buffer
-	braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n")
-	resp, err := http.ReadResponse(bufio.NewReader(&braw), req)
-	if err != nil {
-		panic(err)
-	}
-	return resp
-}
-
-// cloneRequest returns a clone of the provided *http.Request.
-// The clone is a shallow copy of the struct and its Header map.
-// (This function copyright goauth2 authors: https://code.google.com/p/goauth2)
-func cloneRequest(r *http.Request) *http.Request {
-	// shallow copy of the struct
-	r2 := new(http.Request)
-	*r2 = *r
-	// deep copy of the Header
-	r2.Header = make(http.Header)
-	for k, s := range r.Header {
-		r2.Header[k] = s
-	}
-	return r2
-}
-
-type cacheControl map[string]string
-
-func parseCacheControl(headers http.Header) cacheControl {
-	cc := cacheControl{}
-	ccHeader := headers.Get("Cache-Control")
-	for _, part := range strings.Split(ccHeader, ",") {
-		part = strings.Trim(part, " ")
-		if part == "" {
-			continue
-		}
-		if strings.ContainsRune(part, '=') {
-			keyval := strings.Split(part, "=")
-			cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",")
-		} else {
-			cc[part] = ""
-		}
-	}
-	return cc
-}
-
-// headerAllCommaSepValues returns all comma-separated values (each
-// with whitespace trimmed) for header name in headers. According to
-// Section 4.2 of the HTTP/1.1 spec
-// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2),
-// values from multiple occurrences of a header should be concatenated, if
-// the header's value is a comma-separated list.
-func headerAllCommaSepValues(headers http.Header, name string) []string {
-	var vals []string
-	for _, val := range headers[http.CanonicalHeaderKey(name)] {
-		fields := strings.Split(val, ",")
-		for i, f := range fields {
-			fields[i] = strings.TrimSpace(f)
-		}
-		vals = append(vals, fields...)
-	}
-	return vals
-}
-
-// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF
-// handler with a full copy of the content read from R when EOF is
-// reached.
-type cachingReadCloser struct {
-	// Underlying ReadCloser.
-	R io.ReadCloser
-	// OnEOF is called with a copy of the content of R when EOF is reached.
-	OnEOF func(io.Reader)
-
-	buf bytes.Buffer // buf stores a copy of the content of R.
-}
-
-// Read reads the next len(p) bytes from R or until R is drained. The
-// return value n is the number of bytes read. If R has no data to
-// return, err is io.EOF and OnEOF is called with a full copy of what
-// has been read so far.
-func (r *cachingReadCloser) Read(p []byte) (n int, err error) {
-	n, err = r.R.Read(p)
-	r.buf.Write(p[:n])
-	if err == io.EOF {
-		r.OnEOF(bytes.NewReader(r.buf.Bytes()))
-	}
-	return n, err
-}
-
-func (r *cachingReadCloser) Close() error {
-	return r.R.Close()
-}
-
-// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
-func NewMemoryCacheTransport() *Transport {
-	c := NewMemoryCache()
-	t := NewTransport(c)
-	return t
-}
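
And a hedged sketch of the deleted package's main entry point, an RFC 7234-style caching HTTP client (the URL is a placeholder):

package main

import (
	"fmt"
	"io"
	"io/ioutil"

	"github.com/gregjones/httpcache"
)

func main() {
	// NewMemoryCacheTransport enables MarkCachedResponses, so cache hits
	// carry the X-From-Cache header.
	client := httpcache.NewMemoryCacheTransport().Client()
	for i := 0; i < 2; i++ {
		resp, err := client.Get("https://example.com/")
		if err != nil {
			panic(err)
		}
		// A GET response is cached only once its body is read to EOF
		// (see cachingReadCloser above).
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
		// Whether the second iteration is a hit depends on the server's
		// Cache-Control/Expires headers.
		fmt.Println("from cache:", resp.Header.Get(httpcache.XFromCache) == "1")
	}
}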
diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go
index 53a0523..124409f 100644
--- a/vendor/github.com/hashicorp/consul/api/acl.go
+++ b/vendor/github.com/hashicorp/consul/api/acl.go
@@ -4,7 +4,10 @@
 	"fmt"
 	"io"
 	"io/ioutil"
+	"net/url"
 	"time"
+
+	"github.com/mitchellh/mapstructure"
 )
 
 const (
@@ -19,18 +22,26 @@
 	ID   string
 	Name string
 }
+type ACLTokenRoleLink struct {
+	ID   string
+	Name string
+}
 
 // ACLToken represents an ACL Token
 type ACLToken struct {
-	CreateIndex uint64
-	ModifyIndex uint64
-	AccessorID  string
-	SecretID    string
-	Description string
-	Policies    []*ACLTokenPolicyLink
-	Local       bool
-	CreateTime  time.Time `json:",omitempty"`
-	Hash        []byte    `json:",omitempty"`
+	CreateIndex       uint64
+	ModifyIndex       uint64
+	AccessorID        string
+	SecretID          string
+	Description       string
+	Policies          []*ACLTokenPolicyLink `json:",omitempty"`
+	Roles             []*ACLTokenRoleLink   `json:",omitempty"`
+	ServiceIdentities []*ACLServiceIdentity `json:",omitempty"`
+	Local             bool
+	ExpirationTTL     time.Duration `json:",omitempty"`
+	ExpirationTime    *time.Time    `json:",omitempty"`
+	CreateTime        time.Time     `json:",omitempty"`
+	Hash              []byte        `json:",omitempty"`
 
 	// DEPRECATED (ACL-Legacy-Compat)
 	// Rules will only be present for legacy tokens returned via the new APIs
@@ -38,15 +49,18 @@
 }
 
 type ACLTokenListEntry struct {
-	CreateIndex uint64
-	ModifyIndex uint64
-	AccessorID  string
-	Description string
-	Policies    []*ACLTokenPolicyLink
-	Local       bool
-	CreateTime  time.Time
-	Hash        []byte
-	Legacy      bool
+	CreateIndex       uint64
+	ModifyIndex       uint64
+	AccessorID        string
+	Description       string
+	Policies          []*ACLTokenPolicyLink `json:",omitempty"`
+	Roles             []*ACLTokenRoleLink   `json:",omitempty"`
+	ServiceIdentities []*ACLServiceIdentity `json:",omitempty"`
+	Local             bool
+	ExpirationTime    *time.Time `json:",omitempty"`
+	CreateTime        time.Time
+	Hash              []byte
+	Legacy            bool
 }
 
 // ACLEntry is used to represent a legacy ACL token
@@ -67,11 +81,20 @@
 	SourceDatacenter     string
 	ReplicationType      string
 	ReplicatedIndex      uint64
+	ReplicatedRoleIndex  uint64
 	ReplicatedTokenIndex uint64
 	LastSuccess          time.Time
 	LastError            time.Time
 }
 
+// ACLServiceIdentity represents a high-level grant of all necessary privileges
+// to assume the identity of the named Service in the Catalog and within
+// Connect.
+type ACLServiceIdentity struct {
+	ServiceName string
+	Datacenters []string `json:",omitempty"`
+}
+
 // ACLPolicy represents an ACL Policy.
 type ACLPolicy struct {
 	ID          string
@@ -94,6 +117,113 @@
 	ModifyIndex uint64
 }
 
+type ACLRolePolicyLink struct {
+	ID   string
+	Name string
+}
+
+// ACLRole represents an ACL Role.
+type ACLRole struct {
+	ID                string
+	Name              string
+	Description       string
+	Policies          []*ACLRolePolicyLink  `json:",omitempty"`
+	ServiceIdentities []*ACLServiceIdentity `json:",omitempty"`
+	Hash              []byte
+	CreateIndex       uint64
+	ModifyIndex       uint64
+}
+
+// BindingRuleBindType is the type of binding rule mechanism used.
+type BindingRuleBindType string
+
+const (
+	// BindingRuleBindTypeService binds to a service identity with the given name.
+	BindingRuleBindTypeService BindingRuleBindType = "service"
+
+	// BindingRuleBindTypeRole binds to pre-existing roles with the given name.
+	BindingRuleBindTypeRole BindingRuleBindType = "role"
+)
+
+type ACLBindingRule struct {
+	ID          string
+	Description string
+	AuthMethod  string
+	Selector    string
+	BindType    BindingRuleBindType
+	BindName    string
+
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+type ACLAuthMethod struct {
+	Name        string
+	Type        string
+	Description string
+
+	// Configuration is arbitrary configuration for the auth method. This
+	// should only contain primitive values and containers (such as lists and
+	// maps).
+	Config map[string]interface{}
+
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+type ACLAuthMethodListEntry struct {
+	Name        string
+	Type        string
+	Description string
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+// ParseKubernetesAuthMethodConfig takes a raw config map and returns a parsed
+// KubernetesAuthMethodConfig.
+func ParseKubernetesAuthMethodConfig(raw map[string]interface{}) (*KubernetesAuthMethodConfig, error) {
+	var config KubernetesAuthMethodConfig
+	decodeConf := &mapstructure.DecoderConfig{
+		Result:           &config,
+		WeaklyTypedInput: true,
+	}
+
+	decoder, err := mapstructure.NewDecoder(decodeConf)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := decoder.Decode(raw); err != nil {
+		return nil, fmt.Errorf("error decoding config: %s", err)
+	}
+
+	return &config, nil
+}
+
+// KubernetesAuthMethodConfig is the config for the built-in Consul auth method
+// for Kubernetes.
+type KubernetesAuthMethodConfig struct {
+	Host              string `json:",omitempty"`
+	CACert            string `json:",omitempty"`
+	ServiceAccountJWT string `json:",omitempty"`
+}
+
+// RenderToConfig converts this into a map[string]interface{} suitable for use
+// in the ACLAuthMethod.Config field.
+func (c *KubernetesAuthMethodConfig) RenderToConfig() map[string]interface{} {
+	return map[string]interface{}{
+		"Host":              c.Host,
+		"CACert":            c.CACert,
+		"ServiceAccountJWT": c.ServiceAccountJWT,
+	}
+}
+
+type ACLLoginParams struct {
+	AuthMethod  string
+	BearerToken string
+	Meta        map[string]string `json:",omitempty"`
+}
+
 // ACL can be used to query the ACL endpoints
 type ACL struct {
 	c *Client
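
The new KubernetesAuthMethodConfig helpers above round-trip through the generic Config map; a short sketch, with placeholder values:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	cfg := &api.KubernetesAuthMethodConfig{
		Host:              "https://10.0.0.1:6443", // placeholder API server
		CACert:            "<pem>",
		ServiceAccountJWT: "<jwt>",
	}
	raw := cfg.RenderToConfig() // suitable for ACLAuthMethod.Config

	parsed, err := api.ParseKubernetesAuthMethodConfig(raw)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed.Host == cfg.Host) // true
}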
@@ -266,17 +396,9 @@
 	return entries, qm, nil
 }
 
-// TokenCreate creates a new ACL token. It requires that the AccessorID and SecretID fields
-// of the ACLToken structure to be empty as these will be filled in by Consul.
+// TokenCreate creates a new ACL token. If either the AccessorID or SecretID fields
+// of the ACLToken structure are empty they will be filled in by Consul.
 func (a *ACL) TokenCreate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
-	if token.AccessorID != "" {
-		return nil, nil, fmt.Errorf("Cannot specify an AccessorID in Token Creation")
-	}
-
-	if token.SecretID != "" {
-		return nil, nil, fmt.Errorf("Cannot specify a SecretID in Token Creation")
-	}
-
 	r := a.c.newRequest("PUT", "/v1/acl/token")
 	r.setWriteOptions(q)
 	r.obj = token
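
A hedged sketch of the relaxed TokenCreate contract, also exercising the new Roles link (the policy and role names are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// AccessorID/SecretID may now be left empty (Consul fills them in) or
	// pre-set by the caller.
	tok, _, err := client.ACL().TokenCreate(&api.ACLToken{
		Description: "example token",
		Policies:    []*api.ACLTokenPolicyLink{{Name: "read-only"}},
		Roles:       []*api.ACLTokenRoleLink{{Name: "service-ro"}},
		Local:       true,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("accessor:", tok.AccessorID)
}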
@@ -437,7 +559,6 @@
 	if policy.ID != "" {
 		return nil, nil, fmt.Errorf("Cannot specify an ID in Policy Creation")
 	}
-
 	r := a.c.newRequest("PUT", "/v1/acl/policy")
 	r.setWriteOptions(q)
 	r.obj = policy
@@ -460,7 +581,7 @@
 // existing policy ID
 func (a *ACL) PolicyUpdate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) {
 	if policy.ID == "" {
-		return nil, nil, fmt.Errorf("Must specify an ID in Policy Creation")
+		return nil, nil, fmt.Errorf("Must specify an ID in Policy Update")
 	}
 
 	r := a.c.newRequest("PUT", "/v1/acl/policy/"+policy.ID)
@@ -586,3 +707,410 @@
 
 	return string(ruleBytes), nil
 }
+
+// RoleCreate will create a new role. It is not allowed for the role parameter's
+// ID field to be set as this will be generated by Consul while processing the request.
+func (a *ACL) RoleCreate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) {
+	if role.ID != "" {
+		return nil, nil, fmt.Errorf("Cannot specify an ID in Role Creation")
+	}
+
+	r := a.c.newRequest("PUT", "/v1/acl/role")
+	r.setWriteOptions(q)
+	r.obj = role
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLRole
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
+
+// RoleUpdate updates a role. The ID field of the role parameter must be set to an
+// existing role ID
+func (a *ACL) RoleUpdate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) {
+	if role.ID == "" {
+		return nil, nil, fmt.Errorf("Must specify an ID in Role Update")
+	}
+
+	r := a.c.newRequest("PUT", "/v1/acl/role/"+role.ID)
+	r.setWriteOptions(q)
+	r.obj = role
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLRole
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
+
+// RoleDelete deletes a role given its ID.
+func (a *ACL) RoleDelete(roleID string, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("DELETE", "/v1/acl/role/"+roleID)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// RoleRead retrieves the role details (by ID). Returns nil if not found.
+func (a *ACL) RoleRead(roleID string, q *QueryOptions) (*ACLRole, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/role/"+roleID)
+	r.setQueryOptions(q)
+	found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if !found {
+		return nil, qm, nil
+	}
+
+	var out ACLRole
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, qm, nil
+}
+
+// RoleReadByName retrieves the role details (by name). Returns nil if not found.
+func (a *ACL) RoleReadByName(roleName string, q *QueryOptions) (*ACLRole, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/role/name/"+url.QueryEscape(roleName))
+	r.setQueryOptions(q)
+	found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if !found {
+		return nil, qm, nil
+	}
+
+	var out ACLRole
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, qm, nil
+}
+
+// RoleList retrieves a listing of all roles. The listing does not include some
+// metadata for the role, which should be retrieved by subsequent calls to
+// RoleRead.
+func (a *ACL) RoleList(q *QueryOptions) ([]*ACLRole, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/roles")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLRole
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// AuthMethodCreate will create a new auth method.
+func (a *ACL) AuthMethodCreate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) {
+	if method.Name == "" {
+		return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Creation")
+	}
+
+	r := a.c.newRequest("PUT", "/v1/acl/auth-method")
+	r.setWriteOptions(q)
+	r.obj = method
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLAuthMethod
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
+
+// AuthMethodUpdate updates an auth method.
+func (a *ACL) AuthMethodUpdate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) {
+	if method.Name == "" {
+		return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Update")
+	}
+
+	r := a.c.newRequest("PUT", "/v1/acl/auth-method/"+url.QueryEscape(method.Name))
+	r.setWriteOptions(q)
+	r.obj = method
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLAuthMethod
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
+
+// AuthMethodDelete deletes an auth method given its Name.
+func (a *ACL) AuthMethodDelete(methodName string, q *WriteOptions) (*WriteMeta, error) {
+	if methodName == "" {
+		return nil, fmt.Errorf("Must specify a Name in Auth Method Delete")
+	}
+
+	r := a.c.newRequest("DELETE", "/v1/acl/auth-method/"+url.QueryEscape(methodName))
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// AuthMethodRead retrieves the auth method. Returns nil if not found.
+func (a *ACL) AuthMethodRead(methodName string, q *QueryOptions) (*ACLAuthMethod, *QueryMeta, error) {
+	if methodName == "" {
+		return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Read")
+	}
+
+	r := a.c.newRequest("GET", "/v1/acl/auth-method/"+url.QueryEscape(methodName))
+	r.setQueryOptions(q)
+	found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if !found {
+		return nil, qm, nil
+	}
+
+	var out ACLAuthMethod
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, qm, nil
+}
+
+// AuthMethodList retrieves a listing of all auth methods. The listing does not
+// include some metadata for the auth method, which should be retrieved by
+// subsequent calls to AuthMethodRead.
+func (a *ACL) AuthMethodList(q *QueryOptions) ([]*ACLAuthMethodListEntry, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/auth-methods")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLAuthMethodListEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// BindingRuleCreate will create a new binding rule. It is not allowed for the
+// binding rule parameter's ID field to be set as this will be generated by
+// Consul while processing the request.
+func (a *ACL) BindingRuleCreate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) {
+	if rule.ID != "" {
+		return nil, nil, fmt.Errorf("Cannot specify an ID in Binding Rule Creation")
+	}
+
+	r := a.c.newRequest("PUT", "/v1/acl/binding-rule")
+	r.setWriteOptions(q)
+	r.obj = rule
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLBindingRule
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
+
+// BindingRuleUpdate updates a binding rule. The ID field of the binding rule
+// parameter must be set to an existing binding rule ID.
+func (a *ACL) BindingRuleUpdate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) {
+	if rule.ID == "" {
+		return nil, nil, fmt.Errorf("Must specify an ID in Binding Rule Update")
+	}
+
+	r := a.c.newRequest("PUT", "/v1/acl/binding-rule/"+rule.ID)
+	r.setWriteOptions(q)
+	r.obj = rule
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLBindingRule
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
+
+// BindingRuleDelete deletes a binding rule given its ID.
+func (a *ACL) BindingRuleDelete(bindingRuleID string, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("DELETE", "/v1/acl/binding-rule/"+bindingRuleID)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// BindingRuleRead retrieves the binding rule details. Returns nil if not found.
+func (a *ACL) BindingRuleRead(bindingRuleID string, q *QueryOptions) (*ACLBindingRule, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/binding-rule/"+bindingRuleID)
+	r.setQueryOptions(q)
+	found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if !found {
+		return nil, qm, nil
+	}
+
+	var out ACLBindingRule
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, qm, nil
+}
+
+// BindingRuleList retrieves a listing of all binding rules.
+func (a *ACL) BindingRuleList(methodName string, q *QueryOptions) ([]*ACLBindingRule, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/binding-rules")
+	if methodName != "" {
+		r.params.Set("authmethod", methodName)
+	}
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLBindingRule
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// Login is used to exchange auth method credentials for a newly-minted Consul Token.
+func (a *ACL) Login(auth *ACLLoginParams, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
+	r := a.c.newRequest("POST", "/v1/acl/login")
+	r.setWriteOptions(q)
+	r.obj = auth
+
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLToken
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return &out, wm, nil
+}
+
+// Logout is used to destroy a Consul Token created via Login().
+func (a *ACL) Logout(q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("POST", "/v1/acl/logout")
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
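
To illustrate the new Login/Logout endpoints, a hedged end-to-end sketch; the auth method name and bearer token are placeholders for a configured Kubernetes auth method:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Exchange an auth-method credential for a newly-minted Consul token.
	tok, _, err := client.ACL().Login(&api.ACLLoginParams{
		AuthMethod:  "minikube",
		BearerToken: "<service-account-jwt>",
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("minted token:", tok.AccessorID)

	// Destroy the minted token again; Logout authenticates as that token.
	if _, err := client.ACL().Logout(&api.WriteOptions{Token: tok.SecretID}); err != nil {
		log.Fatal(err)
	}
}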
diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go
index 6acf8ad..1ef3312 100644
--- a/vendor/github.com/hashicorp/consul/api/agent.go
+++ b/vendor/github.com/hashicorp/consul/api/agent.go
@@ -2,7 +2,9 @@
 
 import (
 	"bufio"
+	"bytes"
 	"fmt"
+	"io"
 	"net/http"
 	"net/url"
 )
@@ -21,23 +23,11 @@
 	// service proxies another service within Consul and speaks the connect
 	// protocol.
 	ServiceKindConnectProxy ServiceKind = "connect-proxy"
-)
 
-// ProxyExecMode is the execution mode for a managed Connect proxy.
-type ProxyExecMode string
-
-const (
-	// ProxyExecModeDaemon indicates that the proxy command should be long-running
-	// and should be started and supervised by the agent until its target service
-	// is deregistered.
-	ProxyExecModeDaemon ProxyExecMode = "daemon"
-
-	// ProxyExecModeScript indicates that the proxy command should be invoked to
-	// completion on each change to the configuration or lifecycle event. The
-	// script typically fetches the config and certificates from the agent API and
-	// then configures an externally managed daemon, perhaps starting and stopping
-	// it if necessary.
-	ProxyExecModeScript ProxyExecMode = "script"
+	// ServiceKindMeshGateway is a Mesh Gateway for the Connect feature. This
+	// service will proxy connections based off the SNI header set by other
+	// connect proxies
+	ServiceKindMeshGateway ServiceKind = "mesh-gateway"
 )
 
 // UpstreamDestType is the type of upstream discovery mechanism.
@@ -80,15 +70,14 @@
 	Meta              map[string]string
 	Port              int
 	Address           string
+	TaggedAddresses   map[string]ServiceAddress `json:",omitempty"`
 	Weights           AgentWeights
 	EnableTagOverride bool
-	CreateIndex       uint64 `json:",omitempty"`
-	ModifyIndex       uint64 `json:",omitempty"`
-	ContentHash       string `json:",omitempty"`
-	// DEPRECATED (ProxyDestination) - remove this field
-	ProxyDestination string                          `json:",omitempty"`
-	Proxy            *AgentServiceConnectProxyConfig `json:",omitempty"`
-	Connect          *AgentServiceConnect            `json:",omitempty"`
+	CreateIndex       uint64                          `json:",omitempty" bexpr:"-"`
+	ModifyIndex       uint64                          `json:",omitempty" bexpr:"-"`
+	ContentHash       string                          `json:",omitempty" bexpr:"-"`
+	Proxy             *AgentServiceConnectProxyConfig `json:",omitempty"`
+	Connect           *AgentServiceConnect            `json:",omitempty"`
 }
 
 // AgentServiceChecksInfo returns information about a Service and its checks
@@ -101,28 +90,19 @@
 // AgentServiceConnect represents the Connect configuration of a service.
 type AgentServiceConnect struct {
 	Native         bool                      `json:",omitempty"`
-	Proxy          *AgentServiceConnectProxy `json:",omitempty"`
-	SidecarService *AgentServiceRegistration `json:",omitempty"`
-}
-
-// AgentServiceConnectProxy represents the Connect Proxy configuration of a
-// service.
-type AgentServiceConnectProxy struct {
-	ExecMode  ProxyExecMode          `json:",omitempty"`
-	Command   []string               `json:",omitempty"`
-	Config    map[string]interface{} `json:",omitempty"`
-	Upstreams []Upstream             `json:",omitempty"`
+	SidecarService *AgentServiceRegistration `json:",omitempty" bexpr:"-"`
 }
 
 // AgentServiceConnectProxyConfig is the proxy configuration in a connect-proxy
 // ServiceDefinition or response.
 type AgentServiceConnectProxyConfig struct {
-	DestinationServiceName string
+	DestinationServiceName string                 `json:",omitempty"`
 	DestinationServiceID   string                 `json:",omitempty"`
 	LocalServiceAddress    string                 `json:",omitempty"`
 	LocalServicePort       int                    `json:",omitempty"`
-	Config                 map[string]interface{} `json:",omitempty"`
-	Upstreams              []Upstream
+	Config                 map[string]interface{} `json:",omitempty" bexpr:"-"`
+	Upstreams              []Upstream             `json:",omitempty"`
+	MeshGateway            MeshGatewayConfig      `json:",omitempty"`
 }
 
 // AgentMember represents a cluster member known to the agent
@@ -155,21 +135,20 @@
 
 // AgentServiceRegistration is used to register a new service
 type AgentServiceRegistration struct {
-	Kind              ServiceKind       `json:",omitempty"`
-	ID                string            `json:",omitempty"`
-	Name              string            `json:",omitempty"`
-	Tags              []string          `json:",omitempty"`
-	Port              int               `json:",omitempty"`
-	Address           string            `json:",omitempty"`
-	EnableTagOverride bool              `json:",omitempty"`
-	Meta              map[string]string `json:",omitempty"`
-	Weights           *AgentWeights     `json:",omitempty"`
+	Kind              ServiceKind               `json:",omitempty"`
+	ID                string                    `json:",omitempty"`
+	Name              string                    `json:",omitempty"`
+	Tags              []string                  `json:",omitempty"`
+	Port              int                       `json:",omitempty"`
+	Address           string                    `json:",omitempty"`
+	TaggedAddresses   map[string]ServiceAddress `json:",omitempty"`
+	EnableTagOverride bool                      `json:",omitempty"`
+	Meta              map[string]string         `json:",omitempty"`
+	Weights           *AgentWeights             `json:",omitempty"`
 	Check             *AgentServiceCheck
 	Checks            AgentServiceChecks
-	// DEPRECATED (ProxyDestination) - remove this field
-	ProxyDestination string                          `json:",omitempty"`
-	Proxy            *AgentServiceConnectProxyConfig `json:",omitempty"`
-	Connect          *AgentServiceConnect            `json:",omitempty"`
+	Proxy             *AgentServiceConnectProxyConfig `json:",omitempty"`
+	Connect           *AgentServiceConnect            `json:",omitempty"`
 }
 
 // AgentCheckRegistration is used to register a new check
@@ -274,12 +253,8 @@
 	TargetServiceID   string
 	TargetServiceName string
 	ContentHash       string
-	// DEPRECATED(managed-proxies) - this struct is re-used for sidecar configs
-	// but they don't need ExecMode or Command
-	ExecMode  ProxyExecMode `json:",omitempty"`
-	Command   []string      `json:",omitempty"`
-	Config    map[string]interface{}
-	Upstreams []Upstream
+	Config            map[string]interface{} `bexpr:"-"`
+	Upstreams         []Upstream
 }
 
 // Upstream is the response structure for a proxy upstream configuration.
@@ -290,7 +265,8 @@
 	Datacenter           string                 `json:",omitempty"`
 	LocalBindAddress     string                 `json:",omitempty"`
 	LocalBindPort        int                    `json:",omitempty"`
-	Config               map[string]interface{} `json:",omitempty"`
+	Config               map[string]interface{} `json:",omitempty" bexpr:"-"`
+	MeshGateway          MeshGatewayConfig      `json:",omitempty"`
 }
 
 // Agent can be used to query the Agent endpoints
@@ -385,7 +361,14 @@
 
 // Checks returns the locally registered checks
 func (a *Agent) Checks() (map[string]*AgentCheck, error) {
+	return a.ChecksWithFilter("")
+}
+
+// ChecksWithFilter returns a subset of the locally registered checks that match
+// the given filter expression
+func (a *Agent) ChecksWithFilter(filter string) (map[string]*AgentCheck, error) {
 	r := a.c.newRequest("GET", "/v1/agent/checks")
+	r.filterQuery(filter)
 	_, resp, err := requireOK(a.c.doRequest(r))
 	if err != nil {
 		return nil, err
@@ -401,7 +384,14 @@
 
 // Services returns the locally registered services
 func (a *Agent) Services() (map[string]*AgentService, error) {
+	return a.ServicesWithFilter("")
+}
+
+// ServicesWithFilter returns a subset of the locally registered services that match
+// the given filter expression
+func (a *Agent) ServicesWithFilter(filter string) (map[string]*AgentService, error) {
 	r := a.c.newRequest("GET", "/v1/agent/services")
+	r.filterQuery(filter)
 	_, resp, err := requireOK(a.c.doRequest(r))
 	if err != nil {
 		return nil, err
@@ -799,31 +789,6 @@
 	return &out, qm, nil
 }
 
-// ConnectProxyConfig gets the configuration for a local managed proxy instance.
-//
-// Note that this uses an unconventional blocking mechanism since it's
-// agent-local state. That means there is no persistent raft index so we block
-// based on object hash instead.
-func (a *Agent) ConnectProxyConfig(proxyServiceID string, q *QueryOptions) (*ConnectProxyConfig, *QueryMeta, error) {
-	r := a.c.newRequest("GET", "/v1/agent/connect/proxy/"+proxyServiceID)
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out ConnectProxyConfig
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return &out, qm, nil
-}
-
 // EnableServiceMaintenance toggles service maintenance mode on
 // for the given service ID.
 func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error {
@@ -1000,12 +965,20 @@
 	r := a.c.newRequest("PUT", fmt.Sprintf("/v1/agent/token/%s", target))
 	r.setWriteOptions(q)
 	r.obj = &AgentToken{Token: token}
-	rtt, resp, err := requireOK(a.c.doRequest(r))
+
+	rtt, resp, err := a.c.doRequest(r)
 	if err != nil {
-		return nil, resp.StatusCode, err
+		return nil, 0, err
 	}
-	resp.Body.Close()
+	defer resp.Body.Close()
 
 	wm := &WriteMeta{RequestTime: rtt}
+
+	if resp.StatusCode != 200 {
+		var buf bytes.Buffer
+		io.Copy(&buf, resp.Body)
+		return wm, resp.StatusCode, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
+	}
+
 	return wm, resp.StatusCode, nil
 }
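
The new *WithFilter variants pass a filter expression through to the agent; a sketch, assuming the documented "Service" bexpr selector for /v1/agent/services:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Only services whose name is "redis"; the go-bexpr selector syntax
	// follows the agent's service-listing API.
	svcs, err := client.Agent().ServicesWithFilter(`Service == "redis"`)
	if err != nil {
		log.Fatal(err)
	}
	for id := range svcs {
		fmt.Println(id)
	}
}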
diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go
index 39a0ad3..b624b3c 100644
--- a/vendor/github.com/hashicorp/consul/api/api.go
+++ b/vendor/github.com/hashicorp/consul/api/api.go
@@ -30,6 +30,10 @@
 	// the HTTP token.
 	HTTPTokenEnvName = "CONSUL_HTTP_TOKEN"
 
+	// HTTPTokenFileEnvName defines an environment variable name which sets
+	// the HTTP token file.
+	HTTPTokenFileEnvName = "CONSUL_HTTP_TOKEN_FILE"
+
 	// HTTPAuthEnvName defines an environment variable name which sets
 	// the HTTP authentication header.
 	HTTPAuthEnvName = "CONSUL_HTTP_AUTH"
@@ -85,7 +89,7 @@
 	RequireConsistent bool
 
 	// UseCache requests that the agent cache results locally. See
-	// https://www.consul.io/api/index.html#agent-caching for more details on the
+	// https://www.consul.io/api/features/caching.html for more details on the
 	// semantics.
 	UseCache bool
 
@@ -95,14 +99,14 @@
 	// returned. Clients that wish to allow for stale results on error can set
 	// StaleIfError to a longer duration to change this behavior. It is ignored
 	// if the endpoint supports background refresh caching. See
-	// https://www.consul.io/api/index.html#agent-caching for more details.
+	// https://www.consul.io/api/features/caching.html for more details.
 	MaxAge time.Duration
 
 	// StaleIfError specifies how stale the client will accept a cached response
 	// if the servers are unavailable to fetch a fresh one. Only makes sense when
 	// UseCache is true and MaxAge is set to a lower, non-zero value. It is
 	// ignored if the endpoint supports background refresh caching. See
-	// https://www.consul.io/api/index.html#agent-caching for more details.
+	// https://www.consul.io/api/features/caching.html for more details.
 	StaleIfError time.Duration
 
 	// WaitIndex is used to enable a blocking query. Waits
@@ -139,6 +143,10 @@
 	// a value from 0 to 5 (inclusive).
 	RelayFactor uint8
 
+	// LocalOnly is used in keyring list operation to force the keyring
+	// query to only hit local servers (no WAN traffic).
+	LocalOnly bool
+
 	// Connect filters prepared query execution to only include Connect-capable
 	// services. This currently affects prepared query execution.
 	Connect bool
@@ -146,6 +154,10 @@
 	// ctx is an optional context pass through to the underlying HTTP
 	// request layer. Use Context() and WithContext() to manage this.
 	ctx context.Context
+
+	// Filter requests filtering data prior to it being returned. The string
+	// is a go-bexpr compatible expression.
+	Filter string
 }
 
 func (o *QueryOptions) Context() context.Context {
@@ -276,6 +288,10 @@
 	// which overrides the agent's default token.
 	Token string
 
+	// TokenFile is a file containing the current token to use for this client.
+	// If provided it is read once at startup and never again.
+	TokenFile string
+
 	TLSConfig TLSConfig
 }
 
@@ -339,6 +355,10 @@
 		config.Address = addr
 	}
 
+	if tokenFile := os.Getenv(HTTPTokenFileEnvName); tokenFile != "" {
+		config.TokenFile = tokenFile
+	}
+
 	if token := os.Getenv(HTTPTokenEnvName); token != "" {
 		config.Token = token
 	}
@@ -445,6 +465,7 @@
 	env = append(env,
 		fmt.Sprintf("%s=%s", HTTPAddrEnvName, c.Address),
 		fmt.Sprintf("%s=%s", HTTPTokenEnvName, c.Token),
+		fmt.Sprintf("%s=%s", HTTPTokenFileEnvName, c.TokenFile),
 		fmt.Sprintf("%s=%t", HTTPSSLEnvName, c.Scheme == "https"),
 		fmt.Sprintf("%s=%s", HTTPCAFile, c.TLSConfig.CAFile),
 		fmt.Sprintf("%s=%s", HTTPCAPath, c.TLSConfig.CAPath),
@@ -537,6 +558,19 @@
 		config.Address = parts[1]
 	}
 
+	// If the TokenFile is set, always use that, even if a Token is configured.
+	// This is because when TokenFile is set it is read into the Token field.
+	// We want any derived clients to have to re-read the token file.
+	if config.TokenFile != "" {
+		data, err := ioutil.ReadFile(config.TokenFile)
+		if err != nil {
+			return nil, fmt.Errorf("Error loading token file: %s", err)
+		}
+
+		if token := strings.TrimSpace(string(data)); token != "" {
+			config.Token = token
+		}
+	}
 	if config.Token == "" {
 		config.Token = defConfig.Token
 	}
@@ -614,6 +648,9 @@
 	if q.Near != "" {
 		r.params.Set("near", q.Near)
 	}
+	if q.Filter != "" {
+		r.params.Set("filter", q.Filter)
+	}
 	if len(q.NodeMeta) > 0 {
 		for key, value := range q.NodeMeta {
 			r.params.Add("node-meta", key+":"+value)
@@ -622,6 +659,9 @@
 	if q.RelayFactor != 0 {
 		r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
 	}
+	if q.LocalOnly {
+		r.params.Set("local-only", fmt.Sprintf("%t", q.LocalOnly))
+	}
 	if q.Connect {
 		r.params.Set("connect", "true")
 	}
@@ -813,6 +853,8 @@
 }
 
 // parseQueryMeta is used to help parse query meta-data
+//
+// TODO(rb): bug? the error from this function is never handled
 func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
 	header := resp.Header
 
@@ -890,10 +932,42 @@
 		return d, nil, e
 	}
 	if resp.StatusCode != 200 {
-		var buf bytes.Buffer
-		io.Copy(&buf, resp.Body)
-		resp.Body.Close()
-		return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
+		return d, nil, generateUnexpectedResponseCodeError(resp)
 	}
 	return d, resp, nil
 }
+
+func (req *request) filterQuery(filter string) {
+	if filter == "" {
+		return
+	}
+
+	req.params.Set("filter", filter)
+}
+
+// generateUnexpectedResponseCodeError consumes the rest of the body, closes
+// the body stream and generates an error indicating the status code was
+// unexpected.
+func generateUnexpectedResponseCodeError(resp *http.Response) error {
+	var buf bytes.Buffer
+	io.Copy(&buf, resp.Body)
+	resp.Body.Close()
+	return fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
+}
+
+func requireNotFoundOrOK(d time.Duration, resp *http.Response, e error) (bool, time.Duration, *http.Response, error) {
+	if e != nil {
+		if resp != nil {
+			resp.Body.Close()
+		}
+		return false, d, nil, e
+	}
+	switch resp.StatusCode {
+	case 200:
+		return true, d, resp, nil
+	case 404:
+		return false, d, resp, nil
+	default:
+		return false, d, nil, generateUnexpectedResponseCodeError(resp)
+	}
+}
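
The `TokenFile` plumbing added above reads the token from disk exactly once, when the client is constructed, and copies it into `Token`. A minimal sketch of configuring it programmatically; the path is illustrative, and setting `CONSUL_HTTP_TOKEN_FILE` in the environment would achieve the same:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// TokenFile takes precedence over Token; the file is read once during
	// NewClient and never re-read. The path below is illustrative.
	cfg := api.DefaultConfig()
	cfg.TokenFile = "/etc/consul.d/agent.token"

	client, err := api.NewClient(cfg)
	if err != nil {
		log.Fatal(err) // e.g. "Error loading token file: ..."
	}
	fmt.Println("client ready:", client != nil)
}
```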
diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go
index c175c3f..3fb0553 100644
--- a/vendor/github.com/hashicorp/consul/api/catalog.go
+++ b/vendor/github.com/hashicorp/consul/api/catalog.go
@@ -1,5 +1,10 @@
 package api
 
+import (
+	"net"
+	"strconv"
+)
+
 type Weights struct {
 	Passing int
 	Warning int
@@ -16,6 +21,11 @@
 	ModifyIndex     uint64
 }
 
+type ServiceAddress struct {
+	Address string
+	Port    int
+}
+
 type CatalogService struct {
 	ID                       string
 	Node                     string
@@ -26,17 +36,16 @@
 	ServiceID                string
 	ServiceName              string
 	ServiceAddress           string
+	ServiceTaggedAddresses   map[string]ServiceAddress
 	ServiceTags              []string
 	ServiceMeta              map[string]string
 	ServicePort              int
 	ServiceWeights           Weights
 	ServiceEnableTagOverride bool
-	// DEPRECATED (ProxyDestination) - remove the next comment!
-	// We forgot to ever add ServiceProxyDestination here so no need to deprecate!
-	ServiceProxy *AgentServiceConnectProxyConfig
-	CreateIndex  uint64
-	Checks       HealthChecks
-	ModifyIndex  uint64
+	ServiceProxy             *AgentServiceConnectProxyConfig
+	CreateIndex              uint64
+	Checks                   HealthChecks
+	ModifyIndex              uint64
 }
 
 type CatalogNode struct {
@@ -242,3 +251,12 @@
 	}
 	return out, qm, nil
 }
+
+func ParseServiceAddr(addrPort string) (ServiceAddress, error) {
+	port := 0
+	host, portStr, err := net.SplitHostPort(addrPort)
+	if err == nil {
+		port, err = strconv.Atoi(portStr)
+	}
+	return ServiceAddress{Address: host, Port: port}, err
+}
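
`ParseServiceAddr` is a thin wrapper over `net.SplitHostPort` plus `strconv.Atoi`, so the input must carry an explicit port. A usage sketch with an illustrative address:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// SplitHostPort requires an explicit port; "10.0.0.1:8080" is illustrative.
	addr, err := api.ParseServiceAddr("10.0.0.1:8080")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(addr.Address, addr.Port) // 10.0.0.1 8080
}
```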
diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go
new file mode 100644
index 0000000..1588f2e
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/config_entry.go
@@ -0,0 +1,319 @@
+package api
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/mitchellh/mapstructure"
+)
+
+const (
+	ServiceDefaults string = "service-defaults"
+	ProxyDefaults   string = "proxy-defaults"
+	ServiceRouter   string = "service-router"
+	ServiceSplitter string = "service-splitter"
+	ServiceResolver string = "service-resolver"
+
+	ProxyConfigGlobal string = "global"
+)
+
+type ConfigEntry interface {
+	GetKind() string
+	GetName() string
+	GetCreateIndex() uint64
+	GetModifyIndex() uint64
+}
+
+type MeshGatewayMode string
+
+const (
+	// MeshGatewayModeDefault represents no specific mode and should
+	// be used to indicate that a different layer of the configuration
+	// chain should take precedence.
+	MeshGatewayModeDefault MeshGatewayMode = ""
+
+	// MeshGatewayModeNone represents that the Upstream Connect connections
+	// should be direct and not flow through a mesh gateway.
+	MeshGatewayModeNone MeshGatewayMode = "none"
+
+	// MeshGatewayModeLocal represents that the Upstream Connect connections
+	// should be made to a mesh gateway in the local datacenter.
+	MeshGatewayModeLocal MeshGatewayMode = "local"
+
+	// MeshGatewayModeRemote represents that the Upstream Connect connections
+	// should be made to a mesh gateway in a remote datacenter.
+	MeshGatewayModeRemote MeshGatewayMode = "remote"
+)
+
+// MeshGatewayConfig controls how Mesh Gateways are used for upstream Connect
+// services
+type MeshGatewayConfig struct {
+	// Mode is the mode that should be used for the upstream connection.
+	Mode MeshGatewayMode `json:",omitempty"`
+}
+
+type ServiceConfigEntry struct {
+	Kind        string
+	Name        string
+	Protocol    string            `json:",omitempty"`
+	MeshGateway MeshGatewayConfig `json:",omitempty"`
+	ExternalSNI string            `json:",omitempty"`
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+func (s *ServiceConfigEntry) GetKind() string {
+	return s.Kind
+}
+
+func (s *ServiceConfigEntry) GetName() string {
+	return s.Name
+}
+
+func (s *ServiceConfigEntry) GetCreateIndex() uint64 {
+	return s.CreateIndex
+}
+
+func (s *ServiceConfigEntry) GetModifyIndex() uint64 {
+	return s.ModifyIndex
+}
+
+type ProxyConfigEntry struct {
+	Kind        string
+	Name        string
+	Config      map[string]interface{} `json:",omitempty"`
+	MeshGateway MeshGatewayConfig      `json:",omitempty"`
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+func (p *ProxyConfigEntry) GetKind() string {
+	return p.Kind
+}
+
+func (p *ProxyConfigEntry) GetName() string {
+	return p.Name
+}
+
+func (p *ProxyConfigEntry) GetCreateIndex() uint64 {
+	return p.CreateIndex
+}
+
+func (p *ProxyConfigEntry) GetModifyIndex() uint64 {
+	return p.ModifyIndex
+}
+
+type rawEntryListResponse struct {
+	kind    string
+	Entries []map[string]interface{}
+}
+
+func makeConfigEntry(kind, name string) (ConfigEntry, error) {
+	switch kind {
+	case ServiceDefaults:
+		return &ServiceConfigEntry{Kind: kind, Name: name}, nil
+	case ProxyDefaults:
+		return &ProxyConfigEntry{Kind: kind, Name: name}, nil
+	case ServiceRouter:
+		return &ServiceRouterConfigEntry{Kind: kind, Name: name}, nil
+	case ServiceSplitter:
+		return &ServiceSplitterConfigEntry{Kind: kind, Name: name}, nil
+	case ServiceResolver:
+		return &ServiceResolverConfigEntry{Kind: kind, Name: name}, nil
+	default:
+		return nil, fmt.Errorf("invalid config entry kind: %s", kind)
+	}
+}
+
+func MakeConfigEntry(kind, name string) (ConfigEntry, error) {
+	return makeConfigEntry(kind, name)
+}
+
+// DecodeConfigEntry will decode the result of using json.Unmarshal of a config
+// entry into a map[string]interface{}.
+//
+// Important caveats:
+//
+// - This will NOT work if the map[string]interface{} was produced using HCL
+// decoding as that requires more extensive parsing to work around the issues
+// with map[string][]interface{} that arise.
+//
+// - This will only decode fields using their camel case json field
+// representations.
+func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) {
+	var entry ConfigEntry
+
+	kindVal, ok := raw["Kind"]
+	if !ok {
+		kindVal, ok = raw["kind"]
+	}
+	if !ok {
+		return nil, fmt.Errorf("Payload does not contain a kind/Kind key at the top level")
+	}
+
+	if kindStr, ok := kindVal.(string); ok {
+		newEntry, err := makeConfigEntry(kindStr, "")
+		if err != nil {
+			return nil, err
+		}
+		entry = newEntry
+	} else {
+		return nil, fmt.Errorf("Kind value in payload is not a string")
+	}
+
+	decodeConf := &mapstructure.DecoderConfig{
+		DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
+		Result:           &entry,
+		WeaklyTypedInput: true,
+	}
+
+	decoder, err := mapstructure.NewDecoder(decodeConf)
+	if err != nil {
+		return nil, err
+	}
+
+	return entry, decoder.Decode(raw)
+}
+
+func DecodeConfigEntryFromJSON(data []byte) (ConfigEntry, error) {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(data, &raw); err != nil {
+		return nil, err
+	}
+
+	return DecodeConfigEntry(raw)
+}
+
+func decodeConfigEntrySlice(raw []map[string]interface{}) ([]ConfigEntry, error) {
+	var entries []ConfigEntry
+	for _, rawEntry := range raw {
+		entry, err := DecodeConfigEntry(rawEntry)
+		if err != nil {
+			return nil, err
+		}
+		entries = append(entries, entry)
+	}
+	return entries, nil
+}
+
+// ConfigEntries can be used to query the Config endpoints
+type ConfigEntries struct {
+	c *Client
+}
+
+// Config returns a handle to the Config endpoints
+func (c *Client) ConfigEntries() *ConfigEntries {
+	return &ConfigEntries{c}
+}
+
+func (conf *ConfigEntries) Get(kind string, name string, q *QueryOptions) (ConfigEntry, *QueryMeta, error) {
+	if kind == "" || name == "" {
+		return nil, nil, fmt.Errorf("Both kind and name parameters must not be empty")
+	}
+
+	entry, err := makeConfigEntry(kind, name)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s/%s", kind, name))
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(conf.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if err := decodeBody(resp, entry); err != nil {
+		return nil, nil, err
+	}
+
+	return entry, qm, nil
+}
+
+func (conf *ConfigEntries) List(kind string, q *QueryOptions) ([]ConfigEntry, *QueryMeta, error) {
+	if kind == "" {
+		return nil, nil, fmt.Errorf("The kind parameter must not be empty")
+	}
+
+	r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s", kind))
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(conf.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var raw []map[string]interface{}
+	if err := decodeBody(resp, &raw); err != nil {
+		return nil, nil, err
+	}
+
+	entries, err := decodeConfigEntrySlice(raw)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return entries, qm, nil
+}
+
+func (conf *ConfigEntries) Set(entry ConfigEntry, w *WriteOptions) (bool, *WriteMeta, error) {
+	return conf.set(entry, nil, w)
+}
+
+func (conf *ConfigEntries) CAS(entry ConfigEntry, index uint64, w *WriteOptions) (bool, *WriteMeta, error) {
+	return conf.set(entry, map[string]string{"cas": strconv.FormatUint(index, 10)}, w)
+}
+
+func (conf *ConfigEntries) set(entry ConfigEntry, params map[string]string, w *WriteOptions) (bool, *WriteMeta, error) {
+	r := conf.c.newRequest("PUT", "/v1/config")
+	r.setWriteOptions(w)
+	for param, value := range params {
+		r.params.Set(param, value)
+	}
+	r.obj = entry
+	rtt, resp, err := requireOK(conf.c.doRequest(r))
+	if err != nil {
+		return false, nil, err
+	}
+	defer resp.Body.Close()
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, nil, fmt.Errorf("Failed to read response: %v", err)
+	}
+	res := strings.Contains(buf.String(), "true")
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return res, wm, nil
+}
+
+func (conf *ConfigEntries) Delete(kind string, name string, w *WriteOptions) (*WriteMeta, error) {
+	if kind == "" || name == "" {
+		return nil, fmt.Errorf("Both kind and name parameters must not be empty")
+	}
+
+	r := conf.c.newRequest("DELETE", fmt.Sprintf("/v1/config/%s/%s", kind, name))
+	r.setWriteOptions(w)
+	rtt, resp, err := requireOK(conf.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
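
Putting the new `ConfigEntries` endpoints together: `Set` PUTs the entry to `/v1/config` and reports the boolean body, while `Get` needs the kind/name pair up front so it can allocate the right concrete type before decoding. A round-trip sketch against a local agent; the service name is illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	entries := client.ConfigEntries()

	// Write a service-defaults entry; "web" is an illustrative service name.
	ok, _, err := entries.Set(&api.ServiceConfigEntry{
		Kind:     api.ServiceDefaults,
		Name:     "web",
		Protocol: "http",
	}, nil)
	if err != nil || !ok {
		log.Fatalf("set failed: ok=%v err=%v", ok, err)
	}

	// Read it back; Get returns the ConfigEntry interface, so assert the kind.
	entry, _, err := entries.Get(api.ServiceDefaults, "web", nil)
	if err != nil {
		log.Fatal(err)
	}
	svc := entry.(*api.ServiceConfigEntry)
	fmt.Println(svc.Name, svc.Protocol)
}
```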
diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go
new file mode 100644
index 0000000..77acfbd
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go
@@ -0,0 +1,200 @@
+package api
+
+import (
+	"encoding/json"
+	"time"
+)
+
+type ServiceRouterConfigEntry struct {
+	Kind string
+	Name string
+
+	Routes []ServiceRoute `json:",omitempty"`
+
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+func (e *ServiceRouterConfigEntry) GetKind() string        { return e.Kind }
+func (e *ServiceRouterConfigEntry) GetName() string        { return e.Name }
+func (e *ServiceRouterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
+func (e *ServiceRouterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
+
+type ServiceRoute struct {
+	Match       *ServiceRouteMatch       `json:",omitempty"`
+	Destination *ServiceRouteDestination `json:",omitempty"`
+}
+
+type ServiceRouteMatch struct {
+	HTTP *ServiceRouteHTTPMatch `json:",omitempty"`
+}
+
+type ServiceRouteHTTPMatch struct {
+	PathExact  string `json:",omitempty"`
+	PathPrefix string `json:",omitempty"`
+	PathRegex  string `json:",omitempty"`
+
+	Header     []ServiceRouteHTTPMatchHeader     `json:",omitempty"`
+	QueryParam []ServiceRouteHTTPMatchQueryParam `json:",omitempty"`
+	Methods    []string                          `json:",omitempty"`
+}
+
+type ServiceRouteHTTPMatchHeader struct {
+	Name    string
+	Present bool   `json:",omitempty"`
+	Exact   string `json:",omitempty"`
+	Prefix  string `json:",omitempty"`
+	Suffix  string `json:",omitempty"`
+	Regex   string `json:",omitempty"`
+	Invert  bool   `json:",omitempty"`
+}
+
+type ServiceRouteHTTPMatchQueryParam struct {
+	Name    string
+	Present bool   `json:",omitempty"`
+	Exact   string `json:",omitempty"`
+	Regex   string `json:",omitempty"`
+}
+
+type ServiceRouteDestination struct {
+	Service               string        `json:",omitempty"`
+	ServiceSubset         string        `json:",omitempty"`
+	Namespace             string        `json:",omitempty"`
+	PrefixRewrite         string        `json:",omitempty"`
+	RequestTimeout        time.Duration `json:",omitempty"`
+	NumRetries            uint32        `json:",omitempty"`
+	RetryOnConnectFailure bool          `json:",omitempty"`
+	RetryOnStatusCodes    []uint32      `json:",omitempty"`
+}
+
+func (e *ServiceRouteDestination) MarshalJSON() ([]byte, error) {
+	type Alias ServiceRouteDestination
+	exported := &struct {
+		RequestTimeout string `json:",omitempty"`
+		*Alias
+	}{
+		RequestTimeout: e.RequestTimeout.String(),
+		Alias:          (*Alias)(e),
+	}
+	if e.RequestTimeout == 0 {
+		exported.RequestTimeout = ""
+	}
+
+	return json.Marshal(exported)
+}
+
+func (e *ServiceRouteDestination) UnmarshalJSON(data []byte) error {
+	type Alias ServiceRouteDestination
+	aux := &struct {
+		RequestTimeout string
+		*Alias
+	}{
+		Alias: (*Alias)(e),
+	}
+	if err := json.Unmarshal(data, &aux); err != nil {
+		return err
+	}
+	var err error
+	if aux.RequestTimeout != "" {
+		if e.RequestTimeout, err = time.ParseDuration(aux.RequestTimeout); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+type ServiceSplitterConfigEntry struct {
+	Kind string
+	Name string
+
+	Splits []ServiceSplit `json:",omitempty"`
+
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+func (e *ServiceSplitterConfigEntry) GetKind() string        { return e.Kind }
+func (e *ServiceSplitterConfigEntry) GetName() string        { return e.Name }
+func (e *ServiceSplitterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
+func (e *ServiceSplitterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
+
+type ServiceSplit struct {
+	Weight        float32
+	Service       string `json:",omitempty"`
+	ServiceSubset string `json:",omitempty"`
+	Namespace     string `json:",omitempty"`
+}
+
+type ServiceResolverConfigEntry struct {
+	Kind string
+	Name string
+
+	DefaultSubset  string                             `json:",omitempty"`
+	Subsets        map[string]ServiceResolverSubset   `json:",omitempty"`
+	Redirect       *ServiceResolverRedirect           `json:",omitempty"`
+	Failover       map[string]ServiceResolverFailover `json:",omitempty"`
+	ConnectTimeout time.Duration                      `json:",omitempty"`
+
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+func (e *ServiceResolverConfigEntry) MarshalJSON() ([]byte, error) {
+	type Alias ServiceResolverConfigEntry
+	exported := &struct {
+		ConnectTimeout string `json:",omitempty"`
+		*Alias
+	}{
+		ConnectTimeout: e.ConnectTimeout.String(),
+		Alias:          (*Alias)(e),
+	}
+	if e.ConnectTimeout == 0 {
+		exported.ConnectTimeout = ""
+	}
+
+	return json.Marshal(exported)
+}
+
+func (e *ServiceResolverConfigEntry) UnmarshalJSON(data []byte) error {
+	type Alias ServiceResolverConfigEntry
+	aux := &struct {
+		ConnectTimeout string
+		*Alias
+	}{
+		Alias: (*Alias)(e),
+	}
+	if err := json.Unmarshal(data, &aux); err != nil {
+		return err
+	}
+	var err error
+	if aux.ConnectTimeout != "" {
+		if e.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (e *ServiceResolverConfigEntry) GetKind() string        { return e.Kind }
+func (e *ServiceResolverConfigEntry) GetName() string        { return e.Name }
+func (e *ServiceResolverConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
+func (e *ServiceResolverConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
+
+type ServiceResolverSubset struct {
+	Filter      string `json:",omitempty"`
+	OnlyPassing bool   `json:",omitempty"`
+}
+
+type ServiceResolverRedirect struct {
+	Service       string `json:",omitempty"`
+	ServiceSubset string `json:",omitempty"`
+	Namespace     string `json:",omitempty"`
+	Datacenter    string `json:",omitempty"`
+}
+
+type ServiceResolverFailover struct {
+	Service       string   `json:",omitempty"`
+	ServiceSubset string   `json:",omitempty"`
+	Namespace     string   `json:",omitempty"`
+	Datacenters   []string `json:",omitempty"`
+}
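
The custom `MarshalJSON`/`UnmarshalJSON` pairs above exist so that `ConnectTimeout` (and `RequestTimeout` on route destinations) travel as Go duration strings such as `"15s"` rather than integer nanoseconds. A small round-trip sketch; the entry name is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	entry := &api.ServiceResolverConfigEntry{
		Kind:           api.ServiceResolver,
		Name:           "web", // illustrative
		ConnectTimeout: 15 * time.Second,
	}

	// MarshalJSON emits ConnectTimeout as the duration string "15s".
	data, err := json.Marshal(entry)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data))

	// UnmarshalJSON parses the duration string back into a time.Duration.
	var decoded api.ServiceResolverConfigEntry
	if err := json.Unmarshal(data, &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.ConnectTimeout) // 15s
}
```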
diff --git a/vendor/github.com/hashicorp/consul/api/connect_intention.go b/vendor/github.com/hashicorp/consul/api/connect_intention.go
index a996c03..d25cb84 100644
--- a/vendor/github.com/hashicorp/consul/api/connect_intention.go
+++ b/vendor/github.com/hashicorp/consul/api/connect_intention.go
@@ -54,6 +54,13 @@
 	// or modified.
 	CreatedAt, UpdatedAt time.Time
 
+	// Hash of the contents of the intention
+	//
+	// This is needed mainly for replication purposes. When replicating from
+	// one DC to another, keeping the content Hash allows us to detect
+	// content changes more efficiently than checking every single field.
+	Hash []byte
+
 	CreateIndex uint64
 	ModifyIndex uint64
 }
diff --git a/vendor/github.com/hashicorp/consul/api/discovery_chain.go b/vendor/github.com/hashicorp/consul/api/discovery_chain.go
new file mode 100644
index 0000000..407a3b0
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/discovery_chain.go
@@ -0,0 +1,230 @@
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+// DiscoveryChain can be used to query the discovery-chain endpoints
+type DiscoveryChain struct {
+	c *Client
+}
+
+// DiscoveryChain returns a handle to the discovery-chain endpoints
+func (c *Client) DiscoveryChain() *DiscoveryChain {
+	return &DiscoveryChain{c}
+}
+
+func (d *DiscoveryChain) Get(name string, opts *DiscoveryChainOptions, q *QueryOptions) (*DiscoveryChainResponse, *QueryMeta, error) {
+	if name == "" {
+		return nil, nil, fmt.Errorf("Name parameter must not be empty")
+	}
+
+	method := "GET"
+	if opts != nil && opts.requiresPOST() {
+		method = "POST"
+	}
+
+	r := d.c.newRequest(method, fmt.Sprintf("/v1/discovery-chain/%s", name))
+	r.setQueryOptions(q)
+
+	if opts != nil {
+		if opts.EvaluateInDatacenter != "" {
+			r.params.Set("compile-dc", opts.EvaluateInDatacenter)
+		}
+		// TODO(namespaces): handle possible EvaluateInNamespace here
+	}
+
+	if method == "POST" {
+		r.obj = opts
+	}
+
+	rtt, resp, err := requireOK(d.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out DiscoveryChainResponse
+
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, qm, nil
+}
+
+type DiscoveryChainOptions struct {
+	EvaluateInDatacenter string `json:"-"`
+
+	// OverrideMeshGateway allows for the mesh gateway setting to be overridden
+	// for any resolver in the compiled chain.
+	OverrideMeshGateway MeshGatewayConfig `json:",omitempty"`
+
+	// OverrideProtocol allows for the final protocol for the chain to be
+	// altered.
+	//
+	// - If the chain ordinarily would be TCP and an L7 protocol is passed here
+	// the chain will not include Routers or Splitters.
+	//
+	// - If the chain ordinarily would be L7 and TCP is passed here the chain
+	// will not include Routers or Splitters.
+	OverrideProtocol string `json:",omitempty"`
+
+	// OverrideConnectTimeout allows for the ConnectTimeout setting to be
+	// overridden for any resolver in the compiled chain.
+	OverrideConnectTimeout time.Duration `json:",omitempty"`
+}
+
+func (o *DiscoveryChainOptions) requiresPOST() bool {
+	if o == nil {
+		return false
+	}
+	return o.OverrideMeshGateway.Mode != "" ||
+		o.OverrideProtocol != "" ||
+		o.OverrideConnectTimeout != 0
+}
+
+type DiscoveryChainResponse struct {
+	Chain *CompiledDiscoveryChain
+}
+
+type CompiledDiscoveryChain struct {
+	ServiceName string
+	Namespace   string
+	Datacenter  string
+
+	// CustomizationHash is a unique hash of any data that affects the
+	// compilation of the discovery chain other than config entries or the
+	// name/namespace/datacenter evaluation criteria.
+	//
+	// If set, this value should be used to prefix/suffix any generated load
+	// balancer data plane objects to avoid sharing customized and
+	// non-customized versions.
+	CustomizationHash string
+
+	// Protocol is the overall protocol shared by everything in the chain.
+	Protocol string
+
+	// StartNode is the first key into the Nodes map that should be followed
+	// when walking the discovery chain.
+	StartNode string
+
+	// Nodes contains all nodes available for traversal in the chain keyed by a
+	// unique name.  You can walk this by starting with StartNode.
+	//
+	// NOTE: The names should be treated as opaque values and are only
+	// guaranteed to be consistent within a single compilation.
+	Nodes map[string]*DiscoveryGraphNode
+
+	// Targets is a list of all targets used in this chain.
+	//
+	// NOTE: The names should be treated as opaque values and are only
+	// guaranteed to be consistent within a single compilation.
+	Targets map[string]*DiscoveryTarget
+}
+
+const (
+	DiscoveryGraphNodeTypeRouter   = "router"
+	DiscoveryGraphNodeTypeSplitter = "splitter"
+	DiscoveryGraphNodeTypeResolver = "resolver"
+)
+
+// DiscoveryGraphNode is a single node in the compiled discovery chain.
+type DiscoveryGraphNode struct {
+	Type string
+	Name string // this is NOT necessarily a service
+
+	// fields for Type==router
+	Routes []*DiscoveryRoute
+
+	// fields for Type==splitter
+	Splits []*DiscoverySplit
+
+	// fields for Type==resolver
+	Resolver *DiscoveryResolver
+}
+
+// compiled form of ServiceRoute
+type DiscoveryRoute struct {
+	Definition *ServiceRoute
+	NextNode   string
+}
+
+// compiled form of ServiceSplit
+type DiscoverySplit struct {
+	Weight   float32
+	NextNode string
+}
+
+// compiled form of ServiceResolverConfigEntry
+type DiscoveryResolver struct {
+	Default        bool
+	ConnectTimeout time.Duration
+	Target         string
+	Failover       *DiscoveryFailover
+}
+
+func (r *DiscoveryResolver) MarshalJSON() ([]byte, error) {
+	type Alias DiscoveryResolver
+	exported := &struct {
+		ConnectTimeout string `json:",omitempty"`
+		*Alias
+	}{
+		ConnectTimeout: r.ConnectTimeout.String(),
+		Alias:          (*Alias)(r),
+	}
+	if r.ConnectTimeout == 0 {
+		exported.ConnectTimeout = ""
+	}
+
+	return json.Marshal(exported)
+}
+
+func (r *DiscoveryResolver) UnmarshalJSON(data []byte) error {
+	type Alias DiscoveryResolver
+	aux := &struct {
+		ConnectTimeout string
+		*Alias
+	}{
+		Alias: (*Alias)(r),
+	}
+	if err := json.Unmarshal(data, &aux); err != nil {
+		return err
+	}
+	var err error
+	if aux.ConnectTimeout != "" {
+		if r.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// compiled form of ServiceResolverFailover
+type DiscoveryFailover struct {
+	Targets []string
+}
+
+// DiscoveryTarget represents all of the inputs necessary to use a resolver
+// config entry to execute a catalog query to generate a list of service
+// instances during discovery.
+type DiscoveryTarget struct {
+	ID string
+
+	Service       string
+	ServiceSubset string
+	Namespace     string
+	Datacenter    string
+
+	MeshGateway MeshGatewayConfig
+	Subset      ServiceResolverSubset
+	External    bool
+	SNI         string
+	Name        string
+}
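
A compiled chain is a small graph: start at `Nodes[StartNode]` and follow `NextNode` references until you reach resolvers, whose `Resolver.Target` keys into `Targets`. A depth-first walking sketch, assuming the chain came back from `DiscoveryChain().Get` and every node/target reference resolves within the same response; the service name is illustrative:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/hashicorp/consul/api"
)

// walkChain prints the compiled chain depth-first from StartNode.
func walkChain(chain *api.CompiledDiscoveryChain) {
	var walk func(name string, depth int)
	walk = func(name string, depth int) {
		node := chain.Nodes[name]
		indent := strings.Repeat("  ", depth)
		switch node.Type {
		case api.DiscoveryGraphNodeTypeRouter:
			fmt.Printf("%srouter %q\n", indent, node.Name)
			for _, route := range node.Routes {
				walk(route.NextNode, depth+1)
			}
		case api.DiscoveryGraphNodeTypeSplitter:
			fmt.Printf("%ssplitter %q\n", indent, node.Name)
			for _, split := range node.Splits {
				fmt.Printf("%s  weight=%v ->\n", indent, split.Weight)
				walk(split.NextNode, depth+2)
			}
		case api.DiscoveryGraphNodeTypeResolver:
			target := chain.Targets[node.Resolver.Target]
			fmt.Printf("%sresolver -> %s (dc=%s)\n", indent, target.Service, target.Datacenter)
		}
	}
	walk(chain.StartNode, 0)
}

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	resp, _, err := client.DiscoveryChain().Get("web", nil, nil) // "web" is illustrative
	if err != nil {
		log.Fatal(err)
	}
	walkChain(resp.Chain)
}
```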
diff --git a/vendor/github.com/hashicorp/consul/api/go.mod b/vendor/github.com/hashicorp/consul/api/go.mod
new file mode 100644
index 0000000..78fe8a3
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/go.mod
@@ -0,0 +1,16 @@
+module github.com/hashicorp/consul/api
+
+go 1.12
+
+replace github.com/hashicorp/consul/sdk => ../sdk
+
+require (
+	github.com/hashicorp/consul/sdk v0.2.0
+	github.com/hashicorp/go-cleanhttp v0.5.1
+	github.com/hashicorp/go-rootcerts v1.0.0
+	github.com/hashicorp/go-uuid v1.0.1
+	github.com/hashicorp/serf v0.8.2
+	github.com/mitchellh/mapstructure v1.1.2
+	github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c
+	github.com/stretchr/testify v1.3.0
+)
diff --git a/vendor/github.com/hashicorp/consul/api/go.sum b/vendor/github.com/hashicorp/consul/api/go.sum
new file mode 100644
index 0000000..01591f9
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/go.sum
@@ -0,0 +1,78 @@
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/hashicorp/consul/sdk v0.2.0 h1:GWFYFmry/k4b1hEoy7kSkmU8e30GAyI4VZHk0fRxeL4=
+github.com/hashicorp/consul/sdk v0.2.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 h1:x6r4Jo0KNzOOzYd8lbcRsqjuqEASK6ob3auvWYM4/8U=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/vendor/github.com/hashicorp/consul/api/operator_license.go b/vendor/github.com/hashicorp/consul/api/operator_license.go
new file mode 100644
index 0000000..25aa702
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_license.go
@@ -0,0 +1,111 @@
+package api
+
+import (
+	"io/ioutil"
+	"strings"
+	"time"
+)
+
+type License struct {
+	// The unique identifier of the license
+	LicenseID string `json:"license_id"`
+
+	// The customer ID associated with the license
+	CustomerID string `json:"customer_id"`
+
+	// If set, an identifier that should be used to lock the license to a
+	// particular site, cluster, etc.
+	InstallationID string `json:"installation_id"`
+
+	// The time at which the license was issued
+	IssueTime time.Time `json:"issue_time"`
+
+	// The time at which the license starts being valid
+	StartTime time.Time `json:"start_time"`
+
+	// The time after which the license expires
+	ExpirationTime time.Time `json:"expiration_time"`
+
+	// The time at which the license ceases to function and can
+	// no longer be used in any capacity
+	TerminationTime time.Time `json:"termination_time"`
+
+	// The product the license is valid for
+	Product string `json:"product"`
+
+	// License Specific Flags
+	Flags map[string]interface{} `json:"flags"`
+
+	// List of features enabled by the license
+	Features []string `json:"features"`
+}
+
+type LicenseReply struct {
+	Valid    bool
+	License  *License
+	Warnings []string
+}
+
+func (op *Operator) LicenseGet(q *QueryOptions) (*LicenseReply, error) {
+	var reply LicenseReply
+	if _, err := op.c.query("/v1/operator/license", &reply, q); err != nil {
+		return nil, err
+	} else {
+		return &reply, nil
+	}
+}
+
+func (op *Operator) LicenseGetSigned(q *QueryOptions) (string, error) {
+	r := op.c.newRequest("GET", "/v1/operator/license")
+	r.params.Set("signed", "1")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", err
+	}
+
+	return string(data), nil
+}
+
+// LicenseReset will reset the license to the builtin one if it is still valid.
+// If the builtin license is invalid, the current license stays active.
+func (op *Operator) LicenseReset(opts *WriteOptions) (*LicenseReply, error) {
+	var reply LicenseReply
+	r := op.c.newRequest("DELETE", "/v1/operator/license")
+	r.setWriteOptions(opts)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	if err := decodeBody(resp, &reply); err != nil {
+		return nil, err
+	}
+
+	return &reply, nil
+}
+
+func (op *Operator) LicensePut(license string, opts *WriteOptions) (*LicenseReply, error) {
+	var reply LicenseReply
+	r := op.c.newRequest("PUT", "/v1/operator/license")
+	r.setWriteOptions(opts)
+	r.body = strings.NewReader(license)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	if err := decodeBody(resp, &reply); err != nil {
+		return nil, err
+	}
+
+	return &reply, nil
+}
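
The license endpoints follow the same request/decode pattern as the rest of the client. A sketch of uploading a license from disk and reading back the active one, assuming an Enterprise agent; the file path is illustrative:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	op := client.Operator()

	// Upload a license (Enterprise only); the path is illustrative.
	raw, err := ioutil.ReadFile("/etc/consul.d/consul.hclic")
	if err != nil {
		log.Fatal(err)
	}
	reply, err := op.LicensePut(string(raw), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("valid:", reply.Valid, "warnings:", reply.Warnings)

	// Read the active license back.
	current, err := op.LicenseGet(nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("expires:", current.License.ExpirationTime)
}
```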
diff --git a/vendor/github.com/hashicorp/consul/ui-v2/app/styles/components/notice.scss b/vendor/github.com/hashicorp/consul/ui-v2/app/styles/components/notice.scss
index 3d0a22d..6b74e61 100644
--- a/vendor/github.com/hashicorp/consul/ui-v2/app/styles/components/notice.scss
+++ b/vendor/github.com/hashicorp/consul/ui-v2/app/styles/components/notice.scss
@@ -1,4 +1,23 @@
-@import './notice/index';
+@import '../base/components/notice/index';
+%notice {
+  margin-bottom: 1em;
+}
+%notice-success::before {
+  @extend %with-check-circle-fill-color-icon;
+}
+%notice-info::before {
+  @extend %with-info-circle-fill-color-icon;
+}
+%notice-highlight::before {
+  @extend %with-star-icon;
+}
+%notice-warning::before {
+  @extend %with-alert-triangle-color-icon;
+}
+%notice-error::before {
+  @extend %with-cancel-square-fill-color-icon;
+}
+/**/
 .notice.warning {
   @extend %notice-warning;
 }
diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/hashicorp/consul/ui-v2/app/utils/dom/event-target/event-target-shim/LICENSE
similarity index 94%
rename from vendor/github.com/json-iterator/go/LICENSE
rename to vendor/github.com/hashicorp/consul/ui-v2/app/utils/dom/event-target/event-target-shim/LICENSE
index 2cf4f5a..c39e694 100644
--- a/vendor/github.com/json-iterator/go/LICENSE
+++ b/vendor/github.com/hashicorp/consul/ui-v2/app/utils/dom/event-target/event-target-shim/LICENSE
@@ -1,6 +1,6 @@
-MIT License
+The MIT License (MIT)
 
-Copyright (c) 2016 json-iterator
+Copyright (c) 2015 Toru Nagashima
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -19,3 +19,4 @@
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
+
diff --git a/vendor/github.com/hashicorp/consul/website/source/api/operator/license.html.md b/vendor/github.com/hashicorp/consul/website/source/api/operator/license.html.md
index dc2c2c3..5a52c22 100644
--- a/vendor/github.com/hashicorp/consul/website/source/api/operator/license.html.md
+++ b/vendor/github.com/hashicorp/consul/website/source/api/operator/license.html.md
@@ -24,10 +24,10 @@
 | `GET` | `/operator/license`           | `application/json`         |
 
 The table below shows this endpoint's support for
-[blocking queries](/api/index.html#blocking-queries),
-[consistency modes](/api/index.html#consistency-modes),
-[agent caching](/api/index.html#agent-caching), and
-[required ACLs](/api/index.html#acls).
+[blocking queries](/api/features/blocking.html),
+[consistency modes](/api/features/consistency.html),
+[agent caching](/api/features/caching.html), and
+[required ACLs](/api/index.html#authentication).
 
 | Blocking Queries | Consistency Modes | Agent Caching | ACL Required     |
 | ---------------- | ----------------- | ------------- | ---------------- |
@@ -86,10 +86,10 @@
 | `PUT` | `/operator/license`           | `application/json`         |
 
 The table below shows this endpoint's support for
-[blocking queries](/api/index.html#blocking-queries),
-[consistency modes](/api/index.html#consistency-modes),
-[agent caching](/api/index.html#agent-caching), and
-[required ACLs](/api/index.html#acls).
+[blocking queries](/api/features/blocking.html),
+[consistency modes](/api/features/consistency.html),
+[agent caching](/api/features/caching.html), and
+[required ACLs](/api/index.html#authentication).
 
 | Blocking Queries | Consistency Modes | Agent Caching | ACL Required     |
 | ---------------- | ----------------- | ------------- | ---------------- |
@@ -143,3 +143,65 @@
     "Warnings": []
 }
 ```
+
+## Resetting the Consul License
+
+This endpoint resets the Consul license to the license included in the Enterprise binary. If the built-in license is not valid, the reset will fail and the current license stays active.
+
+| Method   | Path                         | Produces                   |
+| -------- | ---------------------------- | -------------------------- |
+| `DELETE` | `/operator/license`          | `application/json`         |
+
+The table below shows this endpoint's support for
+[blocking queries](/api/features/blocking.html),
+[consistency modes](/api/features/consistency.html),
+[agent caching](/api/features/caching.html), and
+[required ACLs](/api/index.html#authentication).
+
+| Blocking Queries | Consistency Modes | Agent Caching | ACL Required     |
+| ---------------- | ----------------- | ------------- | ---------------- |
+| `NO`             | `none`            | `none`        | `operator:write` |
+
+### Parameters
+
+- `dc` `(string: "")` - Specifies the datacenter whose license should be reset.
+  This will default to the datacenter of the agent serving the HTTP request.
+  This is specified as a URL query parameter.
+
+### Sample Request
+
+```text
+$ curl \
+    --request DELETE \
+    http://127.0.0.1:8500/v1/operator/license
+```
+
+### Sample Response
+
+```json
+{
+    "Valid": true,
+    "License": {
+        "license_id": "2afbf681-0d1a-0649-cb6c-333ec9f0989c",
+        "customer_id": "0259271d-8ffc-e85e-0830-c0822c1f5f2b",
+        "installation_id": "*",
+        "issue_time": "2018-05-21T20:03:35.911567355Z",
+        "start_time": "2018-05-21T04:00:00Z",
+        "expiration_time": "2019-05-22T03:59:59.999Z",
+        "product": "consul",
+        "flags": {
+            "package": "premium"
+        },
+        "features": [
+            "Automated Backups",
+            "Automated Upgrades",
+            "Enhanced Read Scalability",
+            "Network Segments",
+            "Redundancy Zone",
+            "Advanced Network Federation"
+        ],
+        "temporary": false
+    },
+    "Warnings": []
+}
+```
diff --git a/vendor/github.com/hashicorp/consul/website/source/docs/commands/license.html.markdown.erb b/vendor/github.com/hashicorp/consul/website/source/docs/commands/license.html.markdown.erb
index b65d171..e397a13 100644
--- a/vendor/github.com/hashicorp/consul/website/source/docs/commands/license.html.markdown.erb
+++ b/vendor/github.com/hashicorp/consul/website/source/docs/commands/license.html.markdown.erb
@@ -17,7 +17,7 @@
 If ACLs are enabled then a token with operator privileges may be required in
 order to use this command. Requests are forwarded internally to the leader
 if required, so this can be run from any Consul node in a cluster. See the
-[ACL Guide](/docs/guides/acl.html#operator) for more information.
+[ACL Guide](https://learn.hashicorp.com/consul/security-networking/production-acls) for more information.
 
 
 ```text
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
new file mode 100644
index 0000000..dd7c0ef
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
@@ -0,0 +1,9 @@
+# 1.1.0 (May 22nd, 2019)
+
+FEATURES
+
+* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)]
+
+# 1.0.0 (August 30th, 2018)
+
+* go mod adopted
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/README.md b/vendor/github.com/hashicorp/go-immutable-radix/README.md
index 8910fcc..4b6338b 100644
--- a/vendor/github.com/hashicorp/go-immutable-radix/README.md
+++ b/vendor/github.com/hashicorp/go-immutable-radix/README.md
@@ -39,3 +39,28 @@
 }
 ```
 
+Here is an example of performing a range scan of the keys.
+
+```go
+// Create a tree
+r := iradix.New()
+r, _, _ = r.Insert([]byte("001"), 1)
+r, _, _ = r.Insert([]byte("002"), 2)
+r, _, _ = r.Insert([]byte("005"), 5)
+r, _, _ = r.Insert([]byte("010"), 10)
+r, _, _ = r.Insert([]byte("100"), 10)
+
+// Range scan over the keys that sort lexicographically between [003, 050)
+it := r.Root().Iterator()
+it.SeekLowerBound([]byte("003"))
+for key, _, ok := it.Next(); ok; key, _, ok = it.Next() {
+  if string(key) >= "050" {
+      break
+  }
+  fmt.Println(string(key))
+}
+// Output:
+//  005
+//  010
+```
+
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
index 9815e02..1ecaf83 100644
--- a/vendor/github.com/hashicorp/go-immutable-radix/iter.go
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
@@ -1,6 +1,8 @@
 package iradix
 
-import "bytes"
+import (
+	"bytes"
+)
 
 // Iterator is used to iterate over a set of nodes
 // in pre-order
@@ -53,6 +55,101 @@
 	i.SeekPrefixWatch(prefix)
 }
 
+func (i *Iterator) recurseMin(n *Node) *Node {
+	// Traverse to the minimum child
+	if n.leaf != nil {
+		return n
+	}
+	if len(n.edges) > 0 {
+		// Add all the other edges to the stack (the min node will be added as
+		// we recurse)
+		i.stack = append(i.stack, n.edges[1:])
+		return i.recurseMin(n.edges[0].node)
+	}
+	// Shouldn't be possible
+	return nil
+}
+
+// SeekLowerBound is used to seek the iterator to the smallest key that is
+// greater or equal to the given key. There is no watch variant as it's hard to
+// predict based on the radix structure which node(s) changes might affect the
+// result.
+func (i *Iterator) SeekLowerBound(key []byte) {
+	// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
+	// go because we need only a subset of edges of many nodes in the path to the
+	// leaf with the lower bound.
+	i.stack = []edges{}
+	n := i.node
+	search := key
+
+	found := func(n *Node) {
+		i.node = n
+		i.stack = append(i.stack, edges{edge{node: n}})
+	}
+
+	for {
+		// Compare current prefix with the search key's same-length prefix.
+		var prefixCmp int
+		if len(n.prefix) < len(search) {
+			prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
+		} else {
+			prefixCmp = bytes.Compare(n.prefix, search)
+		}
+
+		if prefixCmp > 0 {
+			// Prefix is larger, that means the lower bound is greater than the search
+			// and from now on we need to follow the minimum path to the smallest
+			// leaf under this subtree.
+			n = i.recurseMin(n)
+			if n != nil {
+				found(n)
+			}
+			return
+		}
+
+		if prefixCmp < 0 {
+			// Prefix is smaller than search prefix, that means there is no lower
+			// bound
+			i.node = nil
+			return
+		}
+
+		// Prefix is equal, we are still heading for an exact match. If this is a
+		// leaf we're done.
+		if n.leaf != nil {
+			if bytes.Compare(n.leaf.key, key) < 0 {
+				i.node = nil
+				return
+			}
+			found(n)
+			return
+		}
+
+		// Consume the search prefix
+		if len(n.prefix) > len(search) {
+			search = []byte{}
+		} else {
+			search = search[len(n.prefix):]
+		}
+
+		// If the search key is exhausted but this node has no leaf, every
+		// leaf below it sorts strictly after the key, so the minimum leaf in
+		// this subtree is the lower bound (this also guards the search[0]
+		// index just below).
+		if len(search) == 0 {
+			n = i.recurseMin(n)
+			if n != nil {
+				found(n)
+			}
+			return
+		}
+
+		// Otherwise, take the lower bound next edge.
+		idx, lbNode := n.getLowerBoundEdge(search[0])
+		if lbNode == nil {
+			i.node = nil
+			return
+		}
+
+		// Create stack edges for all strictly higher edges in this node.
+		if idx+1 < len(n.edges) {
+			i.stack = append(i.stack, n.edges[idx+1:])
+		}
+
+		i.node = lbNode
+		// Recurse
+		n = lbNode
+	}
+}
+
 // Next returns the next node in order
 func (i *Iterator) Next() ([]byte, interface{}, bool) {
 	// Initialize our stack if needed
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go
index 7a065e7..3ab904e 100644
--- a/vendor/github.com/hashicorp/go-immutable-radix/node.go
+++ b/vendor/github.com/hashicorp/go-immutable-radix/node.go
@@ -79,6 +79,18 @@
 	return -1, nil
 }
 
+func (n *Node) getLowerBoundEdge(label byte) (int, *Node) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= label
+	})
+	// we want lower bound behavior so return even if it's not an exact match
+	if idx < num {
+		return idx, n.edges[idx].node
+	}
+	return -1, nil
+}
+
 func (n *Node) delEdge(label byte) {
 	num := len(n.edges)
 	idx := sort.Search(num, func(i int) bool {
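
`getLowerBoundEdge` differs from `getEdge` only in returning the first edge at or above the label instead of requiring an exact match. The underlying `sort.Search` idiom in isolation, with illustrative labels:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// sort.Search returns the first index whose label is >= the wanted
	// label, or len(labels) when every label is smaller -- the same
	// lower-bound behavior getLowerBoundEdge relies on.
	labels := []byte{'a', 'c', 'f'}
	for _, want := range []byte{'b', 'f', 'z'} {
		idx := sort.Search(len(labels), func(i int) bool { return labels[i] >= want })
		if idx < len(labels) {
			fmt.Printf("lower bound of %q is %q at index %d\n", want, labels[idx], idx)
		} else {
			fmt.Printf("no lower bound for %q\n", want)
		}
	}
}
```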
diff --git a/vendor/github.com/hashicorp/go-rootcerts/go.mod b/vendor/github.com/hashicorp/go-rootcerts/go.mod
index 3c0e0e6..e2dd024 100644
--- a/vendor/github.com/hashicorp/go-rootcerts/go.mod
+++ b/vendor/github.com/hashicorp/go-rootcerts/go.mod
@@ -1,3 +1,5 @@
 module github.com/hashicorp/go-rootcerts
 
-require github.com/mitchellh/go-homedir v1.0.0
+go 1.12
+
+require github.com/mitchellh/go-homedir v1.1.0
diff --git a/vendor/github.com/hashicorp/go-rootcerts/go.sum b/vendor/github.com/hashicorp/go-rootcerts/go.sum
index d12bb75..ae38d14 100644
--- a/vendor/github.com/hashicorp/go-rootcerts/go.sum
+++ b/vendor/github.com/hashicorp/go-rootcerts/go.sum
@@ -1,2 +1,2 @@
-github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
diff --git a/vendor/github.com/hashicorp/go-uuid/.travis.yml b/vendor/github.com/hashicorp/go-uuid/.travis.yml
new file mode 100644
index 0000000..7698490
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+
+sudo: false
+
+go:
+  - 1.4
+  - 1.5
+  - 1.6
+  - tip
+
+script:
+  - go test -bench . -benchmem -v ./...
diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE
new file mode 100644
index 0000000..e87a115
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the terms of
+        a Secondary License.
+
+1.6. "Executable Form"
+
+     means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+     means a work that combines Covered Software with other material, in a
+     separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+     means this document.
+
+1.9. "Licensable"
+
+     means having the right to grant, to the maximum extent possible, whether
+     at the time of the initial grant or subsequently, any and all of the
+     rights conveyed by this License.
+
+1.10. "Modifications"
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to,
+        deletion from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+      means any patent claim(s), including without limitation, method,
+      process, and apparatus claims, in any patent Licensable by such
+      Contributor that would be infringed, but for the grant of the License,
+      by the making, using, selling, offering for sale, having made, import,
+      or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+      means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, "You" includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, "control" means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or
+        as part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its
+        Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution
+     become effective for each Contribution on the date the Contributor first
+     distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under
+     this License. No additional rights or licenses will be implied from the
+     distribution or licensing of Covered Software under this License.
+     Notwithstanding Section 2.1(b) above, no patent license is granted by a
+     Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party's
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of
+        its Contributions.
+
+     This License does not grant any rights in the trademarks, service marks,
+     or logos of any Contributor (except as may be necessary to comply with
+     the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this
+     License (see Section 10.2) or under the terms of a Secondary License (if
+     permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its
+     Contributions are its original creation(s) or it has sufficient rights to
+     grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under
+     applicable copyright doctrines of fair use, fair dealing, or other
+     equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under
+     the terms of this License. You must inform recipients that the Source
+     Code Form of the Covered Software is governed by the terms of this
+     License, and how they can obtain a copy of this License. You may not
+     attempt to alter or restrict the recipients' rights in the Source Code
+     Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this
+        License, or sublicense it under different terms, provided that the
+        license for the Executable Form does not attempt to limit or alter the
+        recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for
+     the Covered Software. If the Larger Work is a combination of Covered
+     Software with a work governed by one or more Secondary Licenses, and the
+     Covered Software is not Incompatible With Secondary Licenses, this
+     License permits You to additionally distribute such Covered Software
+     under the terms of such Secondary License(s), so that the recipient of
+     the Larger Work may, at their option, further distribute the Covered
+     Software under the terms of either this License or such Secondary
+     License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices
+     (including copyright notices, patent notices, disclaimers of warranty, or
+     limitations of liability) contained within the Source Code Form of the
+     Covered Software, except that You may alter any license notices to the
+     extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on
+     behalf of any Contributor. You must make it absolutely clear that any
+     such warranty, support, indemnity, or liability obligation is offered by
+     You alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute,
+   judicial order, or regulation then You must: (a) comply with the terms of
+   this License to the maximum extent possible; and (b) describe the
+   limitations and the code they affect. Such description must be placed in a
+   text file included with all distributions of the Covered Software under
+   this License. Except to the extent prohibited by statute or regulation,
+   such description must be sufficiently detailed for a recipient of ordinary
+   skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing
+     basis, if such Contributor fails to notify You of the non-compliance by
+     some reasonable means prior to 60 days after You have come back into
+     compliance. Moreover, Your grants from a particular Contributor are
+     reinstated on an ongoing basis if such Contributor notifies You of the
+     non-compliance by some reasonable means, this is the first time You have
+     received notice of non-compliance with this License from such
+     Contributor, and You become compliant prior to 30 days after Your receipt
+     of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions,
+     counter-claims, and cross-claims) alleging that a Contributor Version
+     directly or indirectly infringes any patent, then the rights granted to
+     You by any and all Contributors for the Covered Software under Section
+     2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an "as is" basis,
+   without warranty of any kind, either expressed, implied, or statutory,
+   including, without limitation, warranties that the Covered Software is free
+   of defects, merchantable, fit for a particular purpose or non-infringing.
+   The entire risk as to the quality and performance of the Covered Software
+   is with You. Should any Covered Software prove defective in any respect,
+   You (not any Contributor) assume the cost of any necessary servicing,
+   repair, or correction. This disclaimer of warranty constitutes an essential
+   part of this License. No use of any Covered Software is authorized under
+   this License except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from
+   such party's negligence to the extent applicable law prohibits such
+   limitation. Some jurisdictions do not allow the exclusion or limitation of
+   incidental or consequential damages, so this exclusion and limitation may
+   not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts
+   of a jurisdiction where the defendant maintains its principal place of
+   business and such litigation shall be governed by laws of that
+   jurisdiction, without reference to its conflict-of-law provisions. Nothing
+   in this Section shall prevent a party's ability to bring cross-claims or
+   counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject
+   matter hereof. If any provision of this License is held to be
+   unenforceable, such provision shall be reformed only to the extent
+   necessary to make it enforceable. Any law or regulation which provides that
+   the language of a contract shall be construed against the drafter shall not
+   be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version
+      of the License under which You originally received the Covered Software,
+      or under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a
+      modified version of this License if you rename the license and remove
+      any references to the name of the license steward (except to note that
+      such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+      Licenses
+
+      If You choose to distribute Source Code Form that is Incompatible
+      With Secondary Licenses under the terms of this version of the
+      License, the notice described in Exhibit B of this License must be
+      attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md
new file mode 100644
index 0000000..fbde8b9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/README.md
@@ -0,0 +1,8 @@
+# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid)
+
+Generates UUID-format strings using high-quality, _purely random_ bytes. It is **not** intended to be RFC compliant, merely to use a well-understood string representation of a 128-bit value. It can also parse UUID-format strings into their component bytes.
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid).
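
For reference, a minimal usage sketch of the vendored API (the import path matches the new `go.mod` module; `GenerateUUID` is defined in `uuid.go` below):

```go
package main

import (
	"fmt"
	"log"

	uuid "github.com/hashicorp/go-uuid"
)

func main() {
	// GenerateUUID draws 16 bytes from crypto/rand and renders them
	// in the 8-4-4-4-12 UUID string format (36 characters).
	id, err := uuid.GenerateUUID()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id)
}
```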
diff --git a/vendor/github.com/hashicorp/go-uuid/go.mod b/vendor/github.com/hashicorp/go-uuid/go.mod
new file mode 100644
index 0000000..dd57f9d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/go.mod
@@ -0,0 +1 @@
+module github.com/hashicorp/go-uuid
diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go
new file mode 100644
index 0000000..911227f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/uuid.go
@@ -0,0 +1,65 @@
+package uuid
+
+import (
+	"crypto/rand"
+	"encoding/hex"
+	"fmt"
+)
+
+// GenerateRandomBytes is used to generate random bytes of the given size.
+func GenerateRandomBytes(size int) ([]byte, error) {
+	buf := make([]byte, size)
+	if _, err := rand.Read(buf); err != nil {
+		return nil, fmt.Errorf("failed to read random bytes: %v", err)
+	}
+	return buf, nil
+}
+
+const uuidLen = 16
+
+// GenerateUUID is used to generate a random UUID
+func GenerateUUID() (string, error) {
+	buf, err := GenerateRandomBytes(uuidLen)
+	if err != nil {
+		return "", err
+	}
+	return FormatUUID(buf)
+}
+
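+// FormatUUID formats the given 16-byte slice as a UUID-format string.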
+func FormatUUID(buf []byte) (string, error) {
+	if buflen := len(buf); buflen != uuidLen {
+		return "", fmt.Errorf("wrong length byte slice (%d)", buflen)
+	}
+
+	return fmt.Sprintf("%x-%x-%x-%x-%x",
+		buf[0:4],
+		buf[4:6],
+		buf[6:8],
+		buf[8:10],
+		buf[10:16]), nil
+}
+
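+// ParseUUID parses a UUID-format string back into the 16 bytes it encodes.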
+func ParseUUID(uuid string) ([]byte, error) {
+	if len(uuid) != 2*uuidLen+4 {
+		return nil, fmt.Errorf("uuid string is wrong length")
+	}
+
+	if uuid[8] != '-' ||
+		uuid[13] != '-' ||
+		uuid[18] != '-' ||
+		uuid[23] != '-' {
+		return nil, fmt.Errorf("uuid is improperly formatted")
+	}
+
+	hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36]
+
+	ret, err := hex.DecodeString(hexStr)
+	if err != nil {
+		return nil, err
+	}
+	if len(ret) != uuidLen {
+		return nil, fmt.Errorf("decoded hex is the wrong length")
+	}
+
+	return ret, nil
+}
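
A hedged round-trip sketch of the helpers above: `ParseUUID` only accepts 36-character strings (2*16 hex digits plus 4 dashes) with dashes at positions 8, 13, 18, and 23:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	uuid "github.com/hashicorp/go-uuid"
)

func main() {
	buf, err := uuid.GenerateRandomBytes(16)
	if err != nil {
		log.Fatal(err)
	}
	s, _ := uuid.FormatUUID(buf) // error is nil here: buf has exactly uuidLen bytes
	raw, err := uuid.ParseUUID(s)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(raw, buf)) // true: the round trip is lossless
}
```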
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
index 5673773..a86c853 100644
--- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
@@ -73,6 +73,9 @@
 func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
 	if ent, ok := c.items[key]; ok {
 		c.evictList.MoveToFront(ent)
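+		// Defensive check: skip list entries whose stored *entry value is nil.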
+		if ent.Value.(*entry) == nil {
+			return nil, false
+		}
 		return ent.Value.(*entry).value, true
 	}
 	return
@@ -142,6 +145,19 @@
 	return c.evictList.Len()
 }
 
+// Resize changes the cache size, returning the number of entries evicted.
+func (c *LRU) Resize(size int) (evicted int) {
+	diff := c.Len() - size
+	if diff < 0 {
+		diff = 0
+	}
+	for i := 0; i < diff; i++ {
+		c.removeOldest()
+	}
+	c.size = size
+	return diff
+}
+
 // removeOldest removes the oldest item from the cache.
 func (c *LRU) removeOldest() {
 	ent := c.evictList.Back()
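
A short sketch of the new `Resize` semantics, assuming the package's existing `NewLRU(size, onEvict)` constructor (not shown in this hunk): shrinking evicts the oldest entries and reports how many were dropped, while growing evicts nothing and returns 0.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	c, err := simplelru.NewLRU(4, nil) // nil: no eviction callback
	if err != nil {
		log.Fatal(err)
	}
	for i := 0; i < 4; i++ {
		c.Add(i, i)
	}
	// Shrinking from 4 entries to 2 evicts the two oldest keys (0 and 1).
	fmt.Println(c.Resize(2)) // 2
	_, ok := c.Get(0)
	fmt.Println(ok, c.Len()) // false 2
}
```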
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
index 74c7077..92d7093 100644
--- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
@@ -10,7 +10,7 @@
 	// updates the "recently used"-ness of the key. #value, isFound
 	Get(key interface{}) (value interface{}, ok bool)
 
-	// Check if a key exsists in cache without updating the recent-ness.
+	// Checks if a key exists in cache without updating the recent-ness.
 	Contains(key interface{}) (ok bool)
 
 	// Returns key's value without updating the "recently used"-ness of the key.
@@ -31,6 +31,9 @@
 	// Returns the number of items in the cache.
 	Len() int
 
-	// Clear all cache entries
+	// Clears all cache entries.
 	Purge()
+
+	// Resizes the cache, returning the number of entries evicted.
+	Resize(int) int
 }
diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore
deleted file mode 100644
index 529c341..0000000
--- a/vendor/github.com/imdario/mergo/.gitignore
+++ /dev/null
@@ -1,33 +0,0 @@
-#### joe made this: http://goel.io/joe
-
-#### go ####
-# Binaries for programs and plugins
-*.exe
-*.dll
-*.so
-*.dylib
-
-# Test binary, build with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
-.glide/
-
-#### vim ####
-# Swap
-[._]*.s[a-v][a-z]
-[._]*.sw[a-p]
-[._]s[a-v][a-z]
-[._]sw[a-p]
-
-# Session
-Session.vim
-
-# Temporary
-.netrwhist
-*~
-# Auto-generated tag files
-tags
diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml
deleted file mode 100644
index b13a50e..0000000
--- a/vendor/github.com/imdario/mergo/.travis.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-language: go
-install:
-  - go get -t
-  - go get golang.org/x/tools/cmd/cover
-  - go get github.com/mattn/goveralls
-script:
-  - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN
diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
deleted file mode 100644
index 469b449..0000000
--- a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Contributor Covenant Code of Conduct
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment include:
-
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery and unwelcome sexual attention or advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
-
-## Scope
-
-This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
-
-[homepage]: http://contributor-covenant.org
-[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE
deleted file mode 100644
index 6866802..0000000
--- a/vendor/github.com/imdario/mergo/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2013 Dario Castañé. All rights reserved.
-Copyright (c) 2012 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md
deleted file mode 100644
index 02fc81e..0000000
--- a/vendor/github.com/imdario/mergo/README.md
+++ /dev/null
@@ -1,238 +0,0 @@
-# Mergo
-
-A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
-
-Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
-
-## Status
-
-It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
-
-[![GoDoc][3]][4]
-[![GoCard][5]][6]
-[![Build Status][1]][2]
-[![Coverage Status][7]][8]
-[![Sourcegraph][9]][10]
-[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield)
-
-[1]: https://travis-ci.org/imdario/mergo.png
-[2]: https://travis-ci.org/imdario/mergo
-[3]: https://godoc.org/github.com/imdario/mergo?status.svg
-[4]: https://godoc.org/github.com/imdario/mergo
-[5]: https://goreportcard.com/badge/imdario/mergo
-[6]: https://goreportcard.com/report/github.com/imdario/mergo
-[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
-[8]: https://coveralls.io/github/imdario/mergo?branch=master
-[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
-[10]: https://sourcegraph.com/github.com/imdario/mergo?badge
-
-### Latest release
-
-[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7).
-
-### Important note
-
-Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()` and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code.
-
-If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0).
-
-### Donations
-
-If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes:
-
-<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
-[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo)
-[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo)
-<a href="https://liberapay.com/dario/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a>
-
-### Mergo in the wild
-
-- [moby/moby](https://github.com/moby/moby)
-- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
-- [vmware/dispatch](https://github.com/vmware/dispatch)
-- [Shopify/themekit](https://github.com/Shopify/themekit)
-- [imdario/zas](https://github.com/imdario/zas)
-- [matcornic/hermes](https://github.com/matcornic/hermes)
-- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
-- [kataras/iris](https://github.com/kataras/iris)
-- [michaelsauter/crane](https://github.com/michaelsauter/crane)
-- [go-task/task](https://github.com/go-task/task)
-- [sensu/uchiwa](https://github.com/sensu/uchiwa)
-- [ory/hydra](https://github.com/ory/hydra)
-- [sisatech/vcli](https://github.com/sisatech/vcli)
-- [dairycart/dairycart](https://github.com/dairycart/dairycart)
-- [projectcalico/felix](https://github.com/projectcalico/felix)
-- [resin-os/balena](https://github.com/resin-os/balena)
-- [go-kivik/kivik](https://github.com/go-kivik/kivik)
-- [Telefonica/govice](https://github.com/Telefonica/govice)
-- [supergiant/supergiant](supergiant/supergiant)
-- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
-- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
-- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
-- [EagerIO/Stout](https://github.com/EagerIO/Stout)
-- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
-- [russross/canvasassignments](https://github.com/russross/canvasassignments)
-- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
-- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
-- [divshot/gitling](https://github.com/divshot/gitling)
-- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
-- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
-- [elwinar/rambler](https://github.com/elwinar/rambler)
-- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
-- [jfbus/impressionist](https://github.com/jfbus/impressionist)
-- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
-- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
-- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
-- [thoas/picfit](https://github.com/thoas/picfit)
-- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
-- [jnuthong/item_search](https://github.com/jnuthong/item_search)
-- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
-
-## Installation
-
-    go get github.com/imdario/mergo
-
-    // use in your .go code
-    import (
-        "github.com/imdario/mergo"
-    )
-
-## Usage
-
-You can only merge structs of the same type (with exported fields initialized to their type's zero value) and maps of the same types. Mergo won't merge unexported (private) fields, but it will recurse into any exported one. It also won't merge empty struct values, as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value). Maps are merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
-
-```go
-if err := mergo.Merge(&dst, src); err != nil {
-    // ...
-}
-```
-
-Also, you can merge overwriting values using the transformer `WithOverride`.
-
-```go
-if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
-    // ...
-}
-```
-
-Additionally, you can map a `map[string]interface{}` to a struct (and vice versa, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
-
-```go
-if err := mergo.Map(&dst, srcMap); err != nil {
-    // ...
-}
-```
-
-Warning: if you map a struct to a map, it won't do so recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`; they will just be assigned as values.
-
-More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo).
-
-### Nice example
-
-```go
-package main
-
-import (
-	"fmt"
-	"github.com/imdario/mergo"
-)
-
-type Foo struct {
-	A string
-	B int64
-}
-
-func main() {
-	src := Foo{
-		A: "one",
-		B: 2,
-	}
-	dest := Foo{
-		A: "two",
-	}
-	mergo.Merge(&dest, src)
-	fmt.Println(dest)
-	// Will print
-	// {two 2}
-}
-```
-
-Note: if tests are failing due to a missing package, please execute:
-
-    go get gopkg.in/yaml.v2
-
-### Transformers
-
-Transformers allow you to merge specific types differently from the default behavior. In other words, you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have a zero value, but `IsZero` can return true because all of its fields can hold their zero values. How can we merge a non-zero `time.Time`?
-
-```go
-package main
-
-import (
-	"fmt"
-	"github.com/imdario/mergo"
-        "reflect"
-        "time"
-)
-
-type timeTransfomer struct {
-}
-
-func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
-	if typ == reflect.TypeOf(time.Time{}) {
-		return func(dst, src reflect.Value) error {
-			if dst.CanSet() {
-				isZero := dst.MethodByName("IsZero")
-				result := isZero.Call([]reflect.Value{})
-				if result[0].Bool() {
-					dst.Set(src)
-				}
-			}
-			return nil
-		}
-	}
-	return nil
-}
-
-type Snapshot struct {
-	Time time.Time
-	// ...
-}
-
-func main() {
-	src := Snapshot{time.Now()}
-	dest := Snapshot{}
-	mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{}))
-	fmt.Println(dest)
-	// Will print
-	// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
-}
-```
-
-
-## Contact me
-
-If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
-
-## About
-
-Written by [Dario Castañé](http://dario.im).
-
-## Top Contributors
-
-[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0)
-[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1)
-[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2)
-[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3)
-[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4)
-[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5)
-[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6)
-[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7)
-
-
-## License
-
-[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
-
-
-[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go
deleted file mode 100644
index 6e9aa7b..0000000
--- a/vendor/github.com/imdario/mergo/doc.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2013 Dario Castañé. All rights reserved.
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package mergo merges same-type structs and maps by setting default values in zero-value fields.
-
-Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
-
-Usage
-
-From my own work-in-progress project:
-
-	type networkConfig struct {
-		Protocol string
-		Address string
-		ServerType string `json:"server_type"`
-		Port uint16
-	}
-
-	type FssnConfig struct {
-		Network networkConfig
-	}
-
-	var fssnDefault = FssnConfig {
-		networkConfig {
-			"tcp",
-			"127.0.0.1",
-			"http",
-			31560,
-		},
-	}
-
-	// Inside a function [...]
-
-	if err := mergo.Merge(&config, fssnDefault); err != nil {
-		log.Fatal(err)
-	}
-
-	// More code [...]
-
-*/
-package mergo
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go
deleted file mode 100644
index 3f5afa8..0000000
--- a/vendor/github.com/imdario/mergo/map.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2014 Dario Castañé. All rights reserved.
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Based on src/pkg/reflect/deepequal.go from official
-// golang's stdlib.
-
-package mergo
-
-import (
-	"fmt"
-	"reflect"
-	"unicode"
-	"unicode/utf8"
-)
-
-func changeInitialCase(s string, mapper func(rune) rune) string {
-	if s == "" {
-		return s
-	}
-	r, n := utf8.DecodeRuneInString(s)
-	return string(mapper(r)) + s[n:]
-}
-
-func isExported(field reflect.StructField) bool {
-	r, _ := utf8.DecodeRuneInString(field.Name)
-	return r >= 'A' && r <= 'Z'
-}
-
-// Traverses recursively both values, assigning src's fields values to dst.
-// The map argument tracks comparisons that have already been seen, which allows
-// short circuiting on recursive types.
-func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
-	overwrite := config.Overwrite
-	if dst.CanAddr() {
-		addr := dst.UnsafeAddr()
-		h := 17 * addr
-		seen := visited[h]
-		typ := dst.Type()
-		for p := seen; p != nil; p = p.next {
-			if p.ptr == addr && p.typ == typ {
-				return nil
-			}
-		}
-		// Remember, remember...
-		visited[h] = &visit{addr, typ, seen}
-	}
-	zeroValue := reflect.Value{}
-	switch dst.Kind() {
-	case reflect.Map:
-		dstMap := dst.Interface().(map[string]interface{})
-		for i, n := 0, src.NumField(); i < n; i++ {
-			srcType := src.Type()
-			field := srcType.Field(i)
-			if !isExported(field) {
-				continue
-			}
-			fieldName := field.Name
-			fieldName = changeInitialCase(fieldName, unicode.ToLower)
-			if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
-				dstMap[fieldName] = src.Field(i).Interface()
-			}
-		}
-	case reflect.Ptr:
-		if dst.IsNil() {
-			v := reflect.New(dst.Type().Elem())
-			dst.Set(v)
-		}
-		dst = dst.Elem()
-		fallthrough
-	case reflect.Struct:
-		srcMap := src.Interface().(map[string]interface{})
-		for key := range srcMap {
-			config.overwriteWithEmptyValue = true
-			srcValue := srcMap[key]
-			fieldName := changeInitialCase(key, unicode.ToUpper)
-			dstElement := dst.FieldByName(fieldName)
-			if dstElement == zeroValue {
-				// We discard it because the field doesn't exist.
-				continue
-			}
-			srcElement := reflect.ValueOf(srcValue)
-			dstKind := dstElement.Kind()
-			srcKind := srcElement.Kind()
-			if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
-				srcElement = srcElement.Elem()
-				srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
-			} else if dstKind == reflect.Ptr {
-				// Can this work? I guess it can't.
-				if srcKind != reflect.Ptr && srcElement.CanAddr() {
-					srcPtr := srcElement.Addr()
-					srcElement = reflect.ValueOf(srcPtr)
-					srcKind = reflect.Ptr
-				}
-			}
-
-			if !srcElement.IsValid() {
-				continue
-			}
-			if srcKind == dstKind {
-				if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
-					return
-				}
-			} else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
-				if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
-					return
-				}
-			} else if srcKind == reflect.Map {
-				if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil {
-					return
-				}
-			} else {
-				return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
-			}
-		}
-	}
-	return
-}
-
-// Map sets fields' values in dst from src.
-// src can be a map with string keys or a struct. dst must be the opposite:
-// if src is a map, dst must be a valid pointer to struct. If src is a struct,
-// dst must be map[string]interface{}.
-// It won't merge unexported (private) fields and will do recursively
-// any exported field.
-// If dst is a map, keys will be src fields' names in lower camel case.
-// A key in src that doesn't match a field in dst will be skipped. This
-// doesn't apply if dst is a map.
-// This is a separate method from Merge because it is cleaner and it keeps sane
-// semantics: merging equal types, mapping different (restricted) types.
-func Map(dst, src interface{}, opts ...func(*Config)) error {
-	return _map(dst, src, opts...)
-}
-
-// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
-// non-empty src attribute values.
-// Deprecated: Use Map(…) with WithOverride
-func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
-	return _map(dst, src, append(opts, WithOverride)...)
-}
-
-func _map(dst, src interface{}, opts ...func(*Config)) error {
-	var (
-		vDst, vSrc reflect.Value
-		err        error
-	)
-	config := &Config{}
-
-	for _, opt := range opts {
-		opt(config)
-	}
-
-	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
-		return err
-	}
-	// To be friction-less, we redirect equal-type arguments
-	// to deepMerge. Only because arguments can be anything.
-	if vSrc.Kind() == vDst.Kind() {
-		return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
-	}
-	switch vSrc.Kind() {
-	case reflect.Struct:
-		if vDst.Kind() != reflect.Map {
-			return ErrExpectedMapAsDestination
-		}
-	case reflect.Map:
-		if vDst.Kind() != reflect.Struct {
-			return ErrExpectedStructAsDestination
-		}
-	default:
-		return ErrNotSupported
-	}
-	return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config)
-}
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
deleted file mode 100644
index f8de6c5..0000000
--- a/vendor/github.com/imdario/mergo/merge.go
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2013 Dario Castañé. All rights reserved.
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Based on src/pkg/reflect/deepequal.go from official
-// golang's stdlib.
-
-package mergo
-
-import (
-	"fmt"
-	"reflect"
-)
-
-func hasExportedField(dst reflect.Value) (exported bool) {
-	for i, n := 0, dst.NumField(); i < n; i++ {
-		field := dst.Type().Field(i)
-		if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
-			exported = exported || hasExportedField(dst.Field(i))
-		} else {
-			exported = exported || len(field.PkgPath) == 0
-		}
-	}
-	return
-}
-
-type Config struct {
-	Overwrite               bool
-	AppendSlice             bool
-	Transformers            Transformers
-	overwriteWithEmptyValue bool
-}
-
-type Transformers interface {
-	Transformer(reflect.Type) func(dst, src reflect.Value) error
-}
-
-// Traverses recursively both values, assigning src's fields values to dst.
-// The map argument tracks comparisons that have already been seen, which allows
-// short circuiting on recursive types.
-func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
-	overwrite := config.Overwrite
-	overwriteWithEmptySrc := config.overwriteWithEmptyValue
-	config.overwriteWithEmptyValue = false
-
-	if !src.IsValid() {
-		return
-	}
-	if dst.CanAddr() {
-		addr := dst.UnsafeAddr()
-		h := 17 * addr
-		seen := visited[h]
-		typ := dst.Type()
-		for p := seen; p != nil; p = p.next {
-			if p.ptr == addr && p.typ == typ {
-				return nil
-			}
-		}
-		// Remember, remember...
-		visited[h] = &visit{addr, typ, seen}
-	}
-
-	if config.Transformers != nil && !isEmptyValue(dst) {
-		if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
-			err = fn(dst, src)
-			return
-		}
-	}
-
-	switch dst.Kind() {
-	case reflect.Struct:
-		if hasExportedField(dst) {
-			for i, n := 0, dst.NumField(); i < n; i++ {
-				if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
-					return
-				}
-			}
-		} else {
-			if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) {
-				dst.Set(src)
-			}
-		}
-	case reflect.Map:
-		if dst.IsNil() && !src.IsNil() {
-			dst.Set(reflect.MakeMap(dst.Type()))
-		}
-		for _, key := range src.MapKeys() {
-			srcElement := src.MapIndex(key)
-			if !srcElement.IsValid() {
-				continue
-			}
-			dstElement := dst.MapIndex(key)
-			switch srcElement.Kind() {
-			case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
-				if srcElement.IsNil() {
-					continue
-				}
-				fallthrough
-			default:
-				if !srcElement.CanInterface() {
-					continue
-				}
-				switch reflect.TypeOf(srcElement.Interface()).Kind() {
-				case reflect.Struct:
-					fallthrough
-				case reflect.Ptr:
-					fallthrough
-				case reflect.Map:
-					srcMapElm := srcElement
-					dstMapElm := dstElement
-					if srcMapElm.CanInterface() {
-						srcMapElm = reflect.ValueOf(srcMapElm.Interface())
-						if dstMapElm.IsValid() {
-							dstMapElm = reflect.ValueOf(dstMapElm.Interface())
-						}
-					}
-					if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
-						return
-					}
-				case reflect.Slice:
-					srcSlice := reflect.ValueOf(srcElement.Interface())
-
-					var dstSlice reflect.Value
-					if !dstElement.IsValid() || dstElement.IsNil() {
-						dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
-					} else {
-						dstSlice = reflect.ValueOf(dstElement.Interface())
-					}
-
-					if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
-						dstSlice = srcSlice
-					} else if config.AppendSlice {
-						if srcSlice.Type() != dstSlice.Type() {
-							return fmt.Errorf("cannot append two slice with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
-						}
-						dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
-					}
-					dst.SetMapIndex(key, dstSlice)
-				}
-			}
-			if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) {
-				continue
-			}
-
-			if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) {
-				if dst.IsNil() {
-					dst.Set(reflect.MakeMap(dst.Type()))
-				}
-				dst.SetMapIndex(key, srcElement)
-			}
-		}
-	case reflect.Slice:
-		if !dst.CanSet() {
-			break
-		}
-		if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
-			dst.Set(src)
-		} else if config.AppendSlice {
-			if src.Type() != dst.Type() {
-				return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
-			}
-			dst.Set(reflect.AppendSlice(dst, src))
-		}
-	case reflect.Ptr:
-		fallthrough
-	case reflect.Interface:
-		if src.IsNil() {
-			break
-		}
-		if src.Kind() != reflect.Interface {
-			if dst.IsNil() || overwrite {
-				if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
-					dst.Set(src)
-				}
-			} else if src.Kind() == reflect.Ptr {
-				if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
-					return
-				}
-			} else if dst.Elem().Type() == src.Type() {
-				if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
-					return
-				}
-			} else {
-				return ErrDifferentArgumentsTypes
-			}
-			break
-		}
-		if dst.IsNil() || overwrite {
-			if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
-				dst.Set(src)
-			}
-		} else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
-			return
-		}
-	default:
-		if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) {
-			dst.Set(src)
-		}
-	}
-	return
-}
-
-// Merge will fill any empty value-type attributes on the dst struct using the corresponding
-// src attributes if they themselves are not empty. dst and src must be valid same-type structs
-// and dst must be a pointer to struct.
-// It won't merge unexported (private) fields and will do recursively any exported field.
-func Merge(dst, src interface{}, opts ...func(*Config)) error {
-	return merge(dst, src, opts...)
-}
-
-// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
-// non-empty src attribute values.
-// Deprecated: use Merge(…) with WithOverride
-func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
-	return merge(dst, src, append(opts, WithOverride)...)
-}
-
-// WithTransformers adds transformers to merge, allowing you to customize the merging of some types.
-func WithTransformers(transformers Transformers) func(*Config) {
-	return func(config *Config) {
-		config.Transformers = transformers
-	}
-}
-
-// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values.
-func WithOverride(config *Config) {
-	config.Overwrite = true
-}
-
-// WithAppendSlice will make merge append slices instead of overwriting them.
-func WithAppendSlice(config *Config) {
-	config.AppendSlice = true
-}
-
-func merge(dst, src interface{}, opts ...func(*Config)) error {
-	var (
-		vDst, vSrc reflect.Value
-		err        error
-	)
-
-	config := &Config{}
-
-	for _, opt := range opts {
-		opt(config)
-	}
-
-	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
-		return err
-	}
-	if vDst.Type() != vSrc.Type() {
-		return ErrDifferentArgumentsTypes
-	}
-	return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
-}
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go
deleted file mode 100644
index a82fea2..0000000
--- a/vendor/github.com/imdario/mergo/mergo.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2013 Dario Castañé. All rights reserved.
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Based on src/pkg/reflect/deepequal.go from official
-// golang's stdlib.
-
-package mergo
-
-import (
-	"errors"
-	"reflect"
-)
-
-// Errors reported by Mergo when it finds invalid arguments.
-var (
-	ErrNilArguments                = errors.New("src and dst must not be nil")
-	ErrDifferentArgumentsTypes     = errors.New("src and dst must be of same type")
-	ErrNotSupported                = errors.New("only structs and maps are supported")
-	ErrExpectedMapAsDestination    = errors.New("dst was expected to be a map")
-	ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
-)
-
-// During deepMerge, we must keep track of checks that are
-// in progress. The comparison algorithm assumes that all
-// checks in progress are true when it reencounters them.
-// Visited nodes are stored in a map indexed by 17 * addr.
-type visit struct {
-	ptr  uintptr
-	typ  reflect.Type
-	next *visit
-}
-
-// From src/pkg/encoding/json/encode.go.
-func isEmptyValue(v reflect.Value) bool {
-	switch v.Kind() {
-	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
-		return v.Len() == 0
-	case reflect.Bool:
-		return !v.Bool()
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return v.Int() == 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return v.Uint() == 0
-	case reflect.Float32, reflect.Float64:
-		return v.Float() == 0
-	case reflect.Interface, reflect.Ptr:
-		if v.IsNil() {
-			return true
-		}
-		return isEmptyValue(v.Elem())
-	case reflect.Func:
-		return v.IsNil()
-	case reflect.Invalid:
-		return true
-	}
-	return false
-}
-
-func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
-	if dst == nil || src == nil {
-		err = ErrNilArguments
-		return
-	}
-	vDst = reflect.ValueOf(dst).Elem()
-	if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
-		err = ErrNotSupported
-		return
-	}
-	vSrc = reflect.ValueOf(src)
-	// We check if vSrc is a pointer to dereference it.
-	if vSrc.Kind() == reflect.Ptr {
-		vSrc = vSrc.Elem()
-	}
-	return
-}
-
-// Traverses recursively both values, assigning src's fields values to dst.
-// The map argument tracks comparisons that have already been seen, which allows
-// short circuiting on recursive types.
-func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
-	if dst.CanAddr() {
-		addr := dst.UnsafeAddr()
-		h := 17 * addr
-		seen := visited[h]
-		typ := dst.Type()
-		for p := seen; p != nil; p = p.next {
-			if p.ptr == addr && p.typ == typ {
-				return nil
-			}
-		}
-		// Remember, remember...
-		visited[h] = &visit{addr, typ, seen}
-	}
-	return // TODO refactor
-}
diff --git a/vendor/github.com/imdario/mergo/testdata/license.yml b/vendor/github.com/imdario/mergo/testdata/license.yml
deleted file mode 100644
index 2f1ad00..0000000
--- a/vendor/github.com/imdario/mergo/testdata/license.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-import: ../../../../fossene/db/schema/thing.yml
-fields:
-    site: string
-    author: root
diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/jcmturner/gofork/LICENSE
similarity index 92%
rename from vendor/github.com/spf13/pflag/LICENSE
rename to vendor/github.com/jcmturner/gofork/LICENSE
index 63ed1cf..6a66aea 100644
--- a/vendor/github.com/spf13/pflag/LICENSE
+++ b/vendor/github.com/jcmturner/gofork/LICENSE
@@ -1,5 +1,4 @@
-Copyright (c) 2012 Alex Ogier. All rights reserved.
-Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2009 The Go Authors. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md b/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md
new file mode 100644
index 0000000..66a2a8c
--- /dev/null
+++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md
@@ -0,0 +1,5 @@
+This is a temporary repository that will be removed when the issues below are fixed in the core golang code.
+
+## Issues
+* [encoding/asn1: cannot marshal into a GeneralString](https://github.com/golang/go/issues/18832)
+* [encoding/asn1: cannot marshal into slice of strings and pass stringtype parameter tags to members](https://github.com/golang/go/issues/18834)
\ No newline at end of file
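
For context, a hedged sketch of the kind of type this fork exists for: Kerberos-style structs tag string fields as GeneralString, which upstream `encoding/asn1` cannot marshal (the first issue above). The `generalstring` field tag is how the fork's consumers (e.g. gokrb5) use it; treat the tag name as an assumption here, since the marshaling code itself is not part of this hunk.

```go
package main

import (
	"fmt"
	"log"

	"github.com/jcmturner/gofork/encoding/asn1"
)

// A gokrb5-style type; the "generalstring" tag is the capability this
// fork adds (assumed here, as the marshaling code is not in this hunk).
type PrincipalName struct {
	NameType   int32    `asn1:"explicit,tag:0"`
	NameString []string `asn1:"generalstring,explicit,tag:1"`
}

func main() {
	pn := PrincipalName{NameType: 1, NameString: []string{"HTTP", "host.example.com"}}
	der, err := asn1.Marshal(pn)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("% x\n", der)
}
```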
diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go
new file mode 100644
index 0000000..f1bb767
--- /dev/null
+++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go
@@ -0,0 +1,1003 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package asn1 implements parsing of DER-encoded ASN.1 data structures,
+// as defined in ITU-T Rec X.690.
+//
+// See also ``A Layman's Guide to a Subset of ASN.1, BER, and DER,''
+// http://luca.ntop.org/Teaching/Appunti/asn1.html.
+package asn1
+
+// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc
+// are different encoding formats for those objects. Here, we'll be dealing
+// with DER, the Distinguished Encoding Rules. DER is used in X.509 because
+// it's fast to parse and, unlike BER, has a unique encoding for every object.
+// When calculating hashes over objects, it's important that the resulting
+// bytes be the same at both ends and DER removes this margin of error.
+//
+// ASN.1 is very complex and this package doesn't attempt to implement
+// everything by any means.
+
+import (
+	"errors"
+	"fmt"
+	"math/big"
+	"reflect"
+	"strconv"
+	"time"
+	"unicode/utf8"
+)
+
+// A StructuralError suggests that the ASN.1 data is valid, but the Go type
+// which is receiving it doesn't match.
+type StructuralError struct {
+	Msg string
+}
+
+func (e StructuralError) Error() string { return "asn1: structure error: " + e.Msg }
+
+// A SyntaxError suggests that the ASN.1 data is invalid.
+type SyntaxError struct {
+	Msg string
+}
+
+func (e SyntaxError) Error() string { return "asn1: syntax error: " + e.Msg }
+
+// We start by dealing with each of the primitive types in turn.
+
+// BOOLEAN
+
+func parseBool(bytes []byte) (ret bool, err error) {
+	if len(bytes) != 1 {
+		err = SyntaxError{"invalid boolean"}
+		return
+	}
+
+	// DER demands that "If the encoding represents the boolean value TRUE,
+	// its single contents octet shall have all eight bits set to one."
+	// Thus only 0 and 255 are valid encoded values.
+	switch bytes[0] {
+	case 0:
+		ret = false
+	case 0xff:
+		ret = true
+	default:
+		err = SyntaxError{"invalid boolean"}
+	}
+
+	return
+}
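
A worked example of the DER rule above, hedged on the assumption that this fork keeps the stdlib `Unmarshal` entry point (defined further down this file): a complete BOOLEAN encoding is tag `0x01`, length `0x01`, then the single content octet that `parseBool` inspects.

```go
package main

import (
	"fmt"

	"github.com/jcmturner/gofork/encoding/asn1"
)

func main() {
	var b bool
	// Tag 0x01 (BOOLEAN), length 1, content 0xff: DER's only encoding of TRUE.
	if _, err := asn1.Unmarshal([]byte{0x01, 0x01, 0xff}, &b); err == nil {
		fmt.Println(b) // true
	}
	// Any other non-zero octet (e.g. 0x42) is rejected as an invalid boolean.
	_, err := asn1.Unmarshal([]byte{0x01, 0x01, 0x42}, &b)
	fmt.Println(err) // asn1: syntax error: invalid boolean
}
```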
+
+// INTEGER
+
+// checkInteger returns nil if the given bytes are a valid DER-encoded
+// INTEGER and an error otherwise.
+func checkInteger(bytes []byte) error {
+	if len(bytes) == 0 {
+		return StructuralError{"empty integer"}
+	}
+	if len(bytes) == 1 {
+		return nil
+	}
+	if (bytes[0] == 0 && bytes[1]&0x80 == 0) || (bytes[0] == 0xff && bytes[1]&0x80 == 0x80) {
+		return StructuralError{"integer not minimally-encoded"}
+	}
+	return nil
+}
+
+// parseInt64 treats the given bytes as a big-endian, signed integer and
+// returns the result.
+func parseInt64(bytes []byte) (ret int64, err error) {
+	err = checkInteger(bytes)
+	if err != nil {
+		return
+	}
+	if len(bytes) > 8 {
+		// We'll overflow an int64 in this case.
+		err = StructuralError{"integer too large"}
+		return
+	}
+	for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
+		ret <<= 8
+		ret |= int64(bytes[bytesRead])
+	}
+
+	// Shift up and down in order to sign extend the result.
+	ret <<= 64 - uint8(len(bytes))*8
+	ret >>= 64 - uint8(len(bytes))*8
+	return
+}
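+
+// For example, parsing {0xff, 0x7f} first accumulates 0xff7f, and the two
+// shifts sign-extend that 16-bit pattern to the int64 value -129:
+//
+//	v, _ := parseInt64([]byte{0xff, 0x7f}) // v == -129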
+
+// parseInt32 treats the given bytes as a big-endian, signed integer and
+// returns the result.
+func parseInt32(bytes []byte) (int32, error) {
+	if err := checkInteger(bytes); err != nil {
+		return 0, err
+	}
+	ret64, err := parseInt64(bytes)
+	if err != nil {
+		return 0, err
+	}
+	if ret64 != int64(int32(ret64)) {
+		return 0, StructuralError{"integer too large"}
+	}
+	return int32(ret64), nil
+}
+
+var bigOne = big.NewInt(1)
+
+// parseBigInt treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseBigInt(bytes []byte) (*big.Int, error) {
+	if err := checkInteger(bytes); err != nil {
+		return nil, err
+	}
+	ret := new(big.Int)
+	if len(bytes) > 0 && bytes[0]&0x80 == 0x80 {
+		// This is a negative number.
+		notBytes := make([]byte, len(bytes))
+		for i := range notBytes {
+			notBytes[i] = ^bytes[i]
+		}
+		ret.SetBytes(notBytes)
+		ret.Add(ret, bigOne)
+		ret.Neg(ret)
+		return ret, nil
+	}
+	ret.SetBytes(bytes)
+	return ret, nil
+}
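+
+// For example, {0xff} has its sign bit set, so the bytes are inverted to
+// {0x00}, incremented and negated, giving -1:
+//
+//	n, _ := parseBigInt([]byte{0xff}) // n.Int64() == -1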
+
+// BIT STRING
+
+// BitString is the structure to use when you want an ASN.1 BIT STRING type. A
+// bit string is padded up to the nearest byte in memory and the number of
+// valid bits is recorded. Padding bits will be zero.
+type BitString struct {
+	Bytes     []byte // bits packed into bytes.
+	BitLength int    // length in bits.
+}
+
+// At returns the bit at the given index. If the index is out of range it
+// returns 0.
+func (b BitString) At(i int) int {
+	if i < 0 || i >= b.BitLength {
+		return 0
+	}
+	x := i / 8
+	y := 7 - uint(i%8)
+	return int(b.Bytes[x]>>y) & 1
+}
+
+// RightAlign returns a slice where the padding bits are at the beginning. The
+// slice may share memory with the BitString.
+func (b BitString) RightAlign() []byte {
+	shift := uint(8 - (b.BitLength % 8))
+	if shift == 8 || len(b.Bytes) == 0 {
+		return b.Bytes
+	}
+
+	a := make([]byte, len(b.Bytes))
+	a[0] = b.Bytes[0] >> shift
+	for i := 1; i < len(b.Bytes); i++ {
+		a[i] = b.Bytes[i-1] << (8 - shift)
+		a[i] |= b.Bytes[i] >> shift
+	}
+
+	return a
+}
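+
+// For example, a single-bit string stored left-aligned as {0x80} becomes
+// {0x01} once the seven padding bits are moved to the front:
+//
+//	b := BitString{Bytes: []byte{0x80}, BitLength: 1}
+//	a := b.RightAlign() // a == []byte{0x01}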
+
+// parseBitString parses an ASN.1 bit string from the given byte slice and returns it.
+func parseBitString(bytes []byte) (ret BitString, err error) {
+	if len(bytes) == 0 {
+		err = SyntaxError{"zero length BIT STRING"}
+		return
+	}
+	paddingBits := int(bytes[0])
+	if paddingBits > 7 ||
+		len(bytes) == 1 && paddingBits > 0 ||
+		bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {
+		err = SyntaxError{"invalid padding bits in BIT STRING"}
+		return
+	}
+	ret.BitLength = (len(bytes)-1)*8 - paddingBits
+	ret.Bytes = bytes[1:]
+	return
+}
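+
+// For example, the X.690 sample content octets {0x06, 0x6e, 0x5d, 0xc0}
+// declare six padding bits in the first byte, so 3*8-6 = 18 bits are valid:
+//
+//	bs, _ := parseBitString([]byte{0x06, 0x6e, 0x5d, 0xc0})
+//	// bs.BitLength == 18, bs.Bytes == []byte{0x6e, 0x5d, 0xc0}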
+
+// OBJECT IDENTIFIER
+
+// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER.
+type ObjectIdentifier []int
+
+// Equal reports whether oi and other represent the same identifier.
+func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {
+	if len(oi) != len(other) {
+		return false
+	}
+	for i := 0; i < len(oi); i++ {
+		if oi[i] != other[i] {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (oi ObjectIdentifier) String() string {
+	var s string
+
+	for i, v := range oi {
+		if i > 0 {
+			s += "."
+		}
+		s += strconv.Itoa(v)
+	}
+
+	return s
+}
+
+// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
+// returns it. An object identifier is a sequence of variable length integers
+// that are assigned in a hierarchy.
+func parseObjectIdentifier(bytes []byte) (s []int, err error) {
+	if len(bytes) == 0 {
+		err = SyntaxError{"zero length OBJECT IDENTIFIER"}
+		return
+	}
+
+	// In the worst case, we get two elements from the first byte (which is
+	// encoded differently) and then every varint is a single byte long.
+	s = make([]int, len(bytes)+1)
+
+	// The first varint is 40*value1 + value2:
+	// According to this packing, value1 can take the values 0, 1 and 2 only.
+	// When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
+	// then there are no restrictions on value2.
+	v, offset, err := parseBase128Int(bytes, 0)
+	if err != nil {
+		return
+	}
+	if v < 80 {
+		s[0] = v / 40
+		s[1] = v % 40
+	} else {
+		s[0] = 2
+		s[1] = v - 80
+	}
+
+	i := 2
+	for ; offset < len(bytes); i++ {
+		v, offset, err = parseBase128Int(bytes, offset)
+		if err != nil {
+			return
+		}
+		s[i] = v
+	}
+	s = s[0:i]
+	return
+}
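+
+// For example, the content octets {0x2a, 0x86, 0x48} start with 0x2a (42),
+// which unpacks as 40*1 + 2, followed by the varint 840:
+//
+//	oid, _ := parseObjectIdentifier([]byte{0x2a, 0x86, 0x48}) // oid == []int{1, 2, 840}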
+
+// ENUMERATED
+
+// An Enumerated is represented as a plain int.
+type Enumerated int
+
+// FLAG
+
+// A Flag accepts any data and is set to true if present.
+type Flag bool
+
+// parseBase128Int parses a base-128 encoded int from the given offset in the
+// given byte slice. It returns the value and the new offset.
+func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) {
+	offset = initOffset
+	for shifted := 0; offset < len(bytes); shifted++ {
+		if shifted == 4 {
+			err = StructuralError{"base 128 integer too large"}
+			return
+		}
+		ret <<= 7
+		b := bytes[offset]
+		ret |= int(b & 0x7f)
+		offset++
+		if b&0x80 == 0 {
+			return
+		}
+	}
+	err = SyntaxError{"truncated base 128 integer"}
+	return
+}
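+
+// For example, {0x86, 0x48} decodes as (6 << 7) | 0x48 = 840, with the high
+// bit of 0x86 marking a continuation byte:
+//
+//	v, off, _ := parseBase128Int([]byte{0x86, 0x48}, 0) // v == 840, off == 2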
+
+// UTCTime
+
+func parseUTCTime(bytes []byte) (ret time.Time, err error) {
+	s := string(bytes)
+
+	formatStr := "0601021504Z0700"
+	ret, err = time.Parse(formatStr, s)
+	if err != nil {
+		formatStr = "060102150405Z0700"
+		ret, err = time.Parse(formatStr, s)
+	}
+	if err != nil {
+		return
+	}
+
+	if serialized := ret.Format(formatStr); serialized != s {
+		err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized)
+		return
+	}
+
+	if ret.Year() >= 2050 {
+		// UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
+		ret = ret.AddDate(-100, 0, 0)
+	}
+
+	return
+}
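+
+// For example, the two-digit year 50 initially parses as 2050 and is then
+// shifted back a century, while 49 stays put:
+//
+//	t1, _ := parseUTCTime([]byte("500101000000Z")) // t1.Year() == 1950
+//	t2, _ := parseUTCTime([]byte("491231235959Z")) // t2.Year() == 2049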
+
+// parseGeneralizedTime parses the GeneralizedTime from the given byte slice
+// and returns the resulting time.
+func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) {
+	const formatStr = "20060102150405Z0700"
+	s := string(bytes)
+
+	if ret, err = time.Parse(formatStr, s); err != nil {
+		return
+	}
+
+	if serialized := ret.Format(formatStr); serialized != s {
+		err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized)
+	}
+
+	return
+}
+
+// PrintableString
+
+// parsePrintableString parses an ASN.1 PrintableString from the given byte
+// slice and returns it.
+func parsePrintableString(bytes []byte) (ret string, err error) {
+	for _, b := range bytes {
+		if !isPrintable(b) {
+			err = SyntaxError{"PrintableString contains invalid character"}
+			return
+		}
+	}
+	ret = string(bytes)
+	return
+}
+
+// isPrintable reports whether the given b is in the ASN.1 PrintableString set.
+func isPrintable(b byte) bool {
+	return 'a' <= b && b <= 'z' ||
+		'A' <= b && b <= 'Z' ||
+		'0' <= b && b <= '9' ||
+		'\'' <= b && b <= ')' ||
+		'+' <= b && b <= '/' ||
+		b == ' ' ||
+		b == ':' ||
+		b == '=' ||
+		b == '?' ||
+		// This is technically not allowed in a PrintableString.
+		// However, x509 certificates with wildcard strings don't
+		// always use the correct string type so we permit it.
+		b == '*'
+}
+
+// IA5String
+
+// parseIA5String parses an ASN.1 IA5String (ASCII string) from the given
+// byte slice and returns it.
+func parseIA5String(bytes []byte) (ret string, err error) {
+	for _, b := range bytes {
+		if b >= utf8.RuneSelf {
+			err = SyntaxError{"IA5String contains invalid character"}
+			return
+		}
+	}
+	ret = string(bytes)
+	return
+}
+
+// T61String
+
+// parseT61String parses an ASN.1 T61String (8-bit clean string) from the
+// given byte slice and returns it.
+func parseT61String(bytes []byte) (ret string, err error) {
+	return string(bytes), nil
+}
+
+// UTF8String
+
+// parseUTF8String parses an ASN.1 UTF8String (raw UTF-8) from the given byte
+// slice and returns it.
+func parseUTF8String(bytes []byte) (ret string, err error) {
+	if !utf8.Valid(bytes) {
+		return "", errors.New("asn1: invalid UTF-8 string")
+	}
+	return string(bytes), nil
+}
+
+// A RawValue represents an undecoded ASN.1 object.
+type RawValue struct {
+	Class, Tag int
+	IsCompound bool
+	Bytes      []byte
+	FullBytes  []byte // includes the tag and length
+}
+
+// RawContent is used to signal that the undecoded DER data needs to be
+// preserved for a struct. To use it, the first field of the struct must have
+// this type. It's an error for any of the other fields to have this type.
+type RawContent []byte
+
+// Tagging
+
+// parseTagAndLength parses an ASN.1 tag and length pair from the given offset
+// into a byte slice. It returns the parsed data and the new offset. SET and
+// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we
+// don't distinguish between ordered and unordered objects in this code.
+func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err error) {
+	offset = initOffset
+	// parseTagAndLength should not be called without at least a single
+	// byte to read. Thus this check is for robustness:
+	if offset >= len(bytes) {
+		err = errors.New("asn1: internal error in parseTagAndLength")
+		return
+	}
+	b := bytes[offset]
+	offset++
+	ret.class = int(b >> 6)
+	ret.isCompound = b&0x20 == 0x20
+	ret.tag = int(b & 0x1f)
+
+	// If the bottom five bits are set, then the tag number is actually base 128
+	// encoded afterwards
+	if ret.tag == 0x1f {
+		ret.tag, offset, err = parseBase128Int(bytes, offset)
+		if err != nil {
+			return
+		}
+		// Tags should be encoded in minimal form.
+		if ret.tag < 0x1f {
+			err = SyntaxError{"non-minimal tag"}
+			return
+		}
+	}
+	if offset >= len(bytes) {
+		err = SyntaxError{"truncated tag or length"}
+		return
+	}
+	b = bytes[offset]
+	offset++
+	if b&0x80 == 0 {
+		// The length is encoded in the bottom 7 bits.
+		ret.length = int(b & 0x7f)
+	} else {
+		// Bottom 7 bits give the number of length bytes to follow.
+		numBytes := int(b & 0x7f)
+		if numBytes == 0 {
+			err = SyntaxError{"indefinite length found (not DER)"}
+			return
+		}
+		ret.length = 0
+		for i := 0; i < numBytes; i++ {
+			if offset >= len(bytes) {
+				err = SyntaxError{"truncated tag or length"}
+				return
+			}
+			b = bytes[offset]
+			offset++
+			if ret.length >= 1<<23 {
+				// We can't shift ret.length up without
+				// overflowing.
+				err = StructuralError{"length too large"}
+				return
+			}
+			ret.length <<= 8
+			ret.length |= int(b)
+			if ret.length == 0 {
+				// DER requires that lengths be minimal.
+				err = StructuralError{"superfluous leading zeros in length"}
+				return
+			}
+		}
+		// Short lengths must be encoded in short form.
+		if ret.length < 0x80 {
+			err = StructuralError{"non-minimal length"}
+			return
+		}
+	}
+
+	return
+}
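+
+// For example, given the SEQUENCE header {0x30, 0x03}: 0x30 is class 0
+// (UNIVERSAL) with the compound bit set and tag 16, and 0x03 is a
+// short-form length:
+//
+//	t, off, _ := parseTagAndLength([]byte{0x30, 0x03, 0x02, 0x01, 0x05}, 0)
+//	// t == tagAndLength{class: 0, tag: 16, length: 3, isCompound: true}, off == 2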
+
+// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse
+// a number of ASN.1 values from the given byte slice and returns them as a
+// slice of Go values of the given type.
+func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err error) {
+	expectedTag, compoundType, ok := getUniversalType(elemType)
+	if !ok {
+		err = StructuralError{"unknown Go type for slice"}
+		return
+	}
+
+	// First we iterate over the input and count the number of elements,
+	// checking that the types are correct in each case.
+	numElements := 0
+	for offset := 0; offset < len(bytes); {
+		var t tagAndLength
+		t, offset, err = parseTagAndLength(bytes, offset)
+		if err != nil {
+			return
+		}
+		switch t.tag {
+		case TagIA5String, TagGeneralString, TagT61String, TagUTF8String:
+			// We pretend that various other string types are
+			// PRINTABLE STRINGs so that a sequence of them can be
+			// parsed into a []string.
+			t.tag = TagPrintableString
+		case TagGeneralizedTime, TagUTCTime:
+			// Likewise, both time types are treated the same.
+			t.tag = TagUTCTime
+		}
+
+		if t.class != ClassUniversal || t.isCompound != compoundType || t.tag != expectedTag {
+			err = StructuralError{"sequence tag mismatch"}
+			return
+		}
+		if invalidLength(offset, t.length, len(bytes)) {
+			err = SyntaxError{"truncated sequence"}
+			return
+		}
+		offset += t.length
+		numElements++
+	}
+	ret = reflect.MakeSlice(sliceType, numElements, numElements)
+	params := fieldParameters{}
+	offset := 0
+	for i := 0; i < numElements; i++ {
+		offset, err = parseField(ret.Index(i), bytes, offset, params)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+var (
+	bitStringType        = reflect.TypeOf(BitString{})
+	objectIdentifierType = reflect.TypeOf(ObjectIdentifier{})
+	enumeratedType       = reflect.TypeOf(Enumerated(0))
+	flagType             = reflect.TypeOf(Flag(false))
+	timeType             = reflect.TypeOf(time.Time{})
+	rawValueType         = reflect.TypeOf(RawValue{})
+	rawContentsType      = reflect.TypeOf(RawContent(nil))
+	bigIntType           = reflect.TypeOf(new(big.Int))
+)
+
+// invalidLength returns true iff offset + length > sliceLength, or if the
+// addition would overflow.
+func invalidLength(offset, length, sliceLength int) bool {
+	return offset+length < offset || offset+length > sliceLength
+}
+
+// parseField is the main parsing function. Given a byte slice and an offset
+// into the array, it will try to parse a suitable ASN.1 value out and store it
+// in the given Value.
+func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) {
+	offset = initOffset
+	fieldType := v.Type()
+
+	// If we have run out of data, it may be that there are optional elements at the end.
+	if offset == len(bytes) {
+		if !setDefaultValue(v, params) {
+			err = SyntaxError{"sequence truncated"}
+		}
+		return
+	}
+
+	// Deal with raw values.
+	if fieldType == rawValueType {
+		var t tagAndLength
+		t, offset, err = parseTagAndLength(bytes, offset)
+		if err != nil {
+			return
+		}
+		if invalidLength(offset, t.length, len(bytes)) {
+			err = SyntaxError{"data truncated"}
+			return
+		}
+		result := RawValue{t.class, t.tag, t.isCompound, bytes[offset : offset+t.length], bytes[initOffset : offset+t.length]}
+		offset += t.length
+		v.Set(reflect.ValueOf(result))
+		return
+	}
+
+	// Deal with the ANY type.
+	if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 {
+		var t tagAndLength
+		t, offset, err = parseTagAndLength(bytes, offset)
+		if err != nil {
+			return
+		}
+		if invalidLength(offset, t.length, len(bytes)) {
+			err = SyntaxError{"data truncated"}
+			return
+		}
+		var result interface{}
+		if !t.isCompound && t.class == ClassUniversal {
+			innerBytes := bytes[offset : offset+t.length]
+			switch t.tag {
+			case TagPrintableString:
+				result, err = parsePrintableString(innerBytes)
+			case TagIA5String:
+				result, err = parseIA5String(innerBytes)
+			// jtasn1 addition of following case
+			case TagGeneralString:
+				result, err = parseIA5String(innerBytes)
+			case TagT61String:
+				result, err = parseT61String(innerBytes)
+			case TagUTF8String:
+				result, err = parseUTF8String(innerBytes)
+			case TagInteger:
+				result, err = parseInt64(innerBytes)
+			case TagBitString:
+				result, err = parseBitString(innerBytes)
+			case TagOID:
+				result, err = parseObjectIdentifier(innerBytes)
+			case TagUTCTime:
+				result, err = parseUTCTime(innerBytes)
+			case TagGeneralizedTime:
+				result, err = parseGeneralizedTime(innerBytes)
+			case TagOctetString:
+				result = innerBytes
+			default:
+				// If we don't know how to handle the type, we just leave Value as nil.
+			}
+		}
+		offset += t.length
+		if err != nil {
+			return
+		}
+		if result != nil {
+			v.Set(reflect.ValueOf(result))
+		}
+		return
+	}
+	universalTag, compoundType, ok1 := getUniversalType(fieldType)
+	if !ok1 {
+		err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType)}
+		return
+	}
+
+	t, offset, err := parseTagAndLength(bytes, offset)
+	if err != nil {
+		return
+	}
+	if params.explicit {
+		expectedClass := ClassContextSpecific
+		if params.application {
+			expectedClass = ClassApplication
+		}
+		if offset == len(bytes) {
+			err = StructuralError{"explicit tag has no child"}
+			return
+		}
+		if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) {
+			if t.length > 0 {
+				t, offset, err = parseTagAndLength(bytes, offset)
+				if err != nil {
+					return
+				}
+			} else {
+				if fieldType != flagType {
+					err = StructuralError{"zero length explicit tag was not an asn1.Flag"}
+					return
+				}
+				v.SetBool(true)
+				return
+			}
+		} else {
+			// The tags didn't match, it might be an optional element.
+			ok := setDefaultValue(v, params)
+			if ok {
+				offset = initOffset
+			} else {
+				err = StructuralError{"explicitly tagged member didn't match"}
+			}
+			return
+		}
+	}
+
+	// Special case for strings: all the ASN.1 string types map to the Go
+	// type string. getUniversalType returns the tag for PrintableString
+	// when it sees a string, so if we see a different string type on the
+	// wire, we change the universal type to match.
+	if universalTag == TagPrintableString {
+		if t.class == ClassUniversal {
+			switch t.tag {
+			case TagIA5String, TagGeneralString, TagT61String, TagUTF8String:
+				universalTag = t.tag
+			}
+		} else if params.stringType != 0 {
+			universalTag = params.stringType
+		}
+	}
+
+	// Special case for time: UTCTime and GeneralizedTime both map to the
+	// Go type time.Time.
+	if universalTag == TagUTCTime && t.tag == TagGeneralizedTime && t.class == ClassUniversal {
+		universalTag = TagGeneralizedTime
+	}
+
+	if params.set {
+		universalTag = TagSet
+	}
+
+	expectedClass := ClassUniversal
+	expectedTag := universalTag
+
+	if !params.explicit && params.tag != nil {
+		expectedClass = ClassContextSpecific
+		expectedTag = *params.tag
+	}
+
+	if !params.explicit && params.application && params.tag != nil {
+		expectedClass = ClassApplication
+		expectedTag = *params.tag
+	}
+
+	// We have unwrapped any explicit tagging at this point.
+	if t.class != expectedClass || t.tag != expectedTag || t.isCompound != compoundType {
+		// Tags don't match. Again, it could be an optional element.
+		ok := setDefaultValue(v, params)
+		if ok {
+			offset = initOffset
+		} else {
+			err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset)}
+		}
+		return
+	}
+	if invalidLength(offset, t.length, len(bytes)) {
+		err = SyntaxError{"data truncated"}
+		return
+	}
+	innerBytes := bytes[offset : offset+t.length]
+	offset += t.length
+
+	// We deal with the structures defined in this package first.
+	switch fieldType {
+	case objectIdentifierType:
+		newSlice, err1 := parseObjectIdentifier(innerBytes)
+		v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice)))
+		if err1 == nil {
+			reflect.Copy(v, reflect.ValueOf(newSlice))
+		}
+		err = err1
+		return
+	case bitStringType:
+		bs, err1 := parseBitString(innerBytes)
+		if err1 == nil {
+			v.Set(reflect.ValueOf(bs))
+		}
+		err = err1
+		return
+	case timeType:
+		var time time.Time
+		var err1 error
+		if universalTag == TagUTCTime {
+			time, err1 = parseUTCTime(innerBytes)
+		} else {
+			time, err1 = parseGeneralizedTime(innerBytes)
+		}
+		if err1 == nil {
+			v.Set(reflect.ValueOf(time))
+		}
+		err = err1
+		return
+	case enumeratedType:
+		parsedInt, err1 := parseInt32(innerBytes)
+		if err1 == nil {
+			v.SetInt(int64(parsedInt))
+		}
+		err = err1
+		return
+	case flagType:
+		v.SetBool(true)
+		return
+	case bigIntType:
+		parsedInt, err1 := parseBigInt(innerBytes)
+		if err1 == nil {
+			v.Set(reflect.ValueOf(parsedInt))
+		}
+		err = err1
+		return
+	}
+	switch val := v; val.Kind() {
+	case reflect.Bool:
+		parsedBool, err1 := parseBool(innerBytes)
+		if err1 == nil {
+			val.SetBool(parsedBool)
+		}
+		err = err1
+		return
+	case reflect.Int, reflect.Int32, reflect.Int64:
+		if val.Type().Size() == 4 {
+			parsedInt, err1 := parseInt32(innerBytes)
+			if err1 == nil {
+				val.SetInt(int64(parsedInt))
+			}
+			err = err1
+		} else {
+			parsedInt, err1 := parseInt64(innerBytes)
+			if err1 == nil {
+				val.SetInt(parsedInt)
+			}
+			err = err1
+		}
+		return
+	// TODO(dfc) Add support for the remaining integer types
+	case reflect.Struct:
+		structType := fieldType
+
+		if structType.NumField() > 0 &&
+			structType.Field(0).Type == rawContentsType {
+			bytes := bytes[initOffset:offset]
+			val.Field(0).Set(reflect.ValueOf(RawContent(bytes)))
+		}
+
+		innerOffset := 0
+		for i := 0; i < structType.NumField(); i++ {
+			field := structType.Field(i)
+			if i == 0 && field.Type == rawContentsType {
+				continue
+			}
+			innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, parseFieldParameters(field.Tag.Get("asn1")))
+			if err != nil {
+				return
+			}
+		}
+		// We allow extra bytes at the end of the SEQUENCE because
+		// adding elements to the end has been used in X.509 as the
+		// version numbers have increased.
+		return
+	case reflect.Slice:
+		sliceType := fieldType
+		if sliceType.Elem().Kind() == reflect.Uint8 {
+			val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes)))
+			reflect.Copy(val, reflect.ValueOf(innerBytes))
+			return
+		}
+		newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem())
+		if err1 == nil {
+			val.Set(newSlice)
+		}
+		err = err1
+		return
+	case reflect.String:
+		var v string
+		switch universalTag {
+		case TagPrintableString:
+			v, err = parsePrintableString(innerBytes)
+		case TagIA5String:
+			v, err = parseIA5String(innerBytes)
+		case TagT61String:
+			v, err = parseT61String(innerBytes)
+		case TagUTF8String:
+			v, err = parseUTF8String(innerBytes)
+		case TagGeneralString:
+			// GeneralString is specified in ISO-2022/ECMA-35.
+			// A brief review suggests that it includes structures
+			// that allow the encoding to change midstring and
+			// such. We give up and pass it as an 8-bit string.
+			v, err = parseT61String(innerBytes)
+		default:
+			err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag)}
+		}
+		if err == nil {
+			val.SetString(v)
+		}
+		return
+	}
+	err = StructuralError{"unsupported: " + v.Type().String()}
+	return
+}
+
+// canHaveDefaultValue reports whether k is a Kind that we will set a default
+// value for. (A signed integer, essentially.)
+func canHaveDefaultValue(k reflect.Kind) bool {
+	switch k {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return true
+	}
+
+	return false
+}
+
+// setDefaultValue is used to install a default value, from a tag string, into
+// a Value. It is successful if the field was optional, even if a default value
+// wasn't provided or it failed to install it into the Value.
+func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) {
+	if !params.optional {
+		return
+	}
+	ok = true
+	if params.defaultValue == nil {
+		return
+	}
+	if canHaveDefaultValue(v.Kind()) {
+		v.SetInt(*params.defaultValue)
+	}
+	return
+}
+
+// Unmarshal parses the DER-encoded ASN.1 data structure b
+// and uses the reflect package to fill in an arbitrary value pointed at by val.
+// Because Unmarshal uses the reflect package, the structs
+// being written to must use upper case field names.
+//
+// An ASN.1 INTEGER can be written to an int, int32, int64,
+// or *big.Int (from the math/big package).
+// If the encoded value does not fit in the Go type,
+// Unmarshal returns a parse error.
+//
+// An ASN.1 BIT STRING can be written to a BitString.
+//
+// An ASN.1 OCTET STRING can be written to a []byte.
+//
+// An ASN.1 OBJECT IDENTIFIER can be written to an
+// ObjectIdentifier.
+//
+// An ASN.1 ENUMERATED can be written to an Enumerated.
+//
+// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a time.Time.
+//
+// An ASN.1 PrintableString or IA5String can be written to a string.
+//
+// Any of the above ASN.1 values can be written to an interface{}.
+// The value stored in the interface has the corresponding Go type.
+// For integers, that type is int64.
+//
+// An ASN.1 SEQUENCE OF x or SET OF x can be written
+// to a slice if an x can be written to the slice's element type.
+//
+// An ASN.1 SEQUENCE or SET can be written to a struct
+// if each of the elements in the sequence can be
+// written to the corresponding element in the struct.
+//
+// The following tags on struct fields have special meaning to Unmarshal:
+//
+//	application	specifies that an APPLICATION tag is used
+//	default:x	sets the default value for optional integer fields
+//	explicit	specifies that an additional, explicit tag wraps the implicit one
+//	optional	marks the field as ASN.1 OPTIONAL
+//	set		causes a SET, rather than a SEQUENCE, to be expected
+//	tag:x		specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC
+//
+// If the type of the first field of a structure is RawContent then the raw
+// ASN.1 contents of the struct will be stored in it.
+//
+// If the type name of a slice element ends with "SET" then it's treated as if
+// the "set" tag was set on it. This can be used with nested slices where a
+// struct tag cannot be given.
+//
+// Other ASN.1 types are not supported; if it encounters them,
+// Unmarshal returns a parse error.
+func Unmarshal(b []byte, val interface{}) (rest []byte, err error) {
+	return UnmarshalWithParams(b, val, "")
+}
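+
+// A minimal sketch of typical use (the struct here is hypothetical):
+//
+//	type AlgorithmIdentifier struct {
+//		Algorithm  ObjectIdentifier
+//		Parameters RawValue `asn1:"optional"`
+//	}
+//	var ai AlgorithmIdentifier
+//	rest, err := Unmarshal(derBytes, &ai) // derBytes holds DER-encoded input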
+
+// UnmarshalWithParams allows field parameters to be specified for the
+// top-level element. The form of the params is the same as the field tags.
+func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) {
+	v := reflect.ValueOf(val).Elem()
+	offset, err := parseField(v, b, 0, parseFieldParameters(params))
+	if err != nil {
+		return nil, err
+	}
+	return b[offset:], nil
+}
diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go
new file mode 100644
index 0000000..7a9da49
--- /dev/null
+++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go
@@ -0,0 +1,173 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asn1
+
+import (
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// ASN.1 objects have metadata preceding them:
+//   the tag: the type of the object
+//   a flag denoting if this object is compound or not
+//   the class type: the namespace of the tag
+//   the length of the object, in bytes
+
+// Here are some standard tags and classes
+
+// ASN.1 tags represent the type of the following object.
+const (
+	TagBoolean         = 1
+	TagInteger         = 2
+	TagBitString       = 3
+	TagOctetString     = 4
+	TagOID             = 6
+	TagEnum            = 10
+	TagUTF8String      = 12
+	TagSequence        = 16
+	TagSet             = 17
+	TagPrintableString = 19
+	TagT61String       = 20
+	TagIA5String       = 22
+	TagUTCTime         = 23
+	TagGeneralizedTime = 24
+	TagGeneralString   = 27
+)
+
+// ASN.1 class types represent the namespace of the tag.
+const (
+	ClassUniversal       = 0
+	ClassApplication     = 1
+	ClassContextSpecific = 2
+	ClassPrivate         = 3
+)
+
+type tagAndLength struct {
+	class, tag, length int
+	isCompound         bool
+}
+
+// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead
+// of" and "in addition to". When not specified, every primitive type has a
+// default tag in the UNIVERSAL class.
+//
+// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1
+// doesn't actually have a UNIVERSAL keyword). However, by saying [IMPLICIT
+// CONTEXT-SPECIFIC 42], that means that the tag is replaced by another.
+//
+// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an
+// /additional/ tag would wrap the default tag. This explicit tag will have the
+// compound flag set.
+//
+// (This is used in order to remove ambiguity with optional elements.)
+//
+// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth; however, we
+// don't support that here. We support a single layer of EXPLICIT or IMPLICIT
+// tagging with tag strings on the fields of a structure.
+
+// fieldParameters is the parsed representation of tag string from a structure field.
+type fieldParameters struct {
+	optional     bool   // true iff the field is OPTIONAL
+	explicit     bool   // true iff an EXPLICIT tag is in use.
+	application  bool   // true iff an APPLICATION tag is in use.
+	defaultValue *int64 // a default value for INTEGER typed fields (may be nil).
+	tag          *int   // the EXPLICIT or IMPLICIT tag (may be nil).
+	stringType   int    // the string tag to use when marshaling.
+	timeType     int    // the time tag to use when marshaling.
+	set          bool   // true iff this should be encoded as a SET
+	omitEmpty    bool   // true iff this should be omitted if empty when marshaling.
+
+	// Invariants:
+	//   if explicit is set, tag is non-nil.
+}
+
+// Given a tag string with the format specified in the package comment,
+// parseFieldParameters will parse it into a fieldParameters structure,
+// ignoring unknown parts of the string.
+func parseFieldParameters(str string) (ret fieldParameters) {
+	for _, part := range strings.Split(str, ",") {
+		switch {
+		case part == "optional":
+			ret.optional = true
+		case part == "explicit":
+			ret.explicit = true
+			if ret.tag == nil {
+				ret.tag = new(int)
+			}
+		case part == "generalized":
+			ret.timeType = TagGeneralizedTime
+		case part == "utc":
+			ret.timeType = TagUTCTime
+		case part == "ia5":
+			ret.stringType = TagIA5String
+		// jtasn1 case below added
+		case part == "generalstring":
+			ret.stringType = TagGeneralString
+		case part == "printable":
+			ret.stringType = TagPrintableString
+		case part == "utf8":
+			ret.stringType = TagUTF8String
+		case strings.HasPrefix(part, "default:"):
+			i, err := strconv.ParseInt(part[8:], 10, 64)
+			if err == nil {
+				ret.defaultValue = new(int64)
+				*ret.defaultValue = i
+			}
+		case strings.HasPrefix(part, "tag:"):
+			i, err := strconv.Atoi(part[4:])
+			if err == nil {
+				ret.tag = new(int)
+				*ret.tag = i
+			}
+		case part == "set":
+			ret.set = true
+		case part == "application":
+			ret.application = true
+			if ret.tag == nil {
+				ret.tag = new(int)
+			}
+		case part == "omitempty":
+			ret.omitEmpty = true
+		}
+	}
+	return
+}
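+
+// For example, the tag string "optional,explicit,tag:1" produces a
+// fieldParameters with optional and explicit set and *tag == 1:
+//
+//	p := parseFieldParameters("optional,explicit,tag:1")
+//	// p.optional == true, p.explicit == true, *p.tag == 1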
+
+// Given a reflected Go type, getUniversalType returns the default tag number
+// and expected compound flag.
+func getUniversalType(t reflect.Type) (tagNumber int, isCompound, ok bool) {
+	switch t {
+	case objectIdentifierType:
+		return TagOID, false, true
+	case bitStringType:
+		return TagBitString, false, true
+	case timeType:
+		return TagUTCTime, false, true
+	case enumeratedType:
+		return TagEnum, false, true
+	case bigIntType:
+		return TagInteger, false, true
+	}
+	switch t.Kind() {
+	case reflect.Bool:
+		return TagBoolean, false, true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return TagInteger, false, true
+	case reflect.Struct:
+		return TagSequence, true, true
+	case reflect.Slice:
+		if t.Elem().Kind() == reflect.Uint8 {
+			return TagOctetString, false, true
+		}
+		if strings.HasSuffix(t.Name(), "SET") {
+			return TagSet, true, true
+		}
+		return TagSequence, true, true
+	case reflect.String:
+		return TagPrintableString, false, true
+	}
+	return 0, false, false
+}
diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go
new file mode 100644
index 0000000..f52eee9
--- /dev/null
+++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go
@@ -0,0 +1,659 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asn1
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"math/big"
+	"reflect"
+	"time"
+	"unicode/utf8"
+)
+
+// A forkableWriter is an in-memory buffer that can be
+// 'forked' to create new forkableWriters that bracket the
+// original. After
+//    pre, post := w.fork()
+// the overall sequence of bytes represented is logically w+pre+post.
+type forkableWriter struct {
+	*bytes.Buffer
+	pre, post *forkableWriter
+}
+
+func newForkableWriter() *forkableWriter {
+	return &forkableWriter{new(bytes.Buffer), nil, nil}
+}
+
+func (f *forkableWriter) fork() (pre, post *forkableWriter) {
+	if f.pre != nil || f.post != nil {
+		panic("have already forked")
+	}
+	f.pre = newForkableWriter()
+	f.post = newForkableWriter()
+	return f.pre, f.post
+}
+
+func (f *forkableWriter) Len() (l int) {
+	l += f.Buffer.Len()
+	if f.pre != nil {
+		l += f.pre.Len()
+	}
+	if f.post != nil {
+		l += f.post.Len()
+	}
+	return
+}
+
+func (f *forkableWriter) writeTo(out io.Writer) (n int, err error) {
+	n, err = out.Write(f.Bytes())
+	if err != nil {
+		return
+	}
+
+	var nn int
+
+	if f.pre != nil {
+		nn, err = f.pre.writeTo(out)
+		n += nn
+		if err != nil {
+			return
+		}
+	}
+
+	if f.post != nil {
+		nn, err = f.post.writeTo(out)
+		n += nn
+	}
+	return
+}
+
+func marshalBase128Int(out *forkableWriter, n int64) (err error) {
+	if n == 0 {
+		err = out.WriteByte(0)
+		return
+	}
+
+	l := 0
+	for i := n; i > 0; i >>= 7 {
+		l++
+	}
+
+	for i := l - 1; i >= 0; i-- {
+		o := byte(n >> uint(i*7))
+		o &= 0x7f
+		if i != 0 {
+			o |= 0x80
+		}
+		err = out.WriteByte(o)
+		if err != nil {
+			return
+		}
+	}
+
+	return nil
+}
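+
+// For example, 840 is emitted as {0x86, 0x48}: the first byte carries the
+// high seven bits with the continuation bit set, mirroring parseBase128Int:
+//
+//	w := newForkableWriter()
+//	_ = marshalBase128Int(w, 840) // w.Bytes() == []byte{0x86, 0x48}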
+
+func marshalInt64(out *forkableWriter, i int64) (err error) {
+	n := int64Length(i)
+
+	for ; n > 0; n-- {
+		err = out.WriteByte(byte(i >> uint((n-1)*8)))
+		if err != nil {
+			return
+		}
+	}
+
+	return nil
+}
+
+func int64Length(i int64) (numBytes int) {
+	numBytes = 1
+
+	for i > 127 {
+		numBytes++
+		i >>= 8
+	}
+
+	for i < -128 {
+		numBytes++
+		i >>= 8
+	}
+
+	return
+}
+
+func marshalBigInt(out *forkableWriter, n *big.Int) (err error) {
+	if n.Sign() < 0 {
+		// A negative number has to be converted to two's-complement
+		// form. So we'll subtract 1 and invert. If the
+		// most-significant-bit isn't set then we'll need to pad the
+		// beginning with 0xff in order to keep the number negative.
+		nMinus1 := new(big.Int).Neg(n)
+		nMinus1.Sub(nMinus1, bigOne)
+		bytes := nMinus1.Bytes()
+		for i := range bytes {
+			bytes[i] ^= 0xff
+		}
+		if len(bytes) == 0 || bytes[0]&0x80 == 0 {
+			err = out.WriteByte(0xff)
+			if err != nil {
+				return
+			}
+		}
+		_, err = out.Write(bytes)
+	} else if n.Sign() == 0 {
+		// Zero is written as a single zero byte rather than as no bytes.
+		err = out.WriteByte(0x00)
+	} else {
+		bytes := n.Bytes()
+		if len(bytes) > 0 && bytes[0]&0x80 != 0 {
+			// We'll have to pad this with 0x00 in order to stop it
+			// looking like a negative number.
+			err = out.WriteByte(0)
+			if err != nil {
+				return
+			}
+		}
+		_, err = out.Write(bytes)
+	}
+	return
+}
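+
+// For example, 128 needs a leading 0x00 pad to stay positive, and -129
+// becomes the two's-complement bytes {0xff, 0x7f}:
+//
+//	w := newForkableWriter()
+//	_ = marshalBigInt(w, big.NewInt(128))  // appends {0x00, 0x80}
+//	_ = marshalBigInt(w, big.NewInt(-129)) // appends {0xff, 0x7f}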
+
+func marshalLength(out *forkableWriter, i int) (err error) {
+	n := lengthLength(i)
+
+	for ; n > 0; n-- {
+		err = out.WriteByte(byte(i >> uint((n-1)*8)))
+		if err != nil {
+			return
+		}
+	}
+
+	return nil
+}
+
+func lengthLength(i int) (numBytes int) {
+	numBytes = 1
+	for i > 255 {
+		numBytes++
+		i >>= 8
+	}
+	return
+}
+
+func marshalTagAndLength(out *forkableWriter, t tagAndLength) (err error) {
+	b := uint8(t.class) << 6
+	if t.isCompound {
+		b |= 0x20
+	}
+	if t.tag >= 31 {
+		b |= 0x1f
+		err = out.WriteByte(b)
+		if err != nil {
+			return
+		}
+		err = marshalBase128Int(out, int64(t.tag))
+		if err != nil {
+			return
+		}
+	} else {
+		b |= uint8(t.tag)
+		err = out.WriteByte(b)
+		if err != nil {
+			return
+		}
+	}
+
+	if t.length >= 128 {
+		l := lengthLength(t.length)
+		err = out.WriteByte(0x80 | byte(l))
+		if err != nil {
+			return
+		}
+		err = marshalLength(out, t.length)
+		if err != nil {
+			return
+		}
+	} else {
+		err = out.WriteByte(byte(t.length))
+		if err != nil {
+			return
+		}
+	}
+
+	return nil
+}
+
+func marshalBitString(out *forkableWriter, b BitString) (err error) {
+	paddingBits := byte((8 - b.BitLength%8) % 8)
+	err = out.WriteByte(paddingBits)
+	if err != nil {
+		return
+	}
+	_, err = out.Write(b.Bytes)
+	return
+}
+
+func marshalObjectIdentifier(out *forkableWriter, oid []int) (err error) {
+	if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) {
+		return StructuralError{"invalid object identifier"}
+	}
+
+	err = marshalBase128Int(out, int64(oid[0]*40+oid[1]))
+	if err != nil {
+		return
+	}
+	for i := 2; i < len(oid); i++ {
+		err = marshalBase128Int(out, int64(oid[i]))
+		if err != nil {
+			return
+		}
+	}
+
+	return
+}
+
+func marshalPrintableString(out *forkableWriter, s string) (err error) {
+	b := []byte(s)
+	for _, c := range b {
+		if !isPrintable(c) {
+			return StructuralError{"PrintableString contains invalid character"}
+		}
+	}
+
+	_, err = out.Write(b)
+	return
+}
+
+func marshalIA5String(out *forkableWriter, s string) (err error) {
+	b := []byte(s)
+	for _, c := range b {
+		if c > 127 {
+			return StructuralError{"IA5String contains invalid character"}
+		}
+	}
+
+	_, err = out.Write(b)
+	return
+}
+
+func marshalUTF8String(out *forkableWriter, s string) (err error) {
+	_, err = out.Write([]byte(s))
+	return
+}
+
+func marshalTwoDigits(out *forkableWriter, v int) (err error) {
+	err = out.WriteByte(byte('0' + (v/10)%10))
+	if err != nil {
+		return
+	}
+	return out.WriteByte(byte('0' + v%10))
+}
+
+func marshalFourDigits(out *forkableWriter, v int) (err error) {
+	var bytes [4]byte
+	for i := range bytes {
+		bytes[3-i] = '0' + byte(v%10)
+		v /= 10
+	}
+	_, err = out.Write(bytes[:])
+	return
+}
+
+func outsideUTCRange(t time.Time) bool {
+	year := t.Year()
+	return year < 1950 || year >= 2050
+}
+
+func marshalUTCTime(out *forkableWriter, t time.Time) (err error) {
+	year := t.Year()
+
+	switch {
+	case 1950 <= year && year < 2000:
+		err = marshalTwoDigits(out, year-1900)
+	case 2000 <= year && year < 2050:
+		err = marshalTwoDigits(out, year-2000)
+	default:
+		return StructuralError{"cannot represent time as UTCTime"}
+	}
+	if err != nil {
+		return
+	}
+
+	return marshalTimeCommon(out, t)
+}
+
+func marshalGeneralizedTime(out *forkableWriter, t time.Time) (err error) {
+	year := t.Year()
+	if year < 0 || year > 9999 {
+		return StructuralError{"cannot represent time as GeneralizedTime"}
+	}
+	if err = marshalFourDigits(out, year); err != nil {
+		return
+	}
+
+	return marshalTimeCommon(out, t)
+}
+
+func marshalTimeCommon(out *forkableWriter, t time.Time) (err error) {
+	_, month, day := t.Date()
+
+	err = marshalTwoDigits(out, int(month))
+	if err != nil {
+		return
+	}
+
+	err = marshalTwoDigits(out, day)
+	if err != nil {
+		return
+	}
+
+	hour, min, sec := t.Clock()
+
+	err = marshalTwoDigits(out, hour)
+	if err != nil {
+		return
+	}
+
+	err = marshalTwoDigits(out, min)
+	if err != nil {
+		return
+	}
+
+	err = marshalTwoDigits(out, sec)
+	if err != nil {
+		return
+	}
+
+	_, offset := t.Zone()
+
+	switch {
+	case offset/60 == 0:
+		err = out.WriteByte('Z')
+		return
+	case offset > 0:
+		err = out.WriteByte('+')
+	case offset < 0:
+		err = out.WriteByte('-')
+	}
+
+	if err != nil {
+		return
+	}
+
+	offsetMinutes := offset / 60
+	if offsetMinutes < 0 {
+		offsetMinutes = -offsetMinutes
+	}
+
+	err = marshalTwoDigits(out, offsetMinutes/60)
+	if err != nil {
+		return
+	}
+
+	err = marshalTwoDigits(out, offsetMinutes%60)
+	return
+}
+
+func stripTagAndLength(in []byte) []byte {
+	_, offset, err := parseTagAndLength(in, 0)
+	if err != nil {
+		return in
+	}
+	return in[offset:]
+}
+
+func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameters) (err error) {
+	switch value.Type() {
+	case flagType:
+		return nil
+	case timeType:
+		t := value.Interface().(time.Time)
+		if params.timeType == TagGeneralizedTime || outsideUTCRange(t) {
+			return marshalGeneralizedTime(out, t)
+		} else {
+			return marshalUTCTime(out, t)
+		}
+	case bitStringType:
+		return marshalBitString(out, value.Interface().(BitString))
+	case objectIdentifierType:
+		return marshalObjectIdentifier(out, value.Interface().(ObjectIdentifier))
+	case bigIntType:
+		return marshalBigInt(out, value.Interface().(*big.Int))
+	}
+
+	switch v := value; v.Kind() {
+	case reflect.Bool:
+		if v.Bool() {
+			return out.WriteByte(255)
+		} else {
+			return out.WriteByte(0)
+		}
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return marshalInt64(out, v.Int())
+	case reflect.Struct:
+		t := v.Type()
+
+		startingField := 0
+
+		// If the first element of the structure is a non-empty
+		// RawContents, then we don't bother serializing the rest.
+		if t.NumField() > 0 && t.Field(0).Type == rawContentsType {
+			s := v.Field(0)
+			if s.Len() > 0 {
+				bytes := make([]byte, s.Len())
+				for i := 0; i < s.Len(); i++ {
+					bytes[i] = uint8(s.Index(i).Uint())
+				}
+				/* The RawContents will contain the tag and
+				 * length fields but we'll also be writing
+				 * those ourselves, so we strip them out of
+				 * bytes */
+				_, err = out.Write(stripTagAndLength(bytes))
+				return
+			} else {
+				startingField = 1
+			}
+		}
+
+		for i := startingField; i < t.NumField(); i++ {
+			var pre *forkableWriter
+			pre, out = out.fork()
+			err = marshalField(pre, v.Field(i), parseFieldParameters(t.Field(i).Tag.Get("asn1")))
+			if err != nil {
+				return
+			}
+		}
+		return
+	case reflect.Slice:
+		sliceType := v.Type()
+		if sliceType.Elem().Kind() == reflect.Uint8 {
+			bytes := make([]byte, v.Len())
+			for i := 0; i < v.Len(); i++ {
+				bytes[i] = uint8(v.Index(i).Uint())
+			}
+			_, err = out.Write(bytes)
+			return
+		}
+
+		// jtasn1: pass the tags on to the members, but unset the
+		// explicit flag and the implicit tag value first.
+		params.explicit = false
+		params.tag = nil
+		for i := 0; i < v.Len(); i++ {
+			var pre *forkableWriter
+			pre, out = out.fork()
+			err = marshalField(pre, v.Index(i), params)
+			if err != nil {
+				return
+			}
+		}
+		return
+	case reflect.String:
+		switch params.stringType {
+		case TagIA5String:
+			return marshalIA5String(out, v.String())
+		case TagPrintableString:
+			return marshalPrintableString(out, v.String())
+		default:
+			return marshalUTF8String(out, v.String())
+		}
+	}
+
+	return StructuralError{"unknown Go type"}
+}
+
+func marshalField(out *forkableWriter, v reflect.Value, params fieldParameters) (err error) {
+	if !v.IsValid() {
+		return fmt.Errorf("asn1: cannot marshal nil value")
+	}
+	// If the field is an interface{} then recurse into it.
+	if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 {
+		return marshalField(out, v.Elem(), params)
+	}
+
+	if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty {
+		return
+	}
+
+	if params.optional && params.defaultValue != nil && canHaveDefaultValue(v.Kind()) {
+		defaultValue := reflect.New(v.Type()).Elem()
+		defaultValue.SetInt(*params.defaultValue)
+
+		if reflect.DeepEqual(v.Interface(), defaultValue.Interface()) {
+			return
+		}
+	}
+
+	// If no default value is given then the zero value for the type is
+	// assumed to be the default value. This isn't obviously the correct
+	// behaviour, but it's what Go has traditionally done.
+	if params.optional && params.defaultValue == nil {
+		if reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) {
+			return
+		}
+	}
+
+	if v.Type() == rawValueType {
+		rv := v.Interface().(RawValue)
+		if len(rv.FullBytes) != 0 {
+			_, err = out.Write(rv.FullBytes)
+		} else {
+			err = marshalTagAndLength(out, tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound})
+			if err != nil {
+				return
+			}
+			_, err = out.Write(rv.Bytes)
+		}
+		return
+	}
+
+	tag, isCompound, ok := getUniversalType(v.Type())
+	if !ok {
+		err = StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type())}
+		return
+	}
+	class := ClassUniversal
+
+	if params.timeType != 0 && tag != TagUTCTime {
+		return StructuralError{"explicit time type given to non-time member"}
+	}
+
+	// jtasn1 updated to allow slices of strings
+	if params.stringType != 0 && !(tag == TagPrintableString || (v.Kind() == reflect.Slice && tag == 16 && v.Type().Elem().Kind() == reflect.String)) {
+		return StructuralError{"explicit string type given to non-string member"}
+	}
+
+	switch tag {
+	case TagPrintableString:
+		if params.stringType == 0 {
+			// This is a string without an explicit string type. We'll use
+			// a PrintableString if the character set in the string is
+			// sufficiently limited, otherwise we'll use a UTF8String.
+			for _, r := range v.String() {
+				if r >= utf8.RuneSelf || !isPrintable(byte(r)) {
+					if !utf8.ValidString(v.String()) {
+						return errors.New("asn1: string not valid UTF-8")
+					}
+					tag = TagUTF8String
+					break
+				}
+			}
+		} else {
+			tag = params.stringType
+		}
+	case TagUTCTime:
+		if params.timeType == TagGeneralizedTime || outsideUTCRange(v.Interface().(time.Time)) {
+			tag = TagGeneralizedTime
+		}
+	}
+
+	if params.set {
+		if tag != TagSequence {
+			return StructuralError{"non sequence tagged as set"}
+		}
+		tag = TagSet
+	}
+
+	tags, body := out.fork()
+
+	err = marshalBody(body, v, params)
+	if err != nil {
+		return
+	}
+
+	bodyLen := body.Len()
+
+	var explicitTag *forkableWriter
+	if params.explicit {
+		explicitTag, tags = tags.fork()
+	}
+
+	if !params.explicit && params.tag != nil {
+		// implicit tag.
+		tag = *params.tag
+		class = ClassContextSpecific
+	}
+
+	err = marshalTagAndLength(tags, tagAndLength{class, tag, bodyLen, isCompound})
+	if err != nil {
+		return
+	}
+
+	if params.explicit {
+		err = marshalTagAndLength(explicitTag, tagAndLength{
+			class:      ClassContextSpecific,
+			tag:        *params.tag,
+			length:     bodyLen + tags.Len(),
+			isCompound: true,
+		})
+	}
+
+	return err
+}
+
+// Marshal returns the ASN.1 encoding of val.
+//
+// In addition to the struct tags recognised by Unmarshal, the following can be
+// used:
+//
+//	ia5:		causes strings to be marshaled as ASN.1 IA5String values
+//	omitempty:	causes empty slices to be skipped
+//	printable:	causes strings to be marshaled as ASN.1 PrintableString values
+//	utf8:		causes strings to be marshaled as ASN.1 UTF8String values
+func Marshal(val interface{}) ([]byte, error) {
+	var out bytes.Buffer
+	v := reflect.ValueOf(val)
+	f := newForkableWriter()
+	err := marshalField(f, v, fieldParameters{})
+	if err != nil {
+		return nil, err
+	}
+	_, err = f.writeTo(&out)
+	return out.Bytes(), err
+}
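+
+// A minimal sketch of the slice-of-strings support this fork adds (the
+// struct is hypothetical): the generalstring tag is passed through to each
+// element, so every member is tagged as a GeneralString:
+//
+//	type Principal struct {
+//		Names []string `asn1:"generalstring"`
+//	}
+//	der, err := Marshal(Principal{Names: []string{"svc", "example.com"}})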
diff --git a/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go b/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go
new file mode 100644
index 0000000..75d4187
--- /dev/null
+++ b/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go
@@ -0,0 +1,98 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC
+2898 / PKCS #5 v2.0.
+
+A key derivation function is useful when encrypting data based on a password
+or any other not-fully-random data. It uses a pseudorandom function to derive
+a secure encryption key based on the password.
+
+While v2.0 of the standard defines only one pseudorandom function to use,
+HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved
+Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
+choose, you can pass the `New` functions from the different SHA packages to
+pbkdf2.Key.
+*/
+package pbkdf2
+
+import (
+	"crypto/hmac"
+	"hash"
+)
+
+// Key derives a key from the password, salt and iteration count, returning a
+// []byte of length keyLen that can be used as a cryptographic key. The key is
+// derived based on the method described as PBKDF2 with the HMAC variant using
+// the supplied hash function.
+//
+// For example, to use an HMAC-SHA-1-based PBKDF2 key derivation function, you
+// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by
+// doing:
+//
+// 	dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New)
+//
+// Remember to get a good random salt. At least 8 bytes is recommended by the
+// RFC.
+//
+// Using a higher iteration count will increase the cost of an exhaustive
+// search but will also make derivation proportionally slower.
+func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte {
+	return Key64(password, salt, int64(iter), int64(keyLen), h)
+}
+
+// Key64 derives a key from the password, salt and iteration count, returning a
+// []byte of length keyLen that can be used as a cryptographic key. Key64 uses
+// int64 for the iteration count and key length to allow larger values.
+// The key is derived based on the method described as PBKDF2 with the HMAC
+// variant using the supplied hash function.
+//
+// For example, to use an HMAC-SHA-1-based PBKDF2 key derivation function, you
+// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by
+// doing:
+//
+// 	dk := pbkdf2.Key64([]byte("some password"), salt, 4096, 32, sha1.New)
+//
+// Remember to get a good random salt. At least 8 bytes is recommended by the
+// RFC.
+//
+// Using a higher iteration count will increase the cost of an exhaustive
+// search but will also make derivation proportionally slower.
+func Key64(password, salt []byte, iter, keyLen int64, h func() hash.Hash) []byte {
+	prf := hmac.New(h, password)
+	hashLen := int64(prf.Size())
+	numBlocks := (keyLen + hashLen - 1) / hashLen
+
+	var buf [4]byte
+	dk := make([]byte, 0, numBlocks*hashLen)
+	U := make([]byte, hashLen)
+	for block := int64(1); block <= numBlocks; block++ {
+		// N.B.: || means concatenation, ^ means XOR
+		// for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter
+		// U_1 = PRF(password, salt || uint(i))
+		prf.Reset()
+		prf.Write(salt)
+		buf[0] = byte(block >> 24)
+		buf[1] = byte(block >> 16)
+		buf[2] = byte(block >> 8)
+		buf[3] = byte(block)
+		prf.Write(buf[:4])
+		dk = prf.Sum(dk)
+		T := dk[int64(len(dk))-hashLen:]
+		copy(U, T)
+
+		// U_n = PRF(password, U_(n-1))
+		for n := int64(2); n <= iter; n++ {
+			prf.Reset()
+			prf.Write(U)
+			U = U[:0]
+			U = prf.Sum(U)
+			for x := range U {
+				T[x] ^= U[x]
+			}
+		}
+	}
+	return dk[:keyLen]
+}
diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml
deleted file mode 100644
index 955dc0b..0000000
--- a/vendor/github.com/json-iterator/go/.codecov.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-ignore:
-    - "output_tests/.*"
-
diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore
deleted file mode 100644
index 1555653..0000000
--- a/vendor/github.com/json-iterator/go/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-/vendor
-/bug_test.go
-/coverage.txt
-/.idea
diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml
deleted file mode 100644
index 449e67c..0000000
--- a/vendor/github.com/json-iterator/go/.travis.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-language: go
-
-go:
-  - 1.8.x
-  - 1.x
-
-before_install:
-  - go get -t -v ./...
-
-script:
-  - ./test.sh
-
-after_success:
-  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock
deleted file mode 100644
index c8a9fbb..0000000
--- a/vendor/github.com/json-iterator/go/Gopkg.lock
+++ /dev/null
@@ -1,21 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
-  name = "github.com/modern-go/concurrent"
-  packages = ["."]
-  revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
-  version = "1.0.0"
-
-[[projects]]
-  name = "github.com/modern-go/reflect2"
-  packages = ["."]
-  revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
-  version = "1.0.1"
-
-[solve-meta]
-  analyzer-name = "dep"
-  analyzer-version = 1
-  inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8"
-  solver-name = "gps-cdcl"
-  solver-version = 1
diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml
deleted file mode 100644
index 313a0f8..0000000
--- a/vendor/github.com/json-iterator/go/Gopkg.toml
+++ /dev/null
@@ -1,26 +0,0 @@
-# Gopkg.toml example
-#
-# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
-# for detailed Gopkg.toml documentation.
-#
-# required = ["github.com/user/thing/cmd/thing"]
-# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
-#
-# [[constraint]]
-#   name = "github.com/user/project"
-#   version = "1.0.0"
-#
-# [[constraint]]
-#   name = "github.com/user/project2"
-#   branch = "dev"
-#   source = "github.com/myfork/project2"
-#
-# [[override]]
-#  name = "github.com/x/y"
-#  version = "2.4.0"
-
-ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"]
-
-[[constraint]]
-  name = "github.com/modern-go/reflect2"
-  version = "1.0.1"
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md
deleted file mode 100644
index 50d56ff..0000000
--- a/vendor/github.com/json-iterator/go/README.md
+++ /dev/null
@@ -1,87 +0,0 @@
-[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge)
-[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go)
-[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go)
-[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go)
-[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go)
-[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE)
-[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
-
-A high-performance 100% compatible drop-in replacement of "encoding/json"
-
-You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go)
-
-# Benchmark
-
-![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)
-
-Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go
-
-Raw Result (easyjson requires static code generation)
-
-| | ns/op | allocation bytes | allocation times |
-| --- | --- | --- | --- |
-| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
-| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
-| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
-| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
-| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
-| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
-
-Always benchmark with your own workload. 
-The result depends heavily on the data input.
-
-# Usage
-
-100% compatibility with standard lib
-
-Replace
-
-```go
-import "encoding/json"
-json.Marshal(&data)
-```
-
-with 
-
-```go
-import "github.com/json-iterator/go"
-
-var json = jsoniter.ConfigCompatibleWithStandardLibrary
-json.Marshal(&data)
-```
-
-Replace
-
-```go
-import "encoding/json"
-json.Unmarshal(input, &data)
-```
-
-with
-
-```go
-import "github.com/json-iterator/go"
-
-var json = jsoniter.ConfigCompatibleWithStandardLibrary
-json.Unmarshal(input, &data)
-```
-
-[More documentation](http://jsoniter.com/migrate-from-go-std.html)
-
-# How to get
-
-```
-go get github.com/json-iterator/go
-```
-
-# Contribution Welcomed !
-
-Contributors
-
-* [thockin](https://github.com/thockin) 
-* [mattn](https://github.com/mattn)
-* [cch123](https://github.com/cch123)
-* [Oleg Shaldybin](https://github.com/olegshaldybin)
-* [Jason Toffaletti](https://github.com/toffaletti)
-
-Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go
deleted file mode 100644
index e674d0f..0000000
--- a/vendor/github.com/json-iterator/go/adapter.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package jsoniter
-
-import (
-	"bytes"
-	"io"
-)
-
-// RawMessage to make replace json with jsoniter
-type RawMessage []byte
-
-// Unmarshal adapts to the encoding/json Unmarshal API.
-//
-// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.
-// Refer to https://godoc.org/encoding/json#Unmarshal for more information
-func Unmarshal(data []byte, v interface{}) error {
-	return ConfigDefault.Unmarshal(data, v)
-}
-
-// UnmarshalFromString is a convenience method that reads from a string instead of a []byte.
-func UnmarshalFromString(str string, v interface{}) error {
-	return ConfigDefault.UnmarshalFromString(str, v)
-}
-
-// Get is a quick method to fetch a value from a deeply nested JSON structure.
-func Get(data []byte, path ...interface{}) Any {
-	return ConfigDefault.Get(data, path...)
-}
-
-// Marshal adapts to the encoding/json Marshal API.
-//
-// Marshal returns the JSON encoding of v.
-// Refer to https://godoc.org/encoding/json#Marshal for more information
-func Marshal(v interface{}) ([]byte, error) {
-	return ConfigDefault.Marshal(v)
-}
-
-// MarshalIndent is the same as json.MarshalIndent. Prefix is not supported.
-func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
-	return ConfigDefault.MarshalIndent(v, prefix, indent)
-}
-
-// MarshalToString is a convenience method that writes to a string instead of a []byte.
-func MarshalToString(v interface{}) (string, error) {
-	return ConfigDefault.MarshalToString(v)
-}
-
-// NewDecoder adapts to the encoding/json NewDecoder API.
-//
-// NewDecoder returns a new decoder that reads from r.
-//
-// Instead of an encoding/json Decoder, a jsoniter Decoder is returned.
-// Refer to https://godoc.org/encoding/json#NewDecoder for more information
-func NewDecoder(reader io.Reader) *Decoder {
-	return ConfigDefault.NewDecoder(reader)
-}
-
-// Decoder reads and decodes JSON values from an input stream.
-// Decoder provides APIs identical to the encoding/json Decoder (Token() and UseNumber() are in progress).
-type Decoder struct {
-	iter *Iterator
-}
-
-// Decode decodes the next JSON value into obj.
-func (adapter *Decoder) Decode(obj interface{}) error {
-	if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil {
-		if !adapter.iter.loadMore() {
-			return io.EOF
-		}
-	}
-	adapter.iter.ReadVal(obj)
-	err := adapter.iter.Error
-	if err == io.EOF {
-		return nil
-	}
-	return adapter.iter.Error
-}
-
-// More reports whether there is another element in the current stream.
-func (adapter *Decoder) More() bool {
-	iter := adapter.iter
-	if iter.Error != nil {
-		return false
-	}
-	c := iter.nextToken()
-	if c == 0 {
-		return false
-	}
-	iter.unreadByte()
-	return c != ']' && c != '}'
-}
-
-// Buffered returns a reader over the data remaining in the internal buffer.
-func (adapter *Decoder) Buffered() io.Reader {
-	remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail]
-	return bytes.NewReader(remaining)
-}
-
-// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
-// Number instead of as a float64.
-func (adapter *Decoder) UseNumber() {
-	cfg := adapter.iter.cfg.configBeforeFrozen
-	cfg.UseNumber = true
-	adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
-}
-
-// DisallowUnknownFields causes the Decoder to return an error when the destination
-// is a struct and the input contains object keys which do not match any
-// non-ignored, exported fields in the destination.
-func (adapter *Decoder) DisallowUnknownFields() {
-	cfg := adapter.iter.cfg.configBeforeFrozen
-	cfg.DisallowUnknownFields = true
-	adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
-}
-
-// NewEncoder same as json.NewEncoder
-func NewEncoder(writer io.Writer) *Encoder {
-	return ConfigDefault.NewEncoder(writer)
-}
-
-// Encoder same as json.Encoder
-type Encoder struct {
-	stream *Stream
-}
-
-// Encode encodes val as JSON to the underlying io.Writer.
-func (adapter *Encoder) Encode(val interface{}) error {
-	adapter.stream.WriteVal(val)
-	adapter.stream.WriteRaw("\n")
-	adapter.stream.Flush()
-	return adapter.stream.Error
-}
-
-// SetIndent sets the indentation. Prefix is not supported.
-func (adapter *Encoder) SetIndent(prefix, indent string) {
-	config := adapter.stream.cfg.configBeforeFrozen
-	config.IndentionStep = len(indent)
-	adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
-}
-
-// SetEscapeHTML controls HTML escaping (enabled by default); set to false to disable it.
-func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
-	config := adapter.stream.cfg.configBeforeFrozen
-	config.EscapeHTML = escapeHTML
-	adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
-}
-
-// Valid reports whether data is a valid JSON encoding.
-func Valid(data []byte) bool {
-	return ConfigDefault.Valid(data)
-}
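The adapter above layers encoding/json's streaming Decoder and Encoder over jsoniter's Iterator and Stream; the option setters re-freeze the underlying config. A small usage sketch (input values and writer choice are illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"os"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Decode a stream of JSON values; More() peeks at the next token.
	dec := jsoniter.NewDecoder(bytes.NewReader([]byte(`{"n": 1} {"n": 2}`)))
	dec.UseNumber() // interface{} numbers decode as json.Number
	for dec.More() {
		var v map[string]interface{}
		if err := dec.Decode(&v); err != nil {
			panic(err)
		}
		fmt.Println(v)
	}

	// Encode with two-space indentation and HTML escaping disabled.
	enc := jsoniter.NewEncoder(os.Stdout)
	enc.SetEscapeHTML(false)
	enc.SetIndent("", "  ") // prefix must be empty; indent must be spaces
	_ = enc.Encode(map[string]string{"html": "<b>"})
}
```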
diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go
deleted file mode 100644
index f6b8aea..0000000
--- a/vendor/github.com/json-iterator/go/any.go
+++ /dev/null
@@ -1,325 +0,0 @@
-package jsoniter
-
-import (
-	"errors"
-	"fmt"
-	"github.com/modern-go/reflect2"
-	"io"
-	"reflect"
-	"strconv"
-	"unsafe"
-)
-
-// Any is a generic object representation.
-// The lazy JSON implementation holds a []byte and parses lazily.
-type Any interface {
-	LastError() error
-	ValueType() ValueType
-	MustBeValid() Any
-	ToBool() bool
-	ToInt() int
-	ToInt32() int32
-	ToInt64() int64
-	ToUint() uint
-	ToUint32() uint32
-	ToUint64() uint64
-	ToFloat32() float32
-	ToFloat64() float64
-	ToString() string
-	ToVal(val interface{})
-	Get(path ...interface{}) Any
-	Size() int
-	Keys() []string
-	GetInterface() interface{}
-	WriteTo(stream *Stream)
-}
-
-type baseAny struct{}
-
-func (any *baseAny) Get(path ...interface{}) Any {
-	return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
-}
-
-func (any *baseAny) Size() int {
-	return 0
-}
-
-func (any *baseAny) Keys() []string {
-	return []string{}
-}
-
-func (any *baseAny) ToVal(obj interface{}) {
-	panic("not implemented")
-}
-
-// WrapInt32 turns an int32 into the Any interface.
-func WrapInt32(val int32) Any {
-	return &int32Any{baseAny{}, val}
-}
-
-// WrapInt64 turns an int64 into the Any interface.
-func WrapInt64(val int64) Any {
-	return &int64Any{baseAny{}, val}
-}
-
-// WrapUint32 turns a uint32 into the Any interface.
-func WrapUint32(val uint32) Any {
-	return &uint32Any{baseAny{}, val}
-}
-
-// WrapUint64 turns a uint64 into the Any interface.
-func WrapUint64(val uint64) Any {
-	return &uint64Any{baseAny{}, val}
-}
-
-// WrapFloat64 turns a float64 into the Any interface.
-func WrapFloat64(val float64) Any {
-	return &floatAny{baseAny{}, val}
-}
-
-// WrapString turns a string into the Any interface.
-func WrapString(val string) Any {
-	return &stringAny{baseAny{}, val}
-}
-
-// Wrap turns a Go object into the Any interface.
-func Wrap(val interface{}) Any {
-	if val == nil {
-		return &nilAny{}
-	}
-	asAny, isAny := val.(Any)
-	if isAny {
-		return asAny
-	}
-	typ := reflect2.TypeOf(val)
-	switch typ.Kind() {
-	case reflect.Slice:
-		return wrapArray(val)
-	case reflect.Struct:
-		return wrapStruct(val)
-	case reflect.Map:
-		return wrapMap(val)
-	case reflect.String:
-		return WrapString(val.(string))
-	case reflect.Int:
-		if strconv.IntSize == 32 {
-			return WrapInt32(int32(val.(int)))
-		}
-		return WrapInt64(int64(val.(int)))
-	case reflect.Int8:
-		return WrapInt32(int32(val.(int8)))
-	case reflect.Int16:
-		return WrapInt32(int32(val.(int16)))
-	case reflect.Int32:
-		return WrapInt32(val.(int32))
-	case reflect.Int64:
-		return WrapInt64(val.(int64))
-	case reflect.Uint:
-		if strconv.IntSize == 32 {
-			return WrapUint32(uint32(val.(uint)))
-		}
-		return WrapUint64(uint64(val.(uint)))
-	case reflect.Uintptr:
-		if ptrSize == 32 {
-			return WrapUint32(uint32(val.(uintptr)))
-		}
-		return WrapUint64(uint64(val.(uintptr)))
-	case reflect.Uint8:
-		return WrapUint32(uint32(val.(uint8)))
-	case reflect.Uint16:
-		return WrapUint32(uint32(val.(uint16)))
-	case reflect.Uint32:
-		return WrapUint32(uint32(val.(uint32)))
-	case reflect.Uint64:
-		return WrapUint64(val.(uint64))
-	case reflect.Float32:
-		return WrapFloat64(float64(val.(float32)))
-	case reflect.Float64:
-		return WrapFloat64(val.(float64))
-	case reflect.Bool:
-		if val.(bool) {
-			return &trueAny{}
-		}
-		return &falseAny{}
-	}
-	return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)}
-}
-
-// ReadAny reads the next JSON element as an Any object. It is a better json.RawMessage.
-func (iter *Iterator) ReadAny() Any {
-	return iter.readAny()
-}
-
-func (iter *Iterator) readAny() Any {
-	c := iter.nextToken()
-	switch c {
-	case '"':
-		iter.unreadByte()
-		return &stringAny{baseAny{}, iter.ReadString()}
-	case 'n':
-		iter.skipThreeBytes('u', 'l', 'l') // null
-		return &nilAny{}
-	case 't':
-		iter.skipThreeBytes('r', 'u', 'e') // true
-		return &trueAny{}
-	case 'f':
-		iter.skipFourBytes('a', 'l', 's', 'e') // false
-		return &falseAny{}
-	case '{':
-		return iter.readObjectAny()
-	case '[':
-		return iter.readArrayAny()
-	case '-':
-		return iter.readNumberAny(false)
-	case 0:
-		return &invalidAny{baseAny{}, errors.New("input is empty")}
-	default:
-		return iter.readNumberAny(true)
-	}
-}
-
-func (iter *Iterator) readNumberAny(positive bool) Any {
-	iter.startCapture(iter.head - 1)
-	iter.skipNumber()
-	lazyBuf := iter.stopCapture()
-	return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
-}
-
-func (iter *Iterator) readObjectAny() Any {
-	iter.startCapture(iter.head - 1)
-	iter.skipObject()
-	lazyBuf := iter.stopCapture()
-	return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
-}
-
-func (iter *Iterator) readArrayAny() Any {
-	iter.startCapture(iter.head - 1)
-	iter.skipArray()
-	lazyBuf := iter.stopCapture()
-	return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
-}
-
-func locateObjectField(iter *Iterator, target string) []byte {
-	var found []byte
-	iter.ReadObjectCB(func(iter *Iterator, field string) bool {
-		if field == target {
-			found = iter.SkipAndReturnBytes()
-			return false
-		}
-		iter.Skip()
-		return true
-	})
-	return found
-}
-
-func locateArrayElement(iter *Iterator, target int) []byte {
-	var found []byte
-	n := 0
-	iter.ReadArrayCB(func(iter *Iterator) bool {
-		if n == target {
-			found = iter.SkipAndReturnBytes()
-			return false
-		}
-		iter.Skip()
-		n++
-		return true
-	})
-	return found
-}
-
-func locatePath(iter *Iterator, path []interface{}) Any {
-	for i, pathKeyObj := range path {
-		switch pathKey := pathKeyObj.(type) {
-		case string:
-			valueBytes := locateObjectField(iter, pathKey)
-			if valueBytes == nil {
-				return newInvalidAny(path[i:])
-			}
-			iter.ResetBytes(valueBytes)
-		case int:
-			valueBytes := locateArrayElement(iter, pathKey)
-			if valueBytes == nil {
-				return newInvalidAny(path[i:])
-			}
-			iter.ResetBytes(valueBytes)
-		case int32:
-			if '*' == pathKey {
-				return iter.readAny().Get(path[i:]...)
-			}
-			return newInvalidAny(path[i:])
-		default:
-			return newInvalidAny(path[i:])
-		}
-	}
-	if iter.Error != nil && iter.Error != io.EOF {
-		return &invalidAny{baseAny{}, iter.Error}
-	}
-	return iter.readAny()
-}
-
-var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem()
-
-func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder {
-	if typ == anyType {
-		return &directAnyCodec{}
-	}
-	if typ.Implements(anyType) {
-		return &anyCodec{
-			valType: typ,
-		}
-	}
-	return nil
-}
-
-func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder {
-	if typ == anyType {
-		return &directAnyCodec{}
-	}
-	if typ.Implements(anyType) {
-		return &anyCodec{
-			valType: typ,
-		}
-	}
-	return nil
-}
-
-type anyCodec struct {
-	valType reflect2.Type
-}
-
-func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	panic("not implemented")
-}
-
-func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
-	obj := codec.valType.UnsafeIndirect(ptr)
-	any := obj.(Any)
-	any.WriteTo(stream)
-}
-
-func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool {
-	obj := codec.valType.UnsafeIndirect(ptr)
-	any := obj.(Any)
-	return any.Size() == 0
-}
-
-type directAnyCodec struct {
-}
-
-func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	*(*Any)(ptr) = iter.readAny()
-}
-
-func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
-	any := *(*Any)(ptr)
-	if any == nil {
-		stream.WriteNil()
-		return
-	}
-	any.WriteTo(stream)
-}
-
-func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool {
-	any := *(*Any)(ptr)
-	return any.Size() == 0
-}
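Get and locatePath above walk a path made of object fields (string), array indexes (int), and the '*' wildcard (a rune, hence the int32 case), capturing bytes lazily along the way. A brief sketch (the sample document is made up):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	data := []byte(`{"users":[{"name":"ann"},{"name":"bob"}]}`)

	// Strings select object fields, ints select array elements.
	fmt.Println(jsoniter.Get(data, "users", 0, "name").ToString()) // ann

	// The rune '*' fans out over every element at that level.
	names := jsoniter.Get(data, "users", '*', "name")
	fmt.Println(names.ToString()) // ["ann","bob"]
}
```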
diff --git a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go
deleted file mode 100644
index 0449e9a..0000000
--- a/vendor/github.com/json-iterator/go/any_array.go
+++ /dev/null
@@ -1,278 +0,0 @@
-package jsoniter
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-type arrayLazyAny struct {
-	baseAny
-	cfg *frozenConfig
-	buf []byte
-	err error
-}
-
-func (any *arrayLazyAny) ValueType() ValueType {
-	return ArrayValue
-}
-
-func (any *arrayLazyAny) MustBeValid() Any {
-	return any
-}
-
-func (any *arrayLazyAny) LastError() error {
-	return any.err
-}
-
-func (any *arrayLazyAny) ToBool() bool {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	return iter.ReadArray()
-}
-
-func (any *arrayLazyAny) ToInt() int {
-	if any.ToBool() {
-		return 1
-	}
-	return 0
-}
-
-func (any *arrayLazyAny) ToInt32() int32 {
-	if any.ToBool() {
-		return 1
-	}
-	return 0
-}
-
-func (any *arrayLazyAny) ToInt64() int64 {
-	if any.ToBool() {
-		return 1
-	}
-	return 0
-}
-
-func (any *arrayLazyAny) ToUint() uint {
-	if any.ToBool() {
-		return 1
-	}
-	return 0
-}
-
-func (any *arrayLazyAny) ToUint32() uint32 {
-	if any.ToBool() {
-		return 1
-	}
-	return 0
-}
-
-func (any *arrayLazyAny) ToUint64() uint64 {
-	if any.ToBool() {
-		return 1
-	}
-	return 0
-}
-
-func (any *arrayLazyAny) ToFloat32() float32 {
-	if any.ToBool() {
-		return 1
-	}
-	return 0
-}
-
-func (any *arrayLazyAny) ToFloat64() float64 {
-	if any.ToBool() {
-		return 1
-	}
-	return 0
-}
-
-func (any *arrayLazyAny) ToString() string {
-	return *(*string)(unsafe.Pointer(&any.buf))
-}
-
-func (any *arrayLazyAny) ToVal(val interface{}) {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	iter.ReadVal(val)
-}
-
-func (any *arrayLazyAny) Get(path ...interface{}) Any {
-	if len(path) == 0 {
-		return any
-	}
-	switch firstPath := path[0].(type) {
-	case int:
-		iter := any.cfg.BorrowIterator(any.buf)
-		defer any.cfg.ReturnIterator(iter)
-		valueBytes := locateArrayElement(iter, firstPath)
-		if valueBytes == nil {
-			return newInvalidAny(path)
-		}
-		iter.ResetBytes(valueBytes)
-		return locatePath(iter, path[1:])
-	case int32:
-		if '*' == firstPath {
-			iter := any.cfg.BorrowIterator(any.buf)
-			defer any.cfg.ReturnIterator(iter)
-			arr := make([]Any, 0)
-			iter.ReadArrayCB(func(iter *Iterator) bool {
-				found := iter.readAny().Get(path[1:]...)
-				if found.ValueType() != InvalidValue {
-					arr = append(arr, found)
-				}
-				return true
-			})
-			return wrapArray(arr)
-		}
-		return newInvalidAny(path)
-	default:
-		return newInvalidAny(path)
-	}
-}
-
-func (any *arrayLazyAny) Size() int {
-	size := 0
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	iter.ReadArrayCB(func(iter *Iterator) bool {
-		size++
-		iter.Skip()
-		return true
-	})
-	return size
-}
-
-func (any *arrayLazyAny) WriteTo(stream *Stream) {
-	stream.Write(any.buf)
-}
-
-func (any *arrayLazyAny) GetInterface() interface{} {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	return iter.Read()
-}
-
-type arrayAny struct {
-	baseAny
-	val reflect.Value
-}
-
-func wrapArray(val interface{}) *arrayAny {
-	return &arrayAny{baseAny{}, reflect.ValueOf(val)}
-}
-
-func (any *arrayAny) ValueType() ValueType {
-	return ArrayValue
-}
-
-func (any *arrayAny) MustBeValid() Any {
-	return any
-}
-
-func (any *arrayAny) LastError() error {
-	return nil
-}
-
-func (any *arrayAny) ToBool() bool {
-	return any.val.Len() != 0
-}
-
-func (any *arrayAny) ToInt() int {
-	if any.val.Len() == 0 {
-		return 0
-	}
-	return 1
-}
-
-func (any *arrayAny) ToInt32() int32 {
-	if any.val.Len() == 0 {
-		return 0
-	}
-	return 1
-}
-
-func (any *arrayAny) ToInt64() int64 {
-	if any.val.Len() == 0 {
-		return 0
-	}
-	return 1
-}
-
-func (any *arrayAny) ToUint() uint {
-	if any.val.Len() == 0 {
-		return 0
-	}
-	return 1
-}
-
-func (any *arrayAny) ToUint32() uint32 {
-	if any.val.Len() == 0 {
-		return 0
-	}
-	return 1
-}
-
-func (any *arrayAny) ToUint64() uint64 {
-	if any.val.Len() == 0 {
-		return 0
-	}
-	return 1
-}
-
-func (any *arrayAny) ToFloat32() float32 {
-	if any.val.Len() == 0 {
-		return 0
-	}
-	return 1
-}
-
-func (any *arrayAny) ToFloat64() float64 {
-	if any.val.Len() == 0 {
-		return 0
-	}
-	return 1
-}
-
-func (any *arrayAny) ToString() string {
-	str, _ := MarshalToString(any.val.Interface())
-	return str
-}
-
-func (any *arrayAny) Get(path ...interface{}) Any {
-	if len(path) == 0 {
-		return any
-	}
-	switch firstPath := path[0].(type) {
-	case int:
-		if firstPath < 0 || firstPath >= any.val.Len() {
-			return newInvalidAny(path)
-		}
-		return Wrap(any.val.Index(firstPath).Interface())
-	case int32:
-		if '*' == firstPath {
-			mappedAll := make([]Any, 0)
-			for i := 0; i < any.val.Len(); i++ {
-				mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...)
-				if mapped.ValueType() != InvalidValue {
-					mappedAll = append(mappedAll, mapped)
-				}
-			}
-			return wrapArray(mappedAll)
-		}
-		return newInvalidAny(path)
-	default:
-		return newInvalidAny(path)
-	}
-}
-
-func (any *arrayAny) Size() int {
-	return any.val.Len()
-}
-
-func (any *arrayAny) WriteTo(stream *Stream) {
-	stream.WriteVal(any.val)
-}
-
-func (any *arrayAny) GetInterface() interface{} {
-	return any.val.Interface()
-}
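Because arrayLazyAny re-parses its captured bytes on demand (the borrow/return pattern above), size, element access, and truthiness each cost one scan. For instance (values illustrative):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	arr := jsoniter.Get([]byte(`[10, 20, 30]`))
	fmt.Println(arr.Size())         // 3
	fmt.Println(arr.Get(1).ToInt()) // 20
	fmt.Println(arr.ToBool())       // true: array has at least one element

	fmt.Println(jsoniter.Get([]byte(`[]`)).ToBool()) // false: empty array
}
```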
diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go
deleted file mode 100644
index 9452324..0000000
--- a/vendor/github.com/json-iterator/go/any_bool.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package jsoniter
-
-type trueAny struct {
-	baseAny
-}
-
-func (any *trueAny) LastError() error {
-	return nil
-}
-
-func (any *trueAny) ToBool() bool {
-	return true
-}
-
-func (any *trueAny) ToInt() int {
-	return 1
-}
-
-func (any *trueAny) ToInt32() int32 {
-	return 1
-}
-
-func (any *trueAny) ToInt64() int64 {
-	return 1
-}
-
-func (any *trueAny) ToUint() uint {
-	return 1
-}
-
-func (any *trueAny) ToUint32() uint32 {
-	return 1
-}
-
-func (any *trueAny) ToUint64() uint64 {
-	return 1
-}
-
-func (any *trueAny) ToFloat32() float32 {
-	return 1
-}
-
-func (any *trueAny) ToFloat64() float64 {
-	return 1
-}
-
-func (any *trueAny) ToString() string {
-	return "true"
-}
-
-func (any *trueAny) WriteTo(stream *Stream) {
-	stream.WriteTrue()
-}
-
-func (any *trueAny) Parse() *Iterator {
-	return nil
-}
-
-func (any *trueAny) GetInterface() interface{} {
-	return true
-}
-
-func (any *trueAny) ValueType() ValueType {
-	return BoolValue
-}
-
-func (any *trueAny) MustBeValid() Any {
-	return any
-}
-
-type falseAny struct {
-	baseAny
-}
-
-func (any *falseAny) LastError() error {
-	return nil
-}
-
-func (any *falseAny) ToBool() bool {
-	return false
-}
-
-func (any *falseAny) ToInt() int {
-	return 0
-}
-
-func (any *falseAny) ToInt32() int32 {
-	return 0
-}
-
-func (any *falseAny) ToInt64() int64 {
-	return 0
-}
-
-func (any *falseAny) ToUint() uint {
-	return 0
-}
-
-func (any *falseAny) ToUint32() uint32 {
-	return 0
-}
-
-func (any *falseAny) ToUint64() uint64 {
-	return 0
-}
-
-func (any *falseAny) ToFloat32() float32 {
-	return 0
-}
-
-func (any *falseAny) ToFloat64() float64 {
-	return 0
-}
-
-func (any *falseAny) ToString() string {
-	return "false"
-}
-
-func (any *falseAny) WriteTo(stream *Stream) {
-	stream.WriteFalse()
-}
-
-func (any *falseAny) Parse() *Iterator {
-	return nil
-}
-
-func (any *falseAny) GetInterface() interface{} {
-	return false
-}
-
-func (any *falseAny) ValueType() ValueType {
-	return BoolValue
-}
-
-func (any *falseAny) MustBeValid() Any {
-	return any
-}
diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go
deleted file mode 100644
index 35fdb09..0000000
--- a/vendor/github.com/json-iterator/go/any_float.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package jsoniter
-
-import (
-	"strconv"
-)
-
-type floatAny struct {
-	baseAny
-	val float64
-}
-
-func (any *floatAny) Parse() *Iterator {
-	return nil
-}
-
-func (any *floatAny) ValueType() ValueType {
-	return NumberValue
-}
-
-func (any *floatAny) MustBeValid() Any {
-	return any
-}
-
-func (any *floatAny) LastError() error {
-	return nil
-}
-
-func (any *floatAny) ToBool() bool {
-	return any.ToFloat64() != 0
-}
-
-func (any *floatAny) ToInt() int {
-	return int(any.val)
-}
-
-func (any *floatAny) ToInt32() int32 {
-	return int32(any.val)
-}
-
-func (any *floatAny) ToInt64() int64 {
-	return int64(any.val)
-}
-
-func (any *floatAny) ToUint() uint {
-	if any.val > 0 {
-		return uint(any.val)
-	}
-	return 0
-}
-
-func (any *floatAny) ToUint32() uint32 {
-	if any.val > 0 {
-		return uint32(any.val)
-	}
-	return 0
-}
-
-func (any *floatAny) ToUint64() uint64 {
-	if any.val > 0 {
-		return uint64(any.val)
-	}
-	return 0
-}
-
-func (any *floatAny) ToFloat32() float32 {
-	return float32(any.val)
-}
-
-func (any *floatAny) ToFloat64() float64 {
-	return any.val
-}
-
-func (any *floatAny) ToString() string {
-	return strconv.FormatFloat(any.val, 'E', -1, 64)
-}
-
-func (any *floatAny) WriteTo(stream *Stream) {
-	stream.WriteFloat64(any.val)
-}
-
-func (any *floatAny) GetInterface() interface{} {
-	return any.val
-}
diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go
deleted file mode 100644
index 1b56f39..0000000
--- a/vendor/github.com/json-iterator/go/any_int32.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package jsoniter
-
-import (
-	"strconv"
-)
-
-type int32Any struct {
-	baseAny
-	val int32
-}
-
-func (any *int32Any) LastError() error {
-	return nil
-}
-
-func (any *int32Any) ValueType() ValueType {
-	return NumberValue
-}
-
-func (any *int32Any) MustBeValid() Any {
-	return any
-}
-
-func (any *int32Any) ToBool() bool {
-	return any.val != 0
-}
-
-func (any *int32Any) ToInt() int {
-	return int(any.val)
-}
-
-func (any *int32Any) ToInt32() int32 {
-	return any.val
-}
-
-func (any *int32Any) ToInt64() int64 {
-	return int64(any.val)
-}
-
-func (any *int32Any) ToUint() uint {
-	return uint(any.val)
-}
-
-func (any *int32Any) ToUint32() uint32 {
-	return uint32(any.val)
-}
-
-func (any *int32Any) ToUint64() uint64 {
-	return uint64(any.val)
-}
-
-func (any *int32Any) ToFloat32() float32 {
-	return float32(any.val)
-}
-
-func (any *int32Any) ToFloat64() float64 {
-	return float64(any.val)
-}
-
-func (any *int32Any) ToString() string {
-	return strconv.FormatInt(int64(any.val), 10)
-}
-
-func (any *int32Any) WriteTo(stream *Stream) {
-	stream.WriteInt32(any.val)
-}
-
-func (any *int32Any) Parse() *Iterator {
-	return nil
-}
-
-func (any *int32Any) GetInterface() interface{} {
-	return any.val
-}
diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go
deleted file mode 100644
index c440d72..0000000
--- a/vendor/github.com/json-iterator/go/any_int64.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package jsoniter
-
-import (
-	"strconv"
-)
-
-type int64Any struct {
-	baseAny
-	val int64
-}
-
-func (any *int64Any) LastError() error {
-	return nil
-}
-
-func (any *int64Any) ValueType() ValueType {
-	return NumberValue
-}
-
-func (any *int64Any) MustBeValid() Any {
-	return any
-}
-
-func (any *int64Any) ToBool() bool {
-	return any.val != 0
-}
-
-func (any *int64Any) ToInt() int {
-	return int(any.val)
-}
-
-func (any *int64Any) ToInt32() int32 {
-	return int32(any.val)
-}
-
-func (any *int64Any) ToInt64() int64 {
-	return any.val
-}
-
-func (any *int64Any) ToUint() uint {
-	return uint(any.val)
-}
-
-func (any *int64Any) ToUint32() uint32 {
-	return uint32(any.val)
-}
-
-func (any *int64Any) ToUint64() uint64 {
-	return uint64(any.val)
-}
-
-func (any *int64Any) ToFloat32() float32 {
-	return float32(any.val)
-}
-
-func (any *int64Any) ToFloat64() float64 {
-	return float64(any.val)
-}
-
-func (any *int64Any) ToString() string {
-	return strconv.FormatInt(any.val, 10)
-}
-
-func (any *int64Any) WriteTo(stream *Stream) {
-	stream.WriteInt64(any.val)
-}
-
-func (any *int64Any) Parse() *Iterator {
-	return nil
-}
-
-func (any *int64Any) GetInterface() interface{} {
-	return any.val
-}
diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go
deleted file mode 100644
index 1d859ea..0000000
--- a/vendor/github.com/json-iterator/go/any_invalid.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package jsoniter
-
-import "fmt"
-
-type invalidAny struct {
-	baseAny
-	err error
-}
-
-func newInvalidAny(path []interface{}) *invalidAny {
-	return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)}
-}
-
-func (any *invalidAny) LastError() error {
-	return any.err
-}
-
-func (any *invalidAny) ValueType() ValueType {
-	return InvalidValue
-}
-
-func (any *invalidAny) MustBeValid() Any {
-	panic(any.err)
-}
-
-func (any *invalidAny) ToBool() bool {
-	return false
-}
-
-func (any *invalidAny) ToInt() int {
-	return 0
-}
-
-func (any *invalidAny) ToInt32() int32 {
-	return 0
-}
-
-func (any *invalidAny) ToInt64() int64 {
-	return 0
-}
-
-func (any *invalidAny) ToUint() uint {
-	return 0
-}
-
-func (any *invalidAny) ToUint32() uint32 {
-	return 0
-}
-
-func (any *invalidAny) ToUint64() uint64 {
-	return 0
-}
-
-func (any *invalidAny) ToFloat32() float32 {
-	return 0
-}
-
-func (any *invalidAny) ToFloat64() float64 {
-	return 0
-}
-
-func (any *invalidAny) ToString() string {
-	return ""
-}
-
-func (any *invalidAny) WriteTo(stream *Stream) {
-}
-
-func (any *invalidAny) Get(path ...interface{}) Any {
-	if any.err == nil {
-		return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)}
-	}
-	return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)}
-}
-
-func (any *invalidAny) Parse() *Iterator {
-	return nil
-}
-
-func (any *invalidAny) GetInterface() interface{} {
-	return nil
-}
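invalidAny is the sentinel for missing or mistyped paths: every conversion yields a zero value, LastError carries the failure, and only MustBeValid panics. For example (input assumed):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	v := jsoniter.Get([]byte(`{}`), "missing")
	fmt.Println(v.ValueType() == jsoniter.InvalidValue) // true
	fmt.Println(v.ToInt())                              // 0
	fmt.Println(v.LastError())                          // [missing] not found

	// v.MustBeValid() would panic here; call it only on paths that must exist.
}
```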
diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go
deleted file mode 100644
index d04cb54..0000000
--- a/vendor/github.com/json-iterator/go/any_nil.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package jsoniter
-
-type nilAny struct {
-	baseAny
-}
-
-func (any *nilAny) LastError() error {
-	return nil
-}
-
-func (any *nilAny) ValueType() ValueType {
-	return NilValue
-}
-
-func (any *nilAny) MustBeValid() Any {
-	return any
-}
-
-func (any *nilAny) ToBool() bool {
-	return false
-}
-
-func (any *nilAny) ToInt() int {
-	return 0
-}
-
-func (any *nilAny) ToInt32() int32 {
-	return 0
-}
-
-func (any *nilAny) ToInt64() int64 {
-	return 0
-}
-
-func (any *nilAny) ToUint() uint {
-	return 0
-}
-
-func (any *nilAny) ToUint32() uint32 {
-	return 0
-}
-
-func (any *nilAny) ToUint64() uint64 {
-	return 0
-}
-
-func (any *nilAny) ToFloat32() float32 {
-	return 0
-}
-
-func (any *nilAny) ToFloat64() float64 {
-	return 0
-}
-
-func (any *nilAny) ToString() string {
-	return ""
-}
-
-func (any *nilAny) WriteTo(stream *Stream) {
-	stream.WriteNil()
-}
-
-func (any *nilAny) Parse() *Iterator {
-	return nil
-}
-
-func (any *nilAny) GetInterface() interface{} {
-	return nil
-}
diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go
deleted file mode 100644
index 9d1e901..0000000
--- a/vendor/github.com/json-iterator/go/any_number.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package jsoniter
-
-import (
-	"io"
-	"unsafe"
-)
-
-type numberLazyAny struct {
-	baseAny
-	cfg *frozenConfig
-	buf []byte
-	err error
-}
-
-func (any *numberLazyAny) ValueType() ValueType {
-	return NumberValue
-}
-
-func (any *numberLazyAny) MustBeValid() Any {
-	return any
-}
-
-func (any *numberLazyAny) LastError() error {
-	return any.err
-}
-
-func (any *numberLazyAny) ToBool() bool {
-	return any.ToFloat64() != 0
-}
-
-func (any *numberLazyAny) ToInt() int {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	val := iter.ReadInt()
-	if iter.Error != nil && iter.Error != io.EOF {
-		any.err = iter.Error
-	}
-	return val
-}
-
-func (any *numberLazyAny) ToInt32() int32 {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	val := iter.ReadInt32()
-	if iter.Error != nil && iter.Error != io.EOF {
-		any.err = iter.Error
-	}
-	return val
-}
-
-func (any *numberLazyAny) ToInt64() int64 {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	val := iter.ReadInt64()
-	if iter.Error != nil && iter.Error != io.EOF {
-		any.err = iter.Error
-	}
-	return val
-}
-
-func (any *numberLazyAny) ToUint() uint {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	val := iter.ReadUint()
-	if iter.Error != nil && iter.Error != io.EOF {
-		any.err = iter.Error
-	}
-	return val
-}
-
-func (any *numberLazyAny) ToUint32() uint32 {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	val := iter.ReadUint32()
-	if iter.Error != nil && iter.Error != io.EOF {
-		any.err = iter.Error
-	}
-	return val
-}
-
-func (any *numberLazyAny) ToUint64() uint64 {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	val := iter.ReadUint64()
-	if iter.Error != nil && iter.Error != io.EOF {
-		any.err = iter.Error
-	}
-	return val
-}
-
-func (any *numberLazyAny) ToFloat32() float32 {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	val := iter.ReadFloat32()
-	if iter.Error != nil && iter.Error != io.EOF {
-		any.err = iter.Error
-	}
-	return val
-}
-
-func (any *numberLazyAny) ToFloat64() float64 {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	val := iter.ReadFloat64()
-	if iter.Error != nil && iter.Error != io.EOF {
-		any.err = iter.Error
-	}
-	return val
-}
-
-func (any *numberLazyAny) ToString() string {
-	return *(*string)(unsafe.Pointer(&any.buf))
-}
-
-func (any *numberLazyAny) WriteTo(stream *Stream) {
-	stream.Write(any.buf)
-}
-
-func (any *numberLazyAny) GetInterface() interface{} {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	return iter.Read()
-}
diff --git a/vendor/github.com/json-iterator/go/any_object.go b/vendor/github.com/json-iterator/go/any_object.go
deleted file mode 100644
index c44ef5c..0000000
--- a/vendor/github.com/json-iterator/go/any_object.go
+++ /dev/null
@@ -1,374 +0,0 @@
-package jsoniter
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-type objectLazyAny struct {
-	baseAny
-	cfg *frozenConfig
-	buf []byte
-	err error
-}
-
-func (any *objectLazyAny) ValueType() ValueType {
-	return ObjectValue
-}
-
-func (any *objectLazyAny) MustBeValid() Any {
-	return any
-}
-
-func (any *objectLazyAny) LastError() error {
-	return any.err
-}
-
-func (any *objectLazyAny) ToBool() bool {
-	return true
-}
-
-func (any *objectLazyAny) ToInt() int {
-	return 0
-}
-
-func (any *objectLazyAny) ToInt32() int32 {
-	return 0
-}
-
-func (any *objectLazyAny) ToInt64() int64 {
-	return 0
-}
-
-func (any *objectLazyAny) ToUint() uint {
-	return 0
-}
-
-func (any *objectLazyAny) ToUint32() uint32 {
-	return 0
-}
-
-func (any *objectLazyAny) ToUint64() uint64 {
-	return 0
-}
-
-func (any *objectLazyAny) ToFloat32() float32 {
-	return 0
-}
-
-func (any *objectLazyAny) ToFloat64() float64 {
-	return 0
-}
-
-func (any *objectLazyAny) ToString() string {
-	return *(*string)(unsafe.Pointer(&any.buf))
-}
-
-func (any *objectLazyAny) ToVal(obj interface{}) {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	iter.ReadVal(obj)
-}
-
-func (any *objectLazyAny) Get(path ...interface{}) Any {
-	if len(path) == 0 {
-		return any
-	}
-	switch firstPath := path[0].(type) {
-	case string:
-		iter := any.cfg.BorrowIterator(any.buf)
-		defer any.cfg.ReturnIterator(iter)
-		valueBytes := locateObjectField(iter, firstPath)
-		if valueBytes == nil {
-			return newInvalidAny(path)
-		}
-		iter.ResetBytes(valueBytes)
-		return locatePath(iter, path[1:])
-	case int32:
-		if '*' == firstPath {
-			mappedAll := map[string]Any{}
-			iter := any.cfg.BorrowIterator(any.buf)
-			defer any.cfg.ReturnIterator(iter)
-			iter.ReadMapCB(func(iter *Iterator, field string) bool {
-				mapped := locatePath(iter, path[1:])
-				if mapped.ValueType() != InvalidValue {
-					mappedAll[field] = mapped
-				}
-				return true
-			})
-			return wrapMap(mappedAll)
-		}
-		return newInvalidAny(path)
-	default:
-		return newInvalidAny(path)
-	}
-}
-
-func (any *objectLazyAny) Keys() []string {
-	keys := []string{}
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	iter.ReadMapCB(func(iter *Iterator, field string) bool {
-		iter.Skip()
-		keys = append(keys, field)
-		return true
-	})
-	return keys
-}
-
-func (any *objectLazyAny) Size() int {
-	size := 0
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	iter.ReadObjectCB(func(iter *Iterator, field string) bool {
-		iter.Skip()
-		size++
-		return true
-	})
-	return size
-}
-
-func (any *objectLazyAny) WriteTo(stream *Stream) {
-	stream.Write(any.buf)
-}
-
-func (any *objectLazyAny) GetInterface() interface{} {
-	iter := any.cfg.BorrowIterator(any.buf)
-	defer any.cfg.ReturnIterator(iter)
-	return iter.Read()
-}
-
-type objectAny struct {
-	baseAny
-	err error
-	val reflect.Value
-}
-
-func wrapStruct(val interface{}) *objectAny {
-	return &objectAny{baseAny{}, nil, reflect.ValueOf(val)}
-}
-
-func (any *objectAny) ValueType() ValueType {
-	return ObjectValue
-}
-
-func (any *objectAny) MustBeValid() Any {
-	return any
-}
-
-func (any *objectAny) Parse() *Iterator {
-	return nil
-}
-
-func (any *objectAny) LastError() error {
-	return any.err
-}
-
-func (any *objectAny) ToBool() bool {
-	return any.val.NumField() != 0
-}
-
-func (any *objectAny) ToInt() int {
-	return 0
-}
-
-func (any *objectAny) ToInt32() int32 {
-	return 0
-}
-
-func (any *objectAny) ToInt64() int64 {
-	return 0
-}
-
-func (any *objectAny) ToUint() uint {
-	return 0
-}
-
-func (any *objectAny) ToUint32() uint32 {
-	return 0
-}
-
-func (any *objectAny) ToUint64() uint64 {
-	return 0
-}
-
-func (any *objectAny) ToFloat32() float32 {
-	return 0
-}
-
-func (any *objectAny) ToFloat64() float64 {
-	return 0
-}
-
-func (any *objectAny) ToString() string {
-	str, err := MarshalToString(any.val.Interface())
-	any.err = err
-	return str
-}
-
-func (any *objectAny) Get(path ...interface{}) Any {
-	if len(path) == 0 {
-		return any
-	}
-	switch firstPath := path[0].(type) {
-	case string:
-		field := any.val.FieldByName(firstPath)
-		if !field.IsValid() {
-			return newInvalidAny(path)
-		}
-		return Wrap(field.Interface())
-	case int32:
-		if '*' == firstPath {
-			mappedAll := map[string]Any{}
-			for i := 0; i < any.val.NumField(); i++ {
-				field := any.val.Field(i)
-				if field.CanInterface() {
-					mapped := Wrap(field.Interface()).Get(path[1:]...)
-					if mapped.ValueType() != InvalidValue {
-						mappedAll[any.val.Type().Field(i).Name] = mapped
-					}
-				}
-			}
-			return wrapMap(mappedAll)
-		}
-		return newInvalidAny(path)
-	default:
-		return newInvalidAny(path)
-	}
-}
-
-func (any *objectAny) Keys() []string {
-	keys := make([]string, 0, any.val.NumField())
-	for i := 0; i < any.val.NumField(); i++ {
-		keys = append(keys, any.val.Type().Field(i).Name)
-	}
-	return keys
-}
-
-func (any *objectAny) Size() int {
-	return any.val.NumField()
-}
-
-func (any *objectAny) WriteTo(stream *Stream) {
-	stream.WriteVal(any.val)
-}
-
-func (any *objectAny) GetInterface() interface{} {
-	return any.val.Interface()
-}
-
-type mapAny struct {
-	baseAny
-	err error
-	val reflect.Value
-}
-
-func wrapMap(val interface{}) *mapAny {
-	return &mapAny{baseAny{}, nil, reflect.ValueOf(val)}
-}
-
-func (any *mapAny) ValueType() ValueType {
-	return ObjectValue
-}
-
-func (any *mapAny) MustBeValid() Any {
-	return any
-}
-
-func (any *mapAny) Parse() *Iterator {
-	return nil
-}
-
-func (any *mapAny) LastError() error {
-	return any.err
-}
-
-func (any *mapAny) ToBool() bool {
-	return true
-}
-
-func (any *mapAny) ToInt() int {
-	return 0
-}
-
-func (any *mapAny) ToInt32() int32 {
-	return 0
-}
-
-func (any *mapAny) ToInt64() int64 {
-	return 0
-}
-
-func (any *mapAny) ToUint() uint {
-	return 0
-}
-
-func (any *mapAny) ToUint32() uint32 {
-	return 0
-}
-
-func (any *mapAny) ToUint64() uint64 {
-	return 0
-}
-
-func (any *mapAny) ToFloat32() float32 {
-	return 0
-}
-
-func (any *mapAny) ToFloat64() float64 {
-	return 0
-}
-
-func (any *mapAny) ToString() string {
-	str, err := MarshalToString(any.val.Interface())
-	any.err = err
-	return str
-}
-
-func (any *mapAny) Get(path ...interface{}) Any {
-	if len(path) == 0 {
-		return any
-	}
-	switch firstPath := path[0].(type) {
-	case int32:
-		if '*' == firstPath {
-			mappedAll := map[string]Any{}
-			for _, key := range any.val.MapKeys() {
-				keyAsStr := key.String()
-				element := Wrap(any.val.MapIndex(key).Interface())
-				mapped := element.Get(path[1:]...)
-				if mapped.ValueType() != InvalidValue {
-					mappedAll[keyAsStr] = mapped
-				}
-			}
-			return wrapMap(mappedAll)
-		}
-		return newInvalidAny(path)
-	default:
-		value := any.val.MapIndex(reflect.ValueOf(firstPath))
-		if !value.IsValid() {
-			return newInvalidAny(path)
-		}
-		return Wrap(value.Interface())
-	}
-}
-
-func (any *mapAny) Keys() []string {
-	keys := make([]string, 0, any.val.Len())
-	for _, key := range any.val.MapKeys() {
-		keys = append(keys, key.String())
-	}
-	return keys
-}
-
-func (any *mapAny) Size() int {
-	return any.val.Len()
-}
-
-func (any *mapAny) WriteTo(stream *Stream) {
-	stream.WriteVal(any.val)
-}
-
-func (any *mapAny) GetInterface() interface{} {
-	return any.val.Interface()
-}
diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go
deleted file mode 100644
index a4b93c7..0000000
--- a/vendor/github.com/json-iterator/go/any_str.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package jsoniter
-
-import (
-	"fmt"
-	"strconv"
-)
-
-type stringAny struct {
-	baseAny
-	val string
-}
-
-func (any *stringAny) Get(path ...interface{}) Any {
-	if len(path) == 0 {
-		return any
-	}
-	return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
-}
-
-func (any *stringAny) Parse() *Iterator {
-	return nil
-}
-
-func (any *stringAny) ValueType() ValueType {
-	return StringValue
-}
-
-func (any *stringAny) MustBeValid() Any {
-	return any
-}
-
-func (any *stringAny) LastError() error {
-	return nil
-}
-
-func (any *stringAny) ToBool() bool {
-	str := any.ToString()
-	if str == "0" {
-		return false
-	}
-	for _, c := range str {
-		switch c {
-		case ' ', '\n', '\r', '\t':
-		default:
-			return true
-		}
-	}
-	return false
-}
-
-func (any *stringAny) ToInt() int {
-	return int(any.ToInt64())
-}
-
-func (any *stringAny) ToInt32() int32 {
-	return int32(any.ToInt64())
-}
-
-func (any *stringAny) ToInt64() int64 {
-	if any.val == "" {
-		return 0
-	}
-
-	flag := 1
-	startPos := 0
-	endPos := 0
-	if any.val[0] == '+' || any.val[0] == '-' {
-		startPos = 1
-	}
-
-	if any.val[0] == '-' {
-		flag = -1
-	}
-
-	for i := startPos; i < len(any.val); i++ {
-		if any.val[i] >= '0' && any.val[i] <= '9' {
-			endPos = i + 1
-		} else {
-			break
-		}
-	}
-	parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64)
-	return int64(flag) * parsed
-}
-
-func (any *stringAny) ToUint() uint {
-	return uint(any.ToUint64())
-}
-
-func (any *stringAny) ToUint32() uint32 {
-	return uint32(any.ToUint64())
-}
-
-func (any *stringAny) ToUint64() uint64 {
-	if any.val == "" {
-		return 0
-	}
-
-	startPos := 0
-	endPos := 0
-
-	if any.val[0] == '-' {
-		return 0
-	}
-	if any.val[0] == '+' {
-		startPos = 1
-	}
-
-	for i := startPos; i < len(any.val); i++ {
-		if any.val[i] >= '0' && any.val[i] <= '9' {
-			endPos = i + 1
-		} else {
-			break
-		}
-	}
-	parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64)
-	return parsed
-}
-
-func (any *stringAny) ToFloat32() float32 {
-	return float32(any.ToFloat64())
-}
-
-func (any *stringAny) ToFloat64() float64 {
-	if len(any.val) == 0 {
-		return 0
-	}
-
-	// first char invalid
-	if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') {
-		return 0
-	}
-
-	// extract valid num expression from string
-	// e.g. 123true => 123, -12.12xxa => -12.12
-	endPos := 1
-	for i := 1; i < len(any.val); i++ {
-		if any.val[i] == '.' || any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' {
-			endPos = i + 1
-			continue
-		}
-
-		// end position is the first char which is not digit
-		if any.val[i] >= '0' && any.val[i] <= '9' {
-			endPos = i + 1
-		} else {
-			endPos = i
-			break
-		}
-	}
-	parsed, _ := strconv.ParseFloat(any.val[:endPos], 64)
-	return parsed
-}
-
-func (any *stringAny) ToString() string {
-	return any.val
-}
-
-func (any *stringAny) WriteTo(stream *Stream) {
-	stream.WriteString(any.val)
-}
-
-func (any *stringAny) GetInterface() interface{} {
-	return any.val
-}
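stringAny's numeric conversions above parse the longest valid leading prefix and clamp impossible signs rather than failing, e.g. (values illustrative):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	s := jsoniter.WrapString("123.32xxxw")
	fmt.Println(s.ToInt())     // 123: leading integer digits only
	fmt.Println(s.ToFloat64()) // 123.32: longest leading numeric expression
	fmt.Println(s.ToBool())    // true: non-empty and not "0"

	fmt.Println(jsoniter.WrapString("-32.1").ToUint()) // 0: negatives clamp to zero
}
```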
diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go
deleted file mode 100644
index 656bbd3..0000000
--- a/vendor/github.com/json-iterator/go/any_uint32.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package jsoniter
-
-import (
-	"strconv"
-)
-
-type uint32Any struct {
-	baseAny
-	val uint32
-}
-
-func (any *uint32Any) LastError() error {
-	return nil
-}
-
-func (any *uint32Any) ValueType() ValueType {
-	return NumberValue
-}
-
-func (any *uint32Any) MustBeValid() Any {
-	return any
-}
-
-func (any *uint32Any) ToBool() bool {
-	return any.val != 0
-}
-
-func (any *uint32Any) ToInt() int {
-	return int(any.val)
-}
-
-func (any *uint32Any) ToInt32() int32 {
-	return int32(any.val)
-}
-
-func (any *uint32Any) ToInt64() int64 {
-	return int64(any.val)
-}
-
-func (any *uint32Any) ToUint() uint {
-	return uint(any.val)
-}
-
-func (any *uint32Any) ToUint32() uint32 {
-	return any.val
-}
-
-func (any *uint32Any) ToUint64() uint64 {
-	return uint64(any.val)
-}
-
-func (any *uint32Any) ToFloat32() float32 {
-	return float32(any.val)
-}
-
-func (any *uint32Any) ToFloat64() float64 {
-	return float64(any.val)
-}
-
-func (any *uint32Any) ToString() string {
-	return strconv.FormatInt(int64(any.val), 10)
-}
-
-func (any *uint32Any) WriteTo(stream *Stream) {
-	stream.WriteUint32(any.val)
-}
-
-func (any *uint32Any) Parse() *Iterator {
-	return nil
-}
-
-func (any *uint32Any) GetInterface() interface{} {
-	return any.val
-}
diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go
deleted file mode 100644
index 7df2fce..0000000
--- a/vendor/github.com/json-iterator/go/any_uint64.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package jsoniter
-
-import (
-	"strconv"
-)
-
-type uint64Any struct {
-	baseAny
-	val uint64
-}
-
-func (any *uint64Any) LastError() error {
-	return nil
-}
-
-func (any *uint64Any) ValueType() ValueType {
-	return NumberValue
-}
-
-func (any *uint64Any) MustBeValid() Any {
-	return any
-}
-
-func (any *uint64Any) ToBool() bool {
-	return any.val != 0
-}
-
-func (any *uint64Any) ToInt() int {
-	return int(any.val)
-}
-
-func (any *uint64Any) ToInt32() int32 {
-	return int32(any.val)
-}
-
-func (any *uint64Any) ToInt64() int64 {
-	return int64(any.val)
-}
-
-func (any *uint64Any) ToUint() uint {
-	return uint(any.val)
-}
-
-func (any *uint64Any) ToUint32() uint32 {
-	return uint32(any.val)
-}
-
-func (any *uint64Any) ToUint64() uint64 {
-	return any.val
-}
-
-func (any *uint64Any) ToFloat32() float32 {
-	return float32(any.val)
-}
-
-func (any *uint64Any) ToFloat64() float64 {
-	return float64(any.val)
-}
-
-func (any *uint64Any) ToString() string {
-	return strconv.FormatUint(any.val, 10)
-}
-
-func (any *uint64Any) WriteTo(stream *Stream) {
-	stream.WriteUint64(any.val)
-}
-
-func (any *uint64Any) Parse() *Iterator {
-	return nil
-}
-
-func (any *uint64Any) GetInterface() interface{} {
-	return any.val
-}
diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh
deleted file mode 100755
index b45ef68..0000000
--- a/vendor/github.com/json-iterator/go/build.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-set -e
-set -x
-
-if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then
-    mkdir -p /tmp/build-golang/src/github.com/json-iterator
-    ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go
-fi
-export GOPATH=/tmp/build-golang
-go get -u github.com/golang/dep/cmd/dep
-cd /tmp/build-golang/src/github.com/json-iterator/go
-exec $GOPATH/bin/dep ensure -update
diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go
deleted file mode 100644
index 8c58fcb..0000000
--- a/vendor/github.com/json-iterator/go/config.go
+++ /dev/null
@@ -1,375 +0,0 @@
-package jsoniter
-
-import (
-	"encoding/json"
-	"io"
-	"reflect"
-	"sync"
-	"unsafe"
-
-	"github.com/modern-go/concurrent"
-	"github.com/modern-go/reflect2"
-)
-
-// Config customizes how the API should behave.
-// The API is created from Config by Froze.
-type Config struct {
-	IndentionStep                 int
-	MarshalFloatWith6Digits       bool
-	EscapeHTML                    bool
-	SortMapKeys                   bool
-	UseNumber                     bool
-	DisallowUnknownFields         bool
-	TagKey                        string
-	OnlyTaggedField               bool
-	ValidateJsonRawMessage        bool
-	ObjectFieldMustBeSimpleString bool
-	CaseSensitive                 bool
-}
-
-// API is the public interface of this package,
-// primarily Marshal and Unmarshal.
-type API interface {
-	IteratorPool
-	StreamPool
-	MarshalToString(v interface{}) (string, error)
-	Marshal(v interface{}) ([]byte, error)
-	MarshalIndent(v interface{}, prefix, indent string) ([]byte, error)
-	UnmarshalFromString(str string, v interface{}) error
-	Unmarshal(data []byte, v interface{}) error
-	Get(data []byte, path ...interface{}) Any
-	NewEncoder(writer io.Writer) *Encoder
-	NewDecoder(reader io.Reader) *Decoder
-	Valid(data []byte) bool
-	RegisterExtension(extension Extension)
-	DecoderOf(typ reflect2.Type) ValDecoder
-	EncoderOf(typ reflect2.Type) ValEncoder
-}
-
-// ConfigDefault is the default API.
-var ConfigDefault = Config{
-	EscapeHTML: true,
-}.Froze()
-
-// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior
-var ConfigCompatibleWithStandardLibrary = Config{
-	EscapeHTML:             true,
-	SortMapKeys:            true,
-	ValidateJsonRawMessage: true,
-}.Froze()
-
-// ConfigFastest marshals floats with only 6 digits of precision.
-var ConfigFastest = Config{
-	EscapeHTML:                    false,
-	MarshalFloatWith6Digits:       true, // will lose precision
-	ObjectFieldMustBeSimpleString: true, // do not unescape object field
-}.Froze()
-
-type frozenConfig struct {
-	configBeforeFrozen            Config
-	sortMapKeys                   bool
-	indentionStep                 int
-	objectFieldMustBeSimpleString bool
-	onlyTaggedField               bool
-	disallowUnknownFields         bool
-	decoderCache                  *concurrent.Map
-	encoderCache                  *concurrent.Map
-	encoderExtension              Extension
-	decoderExtension              Extension
-	extraExtensions               []Extension
-	streamPool                    *sync.Pool
-	iteratorPool                  *sync.Pool
-	caseSensitive                 bool
-}
-
-func (cfg *frozenConfig) initCache() {
-	cfg.decoderCache = concurrent.NewMap()
-	cfg.encoderCache = concurrent.NewMap()
-}
-
-func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) {
-	cfg.decoderCache.Store(cacheKey, decoder)
-}
-
-func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) {
-	cfg.encoderCache.Store(cacheKey, encoder)
-}
-
-func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder {
-	decoder, found := cfg.decoderCache.Load(cacheKey)
-	if found {
-		return decoder.(ValDecoder)
-	}
-	return nil
-}
-
-func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder {
-	encoder, found := cfg.encoderCache.Load(cacheKey)
-	if found {
-		return encoder.(ValEncoder)
-	}
-	return nil
-}
-
-var cfgCache = concurrent.NewMap()
-
-func getFrozenConfigFromCache(cfg Config) *frozenConfig {
-	obj, found := cfgCache.Load(cfg)
-	if found {
-		return obj.(*frozenConfig)
-	}
-	return nil
-}
-
-func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) {
-	cfgCache.Store(cfg, frozenConfig)
-}
-
-// Froze forges an API from the config.
-func (cfg Config) Froze() API {
-	api := &frozenConfig{
-		sortMapKeys:                   cfg.SortMapKeys,
-		indentionStep:                 cfg.IndentionStep,
-		objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString,
-		onlyTaggedField:               cfg.OnlyTaggedField,
-		disallowUnknownFields:         cfg.DisallowUnknownFields,
-		caseSensitive:                 cfg.CaseSensitive,
-	}
-	api.streamPool = &sync.Pool{
-		New: func() interface{} {
-			return NewStream(api, nil, 512)
-		},
-	}
-	api.iteratorPool = &sync.Pool{
-		New: func() interface{} {
-			return NewIterator(api)
-		},
-	}
-	api.initCache()
-	encoderExtension := EncoderExtension{}
-	decoderExtension := DecoderExtension{}
-	if cfg.MarshalFloatWith6Digits {
-		api.marshalFloatWith6Digits(encoderExtension)
-	}
-	if cfg.EscapeHTML {
-		api.escapeHTML(encoderExtension)
-	}
-	if cfg.UseNumber {
-		api.useNumber(decoderExtension)
-	}
-	if cfg.ValidateJsonRawMessage {
-		api.validateJsonRawMessage(encoderExtension)
-	}
-	api.encoderExtension = encoderExtension
-	api.decoderExtension = decoderExtension
-	api.configBeforeFrozen = cfg
-	return api
-}
-
-func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig {
-	api := getFrozenConfigFromCache(cfg)
-	if api != nil {
-		return api
-	}
-	api = cfg.Froze().(*frozenConfig)
-	for _, extension := range extraExtensions {
-		api.RegisterExtension(extension)
-	}
-	addFrozenConfigToCache(cfg, api)
-	return api
-}
-
-func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) {
-	encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) {
-		rawMessage := *(*json.RawMessage)(ptr)
-		iter := cfg.BorrowIterator([]byte(rawMessage))
-		iter.Read()
-		if iter.Error != nil {
-			stream.WriteRaw("null")
-		} else {
-			cfg.ReturnIterator(iter)
-			stream.WriteRaw(string(rawMessage))
-		}
-	}, func(ptr unsafe.Pointer) bool {
-		return len(*((*json.RawMessage)(ptr))) == 0
-	}}
-	extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder
-	extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder
-}
-
-func (cfg *frozenConfig) useNumber(extension DecoderExtension) {
-	extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) {
-		existingValue := *((*interface{})(ptr))
-		if existingValue != nil && reflect.TypeOf(existingValue).Kind() == reflect.Ptr {
-			iter.ReadVal(existingValue)
-			return
-		}
-		if iter.WhatIsNext() == NumberValue {
-			*((*interface{})(ptr)) = json.Number(iter.readNumberAsString())
-		} else {
-			*((*interface{})(ptr)) = iter.Read()
-		}
-	}}
-}
-func (cfg *frozenConfig) getTagKey() string {
-	tagKey := cfg.configBeforeFrozen.TagKey
-	if tagKey == "" {
-		return "json"
-	}
-	return tagKey
-}
-
-func (cfg *frozenConfig) RegisterExtension(extension Extension) {
-	cfg.extraExtensions = append(cfg.extraExtensions, extension)
-	copied := cfg.configBeforeFrozen
-	cfg.configBeforeFrozen = copied
-}
-
-type lossyFloat32Encoder struct {
-}
-
-func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteFloat32Lossy(*((*float32)(ptr)))
-}
-
-func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return *((*float32)(ptr)) == 0
-}
-
-type lossyFloat64Encoder struct {
-}
-
-func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteFloat64Lossy(*((*float64)(ptr)))
-}
-
-func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return *((*float64)(ptr)) == 0
-}
-
-// marshalFloatWith6Digits keeps 10**(-6) precision
-// for float variables for better performance.
-func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) {
-	// for better performance
-	extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{}
-	extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{}
-}
-
-type htmlEscapedStringEncoder struct {
-}
-
-func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	str := *((*string)(ptr))
-	stream.WriteStringWithHTMLEscaped(str)
-}
-
-func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return *((*string)(ptr)) == ""
-}
-
-func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) {
-	encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{}
-}
-
-func (cfg *frozenConfig) cleanDecoders() {
-	typeDecoders = map[string]ValDecoder{}
-	fieldDecoders = map[string]ValDecoder{}
-	*cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
-}
-
-func (cfg *frozenConfig) cleanEncoders() {
-	typeEncoders = map[string]ValEncoder{}
-	fieldEncoders = map[string]ValEncoder{}
-	*cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
-}
-
-func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) {
-	stream := cfg.BorrowStream(nil)
-	defer cfg.ReturnStream(stream)
-	stream.WriteVal(v)
-	if stream.Error != nil {
-		return "", stream.Error
-	}
-	return string(stream.Buffer()), nil
-}
-
-func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) {
-	stream := cfg.BorrowStream(nil)
-	defer cfg.ReturnStream(stream)
-	stream.WriteVal(v)
-	if stream.Error != nil {
-		return nil, stream.Error
-	}
-	result := stream.Buffer()
-	copied := make([]byte, len(result))
-	copy(copied, result)
-	return copied, nil
-}
-
-func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
-	if prefix != "" {
-		panic("prefix is not supported")
-	}
-	for _, r := range indent {
-		if r != ' ' {
-			panic("indent can only be space")
-		}
-	}
-	newCfg := cfg.configBeforeFrozen
-	newCfg.IndentionStep = len(indent)
-	return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v)
-}
-
-func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error {
-	data := []byte(str)
-	iter := cfg.BorrowIterator(data)
-	defer cfg.ReturnIterator(iter)
-	iter.ReadVal(v)
-	c := iter.nextToken()
-	if c == 0 {
-		if iter.Error == io.EOF {
-			return nil
-		}
-		return iter.Error
-	}
-	iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
-	return iter.Error
-}
-
-func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any {
-	iter := cfg.BorrowIterator(data)
-	defer cfg.ReturnIterator(iter)
-	return locatePath(iter, path)
-}
-
-func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error {
-	iter := cfg.BorrowIterator(data)
-	defer cfg.ReturnIterator(iter)
-	iter.ReadVal(v)
-	c := iter.nextToken()
-	if c == 0 {
-		if iter.Error == io.EOF {
-			return nil
-		}
-		return iter.Error
-	}
-	iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
-	return iter.Error
-}
-
-func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder {
-	stream := NewStream(cfg, writer, 512)
-	return &Encoder{stream}
-}
-
-func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder {
-	iter := Parse(cfg, reader, 512)
-	return &Decoder{iter}
-}
-
-func (cfg *frozenConfig) Valid(data []byte) bool {
-	iter := cfg.BorrowIterator(data)
-	defer cfg.ReturnIterator(iter)
-	iter.Skip()
-	return iter.Error == nil
-}
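Froze turns a Config value into an immutable API with its own encoder/decoder caches, and frozeWithCacheReuse memoizes the result per Config. A sketch of a custom configuration (option choices are illustrative):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	var api = jsoniter.Config{
		SortMapKeys:            true,
		ValidateJsonRawMessage: true,
		UseNumber:              true,
		EscapeHTML:             false, // leave HTML characters unescaped
	}.Froze()

	out, _ := api.Marshal(map[string]string{"html": "<b>"})
	fmt.Println(string(out)) // {"html":"<b>"}

	var v interface{}
	_ = api.Unmarshal([]byte(`1.5`), &v)
	fmt.Printf("%T\n", v) // json.Number, via the UseNumber decoder extension
}
```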
diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
deleted file mode 100644
index 3095662..0000000
--- a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
+++ /dev/null
@@ -1,7 +0,0 @@
-| json type \ dest type | bool | int | uint | float | string |
-| --- | --- | --- | --- | --- | --- |
-| number | positive => true <br/> negative => true <br/> zero => false | 23.2 => 23 <br/> -32.1 => -32 | 12.1 => 12 <br/> -12.1 => 0 | as normal | same as original |
-| string | empty string => false <br/> string "0" => false <br/> other strings => true | "123.32" => 123 <br/> "-123.4" => -123 <br/> "123.23xxxw" => 123 <br/> "abcde12" => 0 <br/> "-32.1" => -32 | 13.2 => 13 <br/> -1.1 => 0 | 12.1 => 12.1 <br/> -12.3 => -12.3 <br/> 12.4xxa => 12.4 <br/> +1.1e2 => 110 | same as original |
-| bool | true => true <br/> false => false | true => 1 <br/> false => 0 | true => 1 <br/> false => 0 | true => 1 <br/> false => 0 | true => "true" <br/> false => "false" |
-| object | true | 0 | 0 | 0 | original json |
-| array | empty array => false <br/> nonempty array => true | [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 | original json |
\ No newline at end of file
diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go
deleted file mode 100644
index 95ae54f..0000000
--- a/vendor/github.com/json-iterator/go/iter.go
+++ /dev/null
@@ -1,322 +0,0 @@
-package jsoniter
-
-import (
-	"encoding/json"
-	"fmt"
-	"io"
-)
-
-// ValueType is the type of a JSON element
-type ValueType int
-
-const (
-	// InvalidValue invalid JSON element
-	InvalidValue ValueType = iota
-	// StringValue JSON element "string"
-	StringValue
-	// NumberValue JSON element 100 or 0.10
-	NumberValue
-	// NilValue JSON element null
-	NilValue
-	// BoolValue JSON element true or false
-	BoolValue
-	// ArrayValue JSON element []
-	ArrayValue
-	// ObjectValue JSON element {}
-	ObjectValue
-)
-
-var hexDigits []byte
-var valueTypes []ValueType
-
-func init() {
-	hexDigits = make([]byte, 256)
-	for i := 0; i < len(hexDigits); i++ {
-		hexDigits[i] = 255
-	}
-	for i := '0'; i <= '9'; i++ {
-		hexDigits[i] = byte(i - '0')
-	}
-	for i := 'a'; i <= 'f'; i++ {
-		hexDigits[i] = byte((i - 'a') + 10)
-	}
-	for i := 'A'; i <= 'F'; i++ {
-		hexDigits[i] = byte((i - 'A') + 10)
-	}
-	valueTypes = make([]ValueType, 256)
-	for i := 0; i < len(valueTypes); i++ {
-		valueTypes[i] = InvalidValue
-	}
-	valueTypes['"'] = StringValue
-	valueTypes['-'] = NumberValue
-	valueTypes['0'] = NumberValue
-	valueTypes['1'] = NumberValue
-	valueTypes['2'] = NumberValue
-	valueTypes['3'] = NumberValue
-	valueTypes['4'] = NumberValue
-	valueTypes['5'] = NumberValue
-	valueTypes['6'] = NumberValue
-	valueTypes['7'] = NumberValue
-	valueTypes['8'] = NumberValue
-	valueTypes['9'] = NumberValue
-	valueTypes['t'] = BoolValue
-	valueTypes['f'] = BoolValue
-	valueTypes['n'] = NilValue
-	valueTypes['['] = ArrayValue
-	valueTypes['{'] = ObjectValue
-}
-
-// Iterator is an io.Reader-like object with JSON-specific read functions.
-// Errors are not returned as return values, but stored in the Error member of the iterator instance.
-type Iterator struct {
-	cfg              *frozenConfig
-	reader           io.Reader
-	buf              []byte
-	head             int
-	tail             int
-	captureStartedAt int
-	captured         []byte
-	Error            error
-	Attachment       interface{} // available to customized decoders
-}
-
-// NewIterator creates an empty Iterator instance
-func NewIterator(cfg API) *Iterator {
-	return &Iterator{
-		cfg:    cfg.(*frozenConfig),
-		reader: nil,
-		buf:    nil,
-		head:   0,
-		tail:   0,
-	}
-}
-
-// Parse creates an Iterator instance from io.Reader
-func Parse(cfg API, reader io.Reader, bufSize int) *Iterator {
-	return &Iterator{
-		cfg:    cfg.(*frozenConfig),
-		reader: reader,
-		buf:    make([]byte, bufSize),
-		head:   0,
-		tail:   0,
-	}
-}
-
-// ParseBytes creates an Iterator instance from byte array
-func ParseBytes(cfg API, input []byte) *Iterator {
-	return &Iterator{
-		cfg:    cfg.(*frozenConfig),
-		reader: nil,
-		buf:    input,
-		head:   0,
-		tail:   len(input),
-	}
-}
-
-// ParseString creates an Iterator instance from string
-func ParseString(cfg API, input string) *Iterator {
-	return ParseBytes(cfg, []byte(input))
-}
-
-// Pool returns a pool that can provide more iterators with the same configuration
-func (iter *Iterator) Pool() IteratorPool {
-	return iter.cfg
-}
-
-// Reset reuses the iterator instance by specifying another reader
-func (iter *Iterator) Reset(reader io.Reader) *Iterator {
-	iter.reader = reader
-	iter.head = 0
-	iter.tail = 0
-	return iter
-}
-
-// ResetBytes reuses the iterator instance by specifying another byte array as input
-func (iter *Iterator) ResetBytes(input []byte) *Iterator {
-	iter.reader = nil
-	iter.buf = input
-	iter.head = 0
-	iter.tail = len(input)
-	return iter
-}
-
-// WhatIsNext gets the ValueType of the next json element without consuming it
-func (iter *Iterator) WhatIsNext() ValueType {
-	valueType := valueTypes[iter.nextToken()]
-	iter.unreadByte()
-	return valueType
-}
-
-func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool {
-	for i := iter.head; i < iter.tail; i++ {
-		c := iter.buf[i]
-		switch c {
-		case ' ', '\n', '\t', '\r':
-			continue
-		}
-		iter.head = i
-		return false
-	}
-	return true
-}
-
-func (iter *Iterator) isObjectEnd() bool {
-	c := iter.nextToken()
-	if c == ',' {
-		return false
-	}
-	if c == '}' {
-		return true
-	}
-	iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c}))
-	return true
-}
-
-func (iter *Iterator) nextToken() byte {
-	// a variation of skip whitespaces, returning the next non-whitespace token
-	for {
-		for i := iter.head; i < iter.tail; i++ {
-			c := iter.buf[i]
-			switch c {
-			case ' ', '\n', '\t', '\r':
-				continue
-			}
-			iter.head = i + 1
-			return c
-		}
-		if !iter.loadMore() {
-			return 0
-		}
-	}
-}
-
-// ReportError records an error on the iterator instance, including the current position.
-func (iter *Iterator) ReportError(operation string, msg string) {
-	if iter.Error != nil {
-		if iter.Error != io.EOF {
-			return
-		}
-	}
-	peekStart := iter.head - 10
-	if peekStart < 0 {
-		peekStart = 0
-	}
-	peekEnd := iter.head + 10
-	if peekEnd > iter.tail {
-		peekEnd = iter.tail
-	}
-	parsing := string(iter.buf[peekStart:peekEnd])
-	contextStart := iter.head - 50
-	if contextStart < 0 {
-		contextStart = 0
-	}
-	contextEnd := iter.head + 50
-	if contextEnd > iter.tail {
-		contextEnd = iter.tail
-	}
-	context := string(iter.buf[contextStart:contextEnd])
-	iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...",
-		operation, msg, iter.head-peekStart, parsing, context)
-}
-
-// CurrentBuffer gets the current buffer as a string, for debugging purposes
-func (iter *Iterator) CurrentBuffer() string {
-	peekStart := iter.head - 10
-	if peekStart < 0 {
-		peekStart = 0
-	}
-	return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head,
-		string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail]))
-}
-
-func (iter *Iterator) readByte() (ret byte) {
-	if iter.head == iter.tail {
-		if iter.loadMore() {
-			ret = iter.buf[iter.head]
-			iter.head++
-			return ret
-		}
-		return 0
-	}
-	ret = iter.buf[iter.head]
-	iter.head++
-	return ret
-}
-
-func (iter *Iterator) loadMore() bool {
-	if iter.reader == nil {
-		if iter.Error == nil {
-			iter.head = iter.tail
-			iter.Error = io.EOF
-		}
-		return false
-	}
-	if iter.captured != nil {
-		iter.captured = append(iter.captured,
-			iter.buf[iter.captureStartedAt:iter.tail]...)
-		iter.captureStartedAt = 0
-	}
-	for {
-		n, err := iter.reader.Read(iter.buf)
-		if n == 0 {
-			if err != nil {
-				if iter.Error == nil {
-					iter.Error = err
-				}
-				return false
-			}
-		} else {
-			iter.head = 0
-			iter.tail = n
-			return true
-		}
-	}
-}
-
-func (iter *Iterator) unreadByte() {
-	if iter.Error != nil {
-		return
-	}
-	iter.head--
-	return
-}
-
-// Read reads the next JSON element as a generic interface{}.
-func (iter *Iterator) Read() interface{} {
-	valueType := iter.WhatIsNext()
-	switch valueType {
-	case StringValue:
-		return iter.ReadString()
-	case NumberValue:
-		if iter.cfg.configBeforeFrozen.UseNumber {
-			return json.Number(iter.readNumberAsString())
-		}
-		return iter.ReadFloat64()
-	case NilValue:
-		iter.skipFourBytes('n', 'u', 'l', 'l')
-		return nil
-	case BoolValue:
-		return iter.ReadBool()
-	case ArrayValue:
-		arr := []interface{}{}
-		iter.ReadArrayCB(func(iter *Iterator) bool {
-			var elem interface{}
-			iter.ReadVal(&elem)
-			arr = append(arr, elem)
-			return true
-		})
-		return arr
-	case ObjectValue:
-		obj := map[string]interface{}{}
-		iter.ReadMapCB(func(iter *Iterator, field string) bool {
-			var elem interface{}
-			iter.ReadVal(&elem)
-			obj[field] = elem
-			return true
-		})
-		return obj
-	default:
-		iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType))
-		return nil
-	}
-}
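
A short usage sketch for the Iterator above (assuming the package's exported `ConfigDefault` configuration):

```go
package main

import (
	"fmt"
	"io"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"n": 1.5, "ok": true}`)

	// WhatIsNext peeks at the next element's type without consuming it.
	fmt.Println(iter.WhatIsNext() == jsoniter.ObjectValue) // true

	// Read decodes the next element into a generic interface{}.
	v := iter.Read().(map[string]interface{})
	fmt.Println(v["n"], v["ok"]) // 1.5 true

	if iter.Error != nil && iter.Error != io.EOF {
		panic(iter.Error)
	}
}
```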
diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go
deleted file mode 100644
index 6188cb4..0000000
--- a/vendor/github.com/json-iterator/go/iter_array.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package jsoniter
-
-// ReadArray reads an array element and tells whether the array has more elements to read.
-func (iter *Iterator) ReadArray() (ret bool) {
-	c := iter.nextToken()
-	switch c {
-	case 'n':
-		iter.skipThreeBytes('u', 'l', 'l')
-		return false // null
-	case '[':
-		c = iter.nextToken()
-		if c != ']' {
-			iter.unreadByte()
-			return true
-		}
-		return false
-	case ']':
-		return false
-	case ',':
-		return true
-	default:
-		iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c}))
-		return
-	}
-}
-
-// ReadArrayCB reads an array with a callback
-func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) {
-	c := iter.nextToken()
-	if c == '[' {
-		c = iter.nextToken()
-		if c != ']' {
-			iter.unreadByte()
-			if !callback(iter) {
-				return false
-			}
-			c = iter.nextToken()
-			for c == ',' {
-				if !callback(iter) {
-					return false
-				}
-				c = iter.nextToken()
-			}
-			if c != ']' {
-				iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c}))
-				return false
-			}
-			return true
-		}
-		return true
-	}
-	if c == 'n' {
-		iter.skipThreeBytes('u', 'l', 'l')
-		return true // null
-	}
-	iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c}))
-	return false
-}
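
The two entry points differ mainly in who drives the loop; a sketch of both (assuming `ConfigDefault`):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Caller-driven loop: ReadArray reports whether another element follows.
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `[1, 2, 3]`)
	sum := 0
	for iter.ReadArray() {
		sum += iter.ReadInt()
	}
	fmt.Println(sum) // 6

	// Callback-driven loop: returning false stops the iteration early.
	iter = jsoniter.ParseString(jsoniter.ConfigDefault, `[4, 5]`)
	iter.ReadArrayCB(func(it *jsoniter.Iterator) bool {
		sum += it.ReadInt()
		return true
	})
	fmt.Println(sum) // 15
}
```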
diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go
deleted file mode 100644
index b975463..0000000
--- a/vendor/github.com/json-iterator/go/iter_float.go
+++ /dev/null
@@ -1,339 +0,0 @@
-package jsoniter
-
-import (
-	"encoding/json"
-	"io"
-	"math/big"
-	"strconv"
-	"strings"
-	"unsafe"
-)
-
-var floatDigits []int8
-
-const invalidCharForNumber = int8(-1)
-const endOfNumber = int8(-2)
-const dotInNumber = int8(-3)
-
-func init() {
-	floatDigits = make([]int8, 256)
-	for i := 0; i < len(floatDigits); i++ {
-		floatDigits[i] = invalidCharForNumber
-	}
-	for i := int8('0'); i <= int8('9'); i++ {
-		floatDigits[i] = i - int8('0')
-	}
-	floatDigits[','] = endOfNumber
-	floatDigits[']'] = endOfNumber
-	floatDigits['}'] = endOfNumber
-	floatDigits[' '] = endOfNumber
-	floatDigits['\t'] = endOfNumber
-	floatDigits['\n'] = endOfNumber
-	floatDigits['.'] = dotInNumber
-}
-
-// ReadBigFloat reads a big.Float
-func (iter *Iterator) ReadBigFloat() (ret *big.Float) {
-	str := iter.readNumberAsString()
-	if iter.Error != nil && iter.Error != io.EOF {
-		return nil
-	}
-	prec := 64
-	if len(str) > prec {
-		prec = len(str)
-	}
-	val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero)
-	if err != nil {
-		iter.Error = err
-		return nil
-	}
-	return val
-}
-
-// ReadBigInt reads a big.Int
-func (iter *Iterator) ReadBigInt() (ret *big.Int) {
-	str := iter.readNumberAsString()
-	if iter.Error != nil && iter.Error != io.EOF {
-		return nil
-	}
-	ret = big.NewInt(0)
-	var success bool
-	ret, success = ret.SetString(str, 10)
-	if !success {
-		iter.ReportError("ReadBigInt", "invalid big int")
-		return nil
-	}
-	return ret
-}
-
-// ReadFloat32 reads a float32
-func (iter *Iterator) ReadFloat32() (ret float32) {
-	c := iter.nextToken()
-	if c == '-' {
-		return -iter.readPositiveFloat32()
-	}
-	iter.unreadByte()
-	return iter.readPositiveFloat32()
-}
-
-func (iter *Iterator) readPositiveFloat32() (ret float32) {
-	i := iter.head
-	// first char
-	if i == iter.tail {
-		return iter.readFloat32SlowPath()
-	}
-	c := iter.buf[i]
-	i++
-	ind := floatDigits[c]
-	switch ind {
-	case invalidCharForNumber:
-		return iter.readFloat32SlowPath()
-	case endOfNumber:
-		iter.ReportError("readFloat32", "empty number")
-		return
-	case dotInNumber:
-		iter.ReportError("readFloat32", "leading dot is invalid")
-		return
-	case 0:
-		if i == iter.tail {
-			return iter.readFloat32SlowPath()
-		}
-		c = iter.buf[i]
-		switch c {
-		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
-			iter.ReportError("readFloat32", "leading zero is invalid")
-			return
-		}
-	}
-	value := uint64(ind)
-	// chars before dot
-non_decimal_loop:
-	for ; i < iter.tail; i++ {
-		c = iter.buf[i]
-		ind := floatDigits[c]
-		switch ind {
-		case invalidCharForNumber:
-			return iter.readFloat32SlowPath()
-		case endOfNumber:
-			iter.head = i
-			return float32(value)
-		case dotInNumber:
-			break non_decimal_loop
-		}
-		if value > uint64SafeToMultiple10 {
-			return iter.readFloat32SlowPath()
-		}
-		value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
-	}
-	// chars after dot
-	if c == '.' {
-		i++
-		decimalPlaces := 0
-		if i == iter.tail {
-			return iter.readFloat32SlowPath()
-		}
-		for ; i < iter.tail; i++ {
-			c = iter.buf[i]
-			ind := floatDigits[c]
-			switch ind {
-			case endOfNumber:
-				if decimalPlaces > 0 && decimalPlaces < len(pow10) {
-					iter.head = i
-					return float32(float64(value) / float64(pow10[decimalPlaces]))
-				}
-				// too many decimal places
-				return iter.readFloat32SlowPath()
-			case invalidCharForNumber, dotInNumber:
-				return iter.readFloat32SlowPath()
-			}
-			decimalPlaces++
-			if value > uint64SafeToMultiple10 {
-				return iter.readFloat32SlowPath()
-			}
-			value = (value << 3) + (value << 1) + uint64(ind)
-		}
-	}
-	return iter.readFloat32SlowPath()
-}
-
-func (iter *Iterator) readNumberAsString() (ret string) {
-	strBuf := [16]byte{}
-	str := strBuf[0:0]
-load_loop:
-	for {
-		for i := iter.head; i < iter.tail; i++ {
-			c := iter.buf[i]
-			switch c {
-			case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
-				str = append(str, c)
-				continue
-			default:
-				iter.head = i
-				break load_loop
-			}
-		}
-		if !iter.loadMore() {
-			break
-		}
-	}
-	if iter.Error != nil && iter.Error != io.EOF {
-		return
-	}
-	if len(str) == 0 {
-		iter.ReportError("readNumberAsString", "invalid number")
-	}
-	return *(*string)(unsafe.Pointer(&str))
-}
-
-func (iter *Iterator) readFloat32SlowPath() (ret float32) {
-	str := iter.readNumberAsString()
-	if iter.Error != nil && iter.Error != io.EOF {
-		return
-	}
-	errMsg := validateFloat(str)
-	if errMsg != "" {
-		iter.ReportError("readFloat32SlowPath", errMsg)
-		return
-	}
-	val, err := strconv.ParseFloat(str, 32)
-	if err != nil {
-		iter.Error = err
-		return
-	}
-	return float32(val)
-}
-
-// ReadFloat64 reads a float64
-func (iter *Iterator) ReadFloat64() (ret float64) {
-	c := iter.nextToken()
-	if c == '-' {
-		return -iter.readPositiveFloat64()
-	}
-	iter.unreadByte()
-	return iter.readPositiveFloat64()
-}
-
-func (iter *Iterator) readPositiveFloat64() (ret float64) {
-	i := iter.head
-	// first char
-	if i == iter.tail {
-		return iter.readFloat64SlowPath()
-	}
-	c := iter.buf[i]
-	i++
-	ind := floatDigits[c]
-	switch ind {
-	case invalidCharForNumber:
-		return iter.readFloat64SlowPath()
-	case endOfNumber:
-		iter.ReportError("readFloat64", "empty number")
-		return
-	case dotInNumber:
-		iter.ReportError("readFloat64", "leading dot is invalid")
-		return
-	case 0:
-		if i == iter.tail {
-			return iter.readFloat64SlowPath()
-		}
-		c = iter.buf[i]
-		switch c {
-		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
-			iter.ReportError("readFloat64", "leading zero is invalid")
-			return
-		}
-	}
-	value := uint64(ind)
-	// chars before dot
-non_decimal_loop:
-	for ; i < iter.tail; i++ {
-		c = iter.buf[i]
-		ind := floatDigits[c]
-		switch ind {
-		case invalidCharForNumber:
-			return iter.readFloat64SlowPath()
-		case endOfNumber:
-			iter.head = i
-			return float64(value)
-		case dotInNumber:
-			break non_decimal_loop
-		}
-		if value > uint64SafeToMultiple10 {
-			return iter.readFloat64SlowPath()
-		}
-		value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
-	}
-	// chars after dot
-	if c == '.' {
-		i++
-		decimalPlaces := 0
-		if i == iter.tail {
-			return iter.readFloat64SlowPath()
-		}
-		for ; i < iter.tail; i++ {
-			c = iter.buf[i]
-			ind := floatDigits[c]
-			switch ind {
-			case endOfNumber:
-				if decimalPlaces > 0 && decimalPlaces < len(pow10) {
-					iter.head = i
-					return float64(value) / float64(pow10[decimalPlaces])
-				}
-				// too many decimal places
-				return iter.readFloat64SlowPath()
-			case invalidCharForNumber, dotInNumber:
-				return iter.readFloat64SlowPath()
-			}
-			decimalPlaces++
-			if value > uint64SafeToMultiple10 {
-				return iter.readFloat64SlowPath()
-			}
-			value = (value << 3) + (value << 1) + uint64(ind)
-		}
-	}
-	return iter.readFloat64SlowPath()
-}
-
-func (iter *Iterator) readFloat64SlowPath() (ret float64) {
-	str := iter.readNumberAsString()
-	if iter.Error != nil && iter.Error != io.EOF {
-		return
-	}
-	errMsg := validateFloat(str)
-	if errMsg != "" {
-		iter.ReportError("readFloat64SlowPath", errMsg)
-		return
-	}
-	val, err := strconv.ParseFloat(str, 64)
-	if err != nil {
-		iter.Error = err
-		return
-	}
-	return val
-}
-
-func validateFloat(str string) string {
-	// strconv.ParseFloat does not reject `1.` or `1.e1`
-	if len(str) == 0 {
-		return "empty number"
-	}
-	if str[0] == '-' {
-		return "-- is not valid"
-	}
-	dotPos := strings.IndexByte(str, '.')
-	if dotPos != -1 {
-		if dotPos == len(str)-1 {
-			return "dot can not be last character"
-		}
-		switch str[dotPos+1] {
-		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
-		default:
-			return "missing digit after dot"
-		}
-	}
-	return ""
-}
-
-// ReadNumber reads a json.Number
-func (iter *Iterator) ReadNumber() (ret json.Number) {
-	return json.Number(iter.readNumberAsString())
-}
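
The fast paths above accumulate all digits into one uint64 and divide once by a power of ten at the end, falling back to the slow path for anything unusual. A standalone sketch of that idea (a hypothetical helper, not the vendored code; strconv stands in for the slow path):

```go
package main

import (
	"fmt"
	"strconv"
)

// parseSmallFloat mirrors readPositiveFloat64's fast path: accumulate digits
// as an integer, count decimal places, divide once at the end. Anything
// unusual (sign, exponent, overflow, no digits after the dot) falls back
// to the slow path, here strconv.ParseFloat.
func parseSmallFloat(s string) (float64, error) {
	pow10 := [...]uint64{1, 10, 100, 1000, 10000, 100000, 1000000}
	value := uint64(0)
	decimalPlaces := -1 // -1 means no dot seen yet
	for i := 0; i < len(s); i++ {
		c := s[i]
		switch {
		case c >= '0' && c <= '9':
			if value > (1<<64-1)/10-1 { // one more digit could overflow
				return strconv.ParseFloat(s, 64)
			}
			value = value*10 + uint64(c-'0')
			if decimalPlaces >= 0 {
				decimalPlaces++
			}
		case c == '.' && decimalPlaces < 0:
			decimalPlaces = 0
		default:
			return strconv.ParseFloat(s, 64)
		}
	}
	if decimalPlaces <= 0 || decimalPlaces >= len(pow10) {
		return strconv.ParseFloat(s, 64)
	}
	return float64(value) / float64(pow10[decimalPlaces]), nil
}

func main() {
	v, _ := parseSmallFloat("12.34")
	fmt.Println(v) // 12.34
}
```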
diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go
deleted file mode 100644
index 2142320..0000000
--- a/vendor/github.com/json-iterator/go/iter_int.go
+++ /dev/null
@@ -1,345 +0,0 @@
-package jsoniter
-
-import (
-	"math"
-	"strconv"
-)
-
-var intDigits []int8
-
-const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1
-const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1
-
-func init() {
-	intDigits = make([]int8, 256)
-	for i := 0; i < len(intDigits); i++ {
-		intDigits[i] = invalidCharForNumber
-	}
-	for i := int8('0'); i <= int8('9'); i++ {
-		intDigits[i] = i - int8('0')
-	}
-}
-
-// ReadUint reads a uint
-func (iter *Iterator) ReadUint() uint {
-	if strconv.IntSize == 32 {
-		return uint(iter.ReadUint32())
-	}
-	return uint(iter.ReadUint64())
-}
-
-// ReadInt reads an int
-func (iter *Iterator) ReadInt() int {
-	if strconv.IntSize == 32 {
-		return int(iter.ReadInt32())
-	}
-	return int(iter.ReadInt64())
-}
-
-// ReadInt8 reads an int8
-func (iter *Iterator) ReadInt8() (ret int8) {
-	c := iter.nextToken()
-	if c == '-' {
-		val := iter.readUint32(iter.readByte())
-		if val > math.MaxInt8+1 {
-			iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
-			return
-		}
-		return -int8(val)
-	}
-	val := iter.readUint32(c)
-	if val > math.MaxInt8 {
-		iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
-		return
-	}
-	return int8(val)
-}
-
-// ReadUint8 reads a uint8
-func (iter *Iterator) ReadUint8() (ret uint8) {
-	val := iter.readUint32(iter.nextToken())
-	if val > math.MaxUint8 {
-		iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10))
-		return
-	}
-	return uint8(val)
-}
-
-// ReadInt16 reads an int16
-func (iter *Iterator) ReadInt16() (ret int16) {
-	c := iter.nextToken()
-	if c == '-' {
-		val := iter.readUint32(iter.readByte())
-		if val > math.MaxInt16+1 {
-			iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
-			return
-		}
-		return -int16(val)
-	}
-	val := iter.readUint32(c)
-	if val > math.MaxInt16 {
-		iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
-		return
-	}
-	return int16(val)
-}
-
-// ReadUint16 reads a uint16
-func (iter *Iterator) ReadUint16() (ret uint16) {
-	val := iter.readUint32(iter.nextToken())
-	if val > math.MaxUint16 {
-		iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10))
-		return
-	}
-	return uint16(val)
-}
-
-// ReadInt32 reads an int32
-func (iter *Iterator) ReadInt32() (ret int32) {
-	c := iter.nextToken()
-	if c == '-' {
-		val := iter.readUint32(iter.readByte())
-		if val > math.MaxInt32+1 {
-			iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
-			return
-		}
-		return -int32(val)
-	}
-	val := iter.readUint32(c)
-	if val > math.MaxInt32 {
-		iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
-		return
-	}
-	return int32(val)
-}
-
-// ReadUint32 reads a uint32
-func (iter *Iterator) ReadUint32() (ret uint32) {
-	return iter.readUint32(iter.nextToken())
-}
-
-func (iter *Iterator) readUint32(c byte) (ret uint32) {
-	ind := intDigits[c]
-	if ind == 0 {
-		iter.assertInteger()
-		return 0 // single zero
-	}
-	if ind == invalidCharForNumber {
-		iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)}))
-		return
-	}
-	value := uint32(ind)
-	if iter.tail-iter.head > 10 {
-		i := iter.head
-		ind2 := intDigits[iter.buf[i]]
-		if ind2 == invalidCharForNumber {
-			iter.head = i
-			iter.assertInteger()
-			return value
-		}
-		i++
-		ind3 := intDigits[iter.buf[i]]
-		if ind3 == invalidCharForNumber {
-			iter.head = i
-			iter.assertInteger()
-			return value*10 + uint32(ind2)
-		}
-		//iter.head = i + 1
-		//value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
-		i++
-		ind4 := intDigits[iter.buf[i]]
-		if ind4 == invalidCharForNumber {
-			iter.head = i
-			iter.assertInteger()
-			return value*100 + uint32(ind2)*10 + uint32(ind3)
-		}
-		i++
-		ind5 := intDigits[iter.buf[i]]
-		if ind5 == invalidCharForNumber {
-			iter.head = i
-			iter.assertInteger()
-			return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4)
-		}
-		i++
-		ind6 := intDigits[iter.buf[i]]
-		if ind6 == invalidCharForNumber {
-			iter.head = i
-			iter.assertInteger()
-			return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5)
-		}
-		i++
-		ind7 := intDigits[iter.buf[i]]
-		if ind7 == invalidCharForNumber {
-			iter.head = i
-			iter.assertInteger()
-			return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6)
-		}
-		i++
-		ind8 := intDigits[iter.buf[i]]
-		if ind8 == invalidCharForNumber {
-			iter.head = i
-			iter.assertInteger()
-			return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7)
-		}
-		i++
-		ind9 := intDigits[iter.buf[i]]
-		value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8)
-		iter.head = i
-		if ind9 == invalidCharForNumber {
-			iter.assertInteger()
-			return value
-		}
-	}
-	for {
-		for i := iter.head; i < iter.tail; i++ {
-			ind = intDigits[iter.buf[i]]
-			if ind == invalidCharForNumber {
-				iter.head = i
-				iter.assertInteger()
-				return value
-			}
-			if value > uint32SafeToMultiply10 {
-				value2 := (value << 3) + (value << 1) + uint32(ind)
-				if value2 < value {
-					iter.ReportError("readUint32", "overflow")
-					return
-				}
-				value = value2
-				continue
-			}
-			value = (value << 3) + (value << 1) + uint32(ind)
-		}
-		if !iter.loadMore() {
-			iter.assertInteger()
-			return value
-		}
-	}
-}
-
-// ReadInt64 reads an int64
-func (iter *Iterator) ReadInt64() (ret int64) {
-	c := iter.nextToken()
-	if c == '-' {
-		val := iter.readUint64(iter.readByte())
-		if val > math.MaxInt64+1 {
-			iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
-			return
-		}
-		return -int64(val)
-	}
-	val := iter.readUint64(c)
-	if val > math.MaxInt64 {
-		iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
-		return
-	}
-	return int64(val)
-}
-
-// ReadUint64 reads a uint64
-func (iter *Iterator) ReadUint64() uint64 {
-	return iter.readUint64(iter.nextToken())
-}
-
-func (iter *Iterator) readUint64(c byte) (ret uint64) {
-	ind := intDigits[c]
-	if ind == 0 {
-		iter.assertInteger()
-		return 0 // single zero
-	}
-	if ind == invalidCharForNumber {
-		iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)}))
-		return
-	}
-	value := uint64(ind)
-	if iter.tail-iter.head > 10 {
-		i := iter.head
-		ind2 := intDigits[iter.buf[i]]
-		if ind2 == invalidCharForNumber {
-			iter.head = i
-			iter.assertInteger()
-			return value
-		}
-		i++
-		ind3 := intDigits[iter.buf[i]]
-		if ind3 == invalidCharForNumber {
-			iter.head = i
-			iter.assertInteger()
-			return value*10 + uint64(ind2)
-		}
-		//iter.head = i + 1
-		//value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
-		i++
-		ind4 := intDigits[iter.buf[i]]
-		if ind4 == invalidCharForNumber {
-			iter.head = i
-			iter.assertInteger()
-			return value*100 + uint64(ind2)*10 + uint64(ind3)
-		}
-		i++
-		ind5 := intDigits[iter.buf[i]]
-		if ind5 == invalidCharForNumber {
-			iter.head = i
-			iter.assertInteger()
-			return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4)
-		}
-		i++
-		ind6 := intDigits[iter.buf[i]]
-		if ind6 == invalidCharForNumber {
-			iter.head = i
-			iter.assertInteger()
-			return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5)
-		}
-		i++
-		ind7 := intDigits[iter.buf[i]]
-		if ind7 == invalidCharForNumber {
-			iter.head = i
-			iter.assertInteger()
-			return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6)
-		}
-		i++
-		ind8 := intDigits[iter.buf[i]]
-		if ind8 == invalidCharForNumber {
-			iter.head = i
-			iter.assertInteger()
-			return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7)
-		}
-		i++
-		ind9 := intDigits[iter.buf[i]]
-		value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8)
-		iter.head = i
-		if ind9 == invalidCharForNumber {
-			iter.assertInteger()
-			return value
-		}
-	}
-	for {
-		for i := iter.head; i < iter.tail; i++ {
-			ind = intDigits[iter.buf[i]]
-			if ind == invalidCharForNumber {
-				iter.head = i
-				iter.assertInteger()
-				return value
-			}
-			if value > uint64SafeToMultiple10 {
-				value2 := (value << 3) + (value << 1) + uint64(ind)
-				if value2 < value {
-					iter.ReportError("readUint64", "overflow")
-					return
-				}
-				value = value2
-				continue
-			}
-			value = (value << 3) + (value << 1) + uint64(ind)
-		}
-		if !iter.loadMore() {
-			iter.assertInteger()
-			return value
-		}
-	}
-}
-
-func (iter *Iterator) assertInteger() {
-	if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' {
-		iter.ReportError("assertInteger", "can not decode float as int")
-	}
-}
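
The recurring `(value << 3) + (value << 1)` is just `value*10` written with shifts, and the `uint64SafeToMultiple10` guard works because above that threshold appending one more digit can wrap. A minimal sketch of the same guard (a hypothetical helper):

```go
package main

import "fmt"

// appendDigit multiplies by ten via shifts, exactly as readUint64 does, and
// detects overflow the same way: past the safe threshold, a wrapped result
// comes out smaller than the input.
func appendDigit(value, digit uint64) (uint64, bool) {
	const safe = uint64(0xffffffffffffffff)/10 - 1
	next := (value << 3) + (value << 1) + digit // value*10 + digit
	if value > safe && next < value {
		return 0, false // wrapped around: overflow
	}
	return next, true
}

func main() {
	v, ok := appendDigit(1844674407370955161, 6) // 10*v + 6 wraps past MaxUint64
	fmt.Println(v, ok)                           // 0 false
}
```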
diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go
deleted file mode 100644
index 1c57576..0000000
--- a/vendor/github.com/json-iterator/go/iter_object.go
+++ /dev/null
@@ -1,251 +0,0 @@
-package jsoniter
-
-import (
-	"fmt"
-	"strings"
-)
-
-// ReadObject reads one field from the object.
-// If object ended, returns empty string.
-// Otherwise, returns the field name.
-func (iter *Iterator) ReadObject() (ret string) {
-	c := iter.nextToken()
-	switch c {
-	case 'n':
-		iter.skipThreeBytes('u', 'l', 'l')
-		return "" // null
-	case '{':
-		c = iter.nextToken()
-		if c == '"' {
-			iter.unreadByte()
-			field := iter.ReadString()
-			c = iter.nextToken()
-			if c != ':' {
-				iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
-			}
-			return field
-		}
-		if c == '}' {
-			return "" // end of object
-		}
-		iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c}))
-		return
-	case ',':
-		field := iter.ReadString()
-		c = iter.nextToken()
-		if c != ':' {
-			iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
-		}
-		return field
-	case '}':
-		return "" // end of object
-	default:
-		iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c})))
-		return
-	}
-}
-
-// readFieldHash reads the next field name and returns its FNV-1a style hash, lowercased unless the config is case-sensitive
-func (iter *Iterator) readFieldHash() int64 {
-	hash := int64(0x811c9dc5)
-	c := iter.nextToken()
-	if c != '"' {
-		iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c}))
-		return 0
-	}
-	for {
-		for i := iter.head; i < iter.tail; i++ {
-			// require ascii string and no escape
-			b := iter.buf[i]
-			if b == '\\' {
-				iter.head = i
-				for _, b := range iter.readStringSlowPath() {
-					if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
-						b += 'a' - 'A'
-					}
-					hash ^= int64(b)
-					hash *= 0x1000193
-				}
-				c = iter.nextToken()
-				if c != ':' {
-					iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
-					return 0
-				}
-				return hash
-			}
-			if b == '"' {
-				iter.head = i + 1
-				c = iter.nextToken()
-				if c != ':' {
-					iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
-					return 0
-				}
-				return hash
-			}
-			if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
-				b += 'a' - 'A'
-			}
-			hash ^= int64(b)
-			hash *= 0x1000193
-		}
-		if !iter.loadMore() {
-			iter.ReportError("readFieldHash", `incomplete field name`)
-			return 0
-		}
-	}
-}
-
-func calcHash(str string, caseSensitive bool) int64 {
-	if !caseSensitive {
-		str = strings.ToLower(str)
-	}
-	hash := int64(0x811c9dc5)
-	for _, b := range []byte(str) {
-		hash ^= int64(b)
-		hash *= 0x1000193
-	}
-	return int64(hash)
-}
-
-// ReadObjectCB reads an object with a callback; the key must be ascii-only and the field name is not copied
-func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
-	c := iter.nextToken()
-	var field string
-	if c == '{' {
-		c = iter.nextToken()
-		if c == '"' {
-			iter.unreadByte()
-			field = iter.ReadString()
-			c = iter.nextToken()
-			if c != ':' {
-				iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
-			}
-			if !callback(iter, field) {
-				return false
-			}
-			c = iter.nextToken()
-			for c == ',' {
-				field = iter.ReadString()
-				c = iter.nextToken()
-				if c != ':' {
-					iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
-				}
-				if !callback(iter, field) {
-					return false
-				}
-				c = iter.nextToken()
-			}
-			if c != '}' {
-				iter.ReportError("ReadObjectCB", `object not ended with }`)
-				return false
-			}
-			return true
-		}
-		if c == '}' {
-			return true
-		}
-		iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c}))
-		return false
-	}
-	if c == 'n' {
-		iter.skipThreeBytes('u', 'l', 'l')
-		return true // null
-	}
-	iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c}))
-	return false
-}
-
-// ReadMapCB reads a map with a callback; the key can be any string
-func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
-	c := iter.nextToken()
-	if c == '{' {
-		c = iter.nextToken()
-		if c == '"' {
-			iter.unreadByte()
-			field := iter.ReadString()
-			if iter.nextToken() != ':' {
-				iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
-				return false
-			}
-			if !callback(iter, field) {
-				return false
-			}
-			c = iter.nextToken()
-			for c == ',' {
-				field = iter.ReadString()
-				if iter.nextToken() != ':' {
-					iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
-					return false
-				}
-				if !callback(iter, field) {
-					return false
-				}
-				c = iter.nextToken()
-			}
-			if c != '}' {
-				iter.ReportError("ReadMapCB", `object not ended with }`)
-				return false
-			}
-			return true
-		}
-		if c == '}' {
-			return true
-		}
-		iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
-		return false
-	}
-	if c == 'n' {
-		iter.skipThreeBytes('u', 'l', 'l')
-		return true // null
-	}
-	iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
-	return false
-}
-
-func (iter *Iterator) readObjectStart() bool {
-	c := iter.nextToken()
-	if c == '{' {
-		c = iter.nextToken()
-		if c == '}' {
-			return false
-		}
-		iter.unreadByte()
-		return true
-	} else if c == 'n' {
-		iter.skipThreeBytes('u', 'l', 'l')
-		return false
-	}
-	iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c}))
-	return false
-}
-
-func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) {
-	str := iter.ReadStringAsSlice()
-	if iter.skipWhitespacesWithoutLoadMore() {
-		if ret == nil {
-			ret = make([]byte, len(str))
-			copy(ret, str)
-		}
-		if !iter.loadMore() {
-			return
-		}
-	}
-	if iter.buf[iter.head] != ':' {
-		iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]}))
-		return
-	}
-	iter.head++
-	if iter.skipWhitespacesWithoutLoadMore() {
-		if ret == nil {
-			ret = make([]byte, len(str))
-			copy(ret, str)
-		}
-		if !iter.loadMore() {
-			return
-		}
-	}
-	if ret == nil {
-		return str
-	}
-	return ret
-}
diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go
deleted file mode 100644
index f58beb9..0000000
--- a/vendor/github.com/json-iterator/go/iter_skip.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package jsoniter
-
-import "fmt"
-
-// ReadNil reads a json null if present and
-// returns whether a null was consumed
-func (iter *Iterator) ReadNil() (ret bool) {
-	c := iter.nextToken()
-	if c == 'n' {
-		iter.skipThreeBytes('u', 'l', 'l') // null
-		return true
-	}
-	iter.unreadByte()
-	return false
-}
-
-// ReadBool reads a json true or false as a bool
-func (iter *Iterator) ReadBool() (ret bool) {
-	c := iter.nextToken()
-	if c == 't' {
-		iter.skipThreeBytes('r', 'u', 'e')
-		return true
-	}
-	if c == 'f' {
-		iter.skipFourBytes('a', 'l', 's', 'e')
-		return false
-	}
-	iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c}))
-	return
-}
-
-// SkipAndReturnBytes skips the next JSON element and returns its content as []byte.
-// The []byte can be kept, as it is a copy of the data.
-func (iter *Iterator) SkipAndReturnBytes() []byte {
-	iter.startCapture(iter.head)
-	iter.Skip()
-	return iter.stopCapture()
-}
-
-type captureBuffer struct {
-	startedAt int
-	captured  []byte
-}
-
-func (iter *Iterator) startCapture(captureStartedAt int) {
-	if iter.captured != nil {
-		panic("already in capture mode")
-	}
-	iter.captureStartedAt = captureStartedAt
-	iter.captured = make([]byte, 0, 32)
-}
-
-func (iter *Iterator) stopCapture() []byte {
-	if iter.captured == nil {
-		panic("not in capture mode")
-	}
-	captured := iter.captured
-	remaining := iter.buf[iter.captureStartedAt:iter.head]
-	iter.captureStartedAt = -1
-	iter.captured = nil
-	if len(captured) == 0 {
-		copied := make([]byte, len(remaining))
-		copy(copied, remaining)
-		return copied
-	}
-	captured = append(captured, remaining...)
-	return captured
-}
-
-// Skip skips a json element and positions the iterator at the next element
-func (iter *Iterator) Skip() {
-	c := iter.nextToken()
-	switch c {
-	case '"':
-		iter.skipString()
-	case 'n':
-		iter.skipThreeBytes('u', 'l', 'l') // null
-	case 't':
-		iter.skipThreeBytes('r', 'u', 'e') // true
-	case 'f':
-		iter.skipFourBytes('a', 'l', 's', 'e') // false
-	case '0':
-		iter.unreadByte()
-		iter.ReadFloat32()
-	case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9':
-		iter.skipNumber()
-	case '[':
-		iter.skipArray()
-	case '{':
-		iter.skipObject()
-	default:
-		iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c))
-		return
-	}
-}
-
-func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) {
-	if iter.readByte() != b1 {
-		iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
-		return
-	}
-	if iter.readByte() != b2 {
-		iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
-		return
-	}
-	if iter.readByte() != b3 {
-		iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
-		return
-	}
-	if iter.readByte() != b4 {
-		iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
-		return
-	}
-}
-
-func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) {
-	if iter.readByte() != b1 {
-		iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
-		return
-	}
-	if iter.readByte() != b2 {
-		iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
-		return
-	}
-	if iter.readByte() != b3 {
-		iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
-		return
-	}
-}
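
A sketch of the capture mechanism in use: skipping one field while keeping its raw bytes (assuming `ConfigDefault`):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	input := `{"keep":{"x":[1,2]},"drop":0}`
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, input)
	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
		if field == "keep" {
			raw := iter.SkipAndReturnBytes() // a copy, safe to retain
			fmt.Println(string(raw))         // {"x":[1,2]}
		} else {
			iter.Skip()
		}
	}
}
```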
diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
deleted file mode 100644
index 8fcdc3b..0000000
--- a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
+++ /dev/null
@@ -1,144 +0,0 @@
-//+build jsoniter_sloppy
-
-package jsoniter
-
-// sloppy but faster implementation; it does not validate the input json
-
-func (iter *Iterator) skipNumber() {
-	for {
-		for i := iter.head; i < iter.tail; i++ {
-			c := iter.buf[i]
-			switch c {
-			case ' ', '\n', '\r', '\t', ',', '}', ']':
-				iter.head = i
-				return
-			}
-		}
-		if !iter.loadMore() {
-			return
-		}
-	}
-}
-
-func (iter *Iterator) skipArray() {
-	level := 1
-	for {
-		for i := iter.head; i < iter.tail; i++ {
-			switch iter.buf[i] {
-			case '"': // If inside string, skip it
-				iter.head = i + 1
-				iter.skipString()
-				i = iter.head - 1 // it will be i++ soon
-			case '[': // If open symbol, increase level
-				level++
-			case ']': // If close symbol, decrease level
-				level--
-
-				// If we have returned to the original level, we're done
-				if level == 0 {
-					iter.head = i + 1
-					return
-				}
-			}
-		}
-		if !iter.loadMore() {
-			iter.ReportError("skipObject", "incomplete array")
-			return
-		}
-	}
-}
-
-func (iter *Iterator) skipObject() {
-	level := 1
-	for {
-		for i := iter.head; i < iter.tail; i++ {
-			switch iter.buf[i] {
-			case '"': // If inside string, skip it
-				iter.head = i + 1
-				iter.skipString()
-				i = iter.head - 1 // it will be i++ soon
-			case '{': // If open symbol, increase level
-				level++
-			case '}': // If close symbol, decrease level
-				level--
-
-				// If we have returned to the original level, we're done
-				if level == 0 {
-					iter.head = i + 1
-					return
-				}
-			}
-		}
-		if !iter.loadMore() {
-			iter.ReportError("skipObject", "incomplete object")
-			return
-		}
-	}
-}
-
-func (iter *Iterator) skipString() {
-	for {
-		end, escaped := iter.findStringEnd()
-		if end == -1 {
-			if !iter.loadMore() {
-				iter.ReportError("skipString", "incomplete string")
-				return
-			}
-			if escaped {
-				iter.head = 1 // skip the first char as last char read is \
-			}
-		} else {
-			iter.head = end
-			return
-		}
-	}
-}
-
-// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go
-// Tries to find the end of the string.
-// Supports strings that contain escaped quote symbols.
-func (iter *Iterator) findStringEnd() (int, bool) {
-	escaped := false
-	for i := iter.head; i < iter.tail; i++ {
-		c := iter.buf[i]
-		if c == '"' {
-			if !escaped {
-				return i + 1, false
-			}
-			j := i - 1
-			for {
-				if j < iter.head || iter.buf[j] != '\\' {
-					// even number of backslashes
-					// either end of buffer, or " found
-					return i + 1, true
-				}
-				j--
-				if j < iter.head || iter.buf[j] != '\\' {
-					// odd number of backslashes
-					// it is \" or \\\"
-					break
-				}
-				j--
-			}
-		} else if c == '\\' {
-			escaped = true
-		}
-	}
-	j := iter.tail - 1
-	for {
-		if j < iter.head || iter.buf[j] != '\\' {
-			// even number of backslashes
-			// either end of buffer, or " found
-			return -1, false // do not end with \
-		}
-		j--
-		if j < iter.head || iter.buf[j] != '\\' {
-			// odd number of backslashes
-			// it is \" or \\\"
-			break
-		}
-		j--
-
-	}
-	return -1, true // end with \
-}
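
This file is only compiled when building with `-tags jsoniter_sloppy`. findStringEnd decides whether a closing quote is real by counting the backslashes in front of it: an odd run means the quote is escaped. The same parity test as a standalone sketch (a hypothetical helper):

```go
package main

import "fmt"

// isEscaped reports whether the quote at buf[i] is escaped, by counting the
// run of backslashes immediately before it; an odd count means escaped.
func isEscaped(buf []byte, i int) bool {
	backslashes := 0
	for j := i - 1; j >= 0 && buf[j] == '\\'; j-- {
		backslashes++
	}
	return backslashes%2 == 1
}

func main() {
	fmt.Println(isEscaped([]byte(`a\"`), 2))  // true: \" is escaped
	fmt.Println(isEscaped([]byte(`a\\"`), 3)) // false: \\ then a real quote
}
```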
diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go
deleted file mode 100644
index 6cf66d0..0000000
--- a/vendor/github.com/json-iterator/go/iter_skip_strict.go
+++ /dev/null
@@ -1,99 +0,0 @@
-//+build !jsoniter_sloppy
-
-package jsoniter
-
-import (
-	"fmt"
-	"io"
-)
-
-func (iter *Iterator) skipNumber() {
-	if !iter.trySkipNumber() {
-		iter.unreadByte()
-		if iter.Error != nil && iter.Error != io.EOF {
-			return
-		}
-		iter.ReadFloat64()
-		if iter.Error != nil && iter.Error != io.EOF {
-			iter.Error = nil
-			iter.ReadBigFloat()
-		}
-	}
-}
-
-func (iter *Iterator) trySkipNumber() bool {
-	dotFound := false
-	for i := iter.head; i < iter.tail; i++ {
-		c := iter.buf[i]
-		switch c {
-		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
-		case '.':
-			if dotFound {
-				iter.ReportError("validateNumber", `more than one dot found in number`)
-				return true // already failed
-			}
-			if i+1 == iter.tail {
-				return false
-			}
-			c = iter.buf[i+1]
-			switch c {
-			case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
-			default:
-				iter.ReportError("validateNumber", `missing digit after dot`)
-				return true // already failed
-			}
-			dotFound = true
-		default:
-			switch c {
-			case ',', ']', '}', ' ', '\t', '\n', '\r':
-				if iter.head == i {
-					return false // if - without following digits
-				}
-				iter.head = i
-				return true // must be valid
-			}
-			return false // may be invalid
-		}
-	}
-	return false
-}
-
-func (iter *Iterator) skipString() {
-	if !iter.trySkipString() {
-		iter.unreadByte()
-		iter.ReadString()
-	}
-}
-
-func (iter *Iterator) trySkipString() bool {
-	for i := iter.head; i < iter.tail; i++ {
-		c := iter.buf[i]
-		if c == '"' {
-			iter.head = i + 1
-			return true // valid
-		} else if c == '\\' {
-			return false
-		} else if c < ' ' {
-			iter.ReportError("trySkipString",
-				fmt.Sprintf(`invalid control character found: %d`, c))
-			return true // already failed
-		}
-	}
-	return false
-}
-
-func (iter *Iterator) skipObject() {
-	iter.unreadByte()
-	iter.ReadObjectCB(func(iter *Iterator, field string) bool {
-		iter.Skip()
-		return true
-	})
-}
-
-func (iter *Iterator) skipArray() {
-	iter.unreadByte()
-	iter.ReadArrayCB(func(iter *Iterator) bool {
-		iter.Skip()
-		return true
-	})
-}
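
Under this default (strict) build, skipping validates as it goes, so `Valid` catches malformed numbers; the sloppy build above would let them through. A quick check (assuming `ConfigDefault`):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	cfg := jsoniter.ConfigDefault
	fmt.Println(cfg.Valid([]byte(`{"a": 1.5}`)))  // true
	fmt.Println(cfg.Valid([]byte(`{"a": 1..5}`))) // false: more than one dot
}
```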
diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go
deleted file mode 100644
index adc487e..0000000
--- a/vendor/github.com/json-iterator/go/iter_str.go
+++ /dev/null
@@ -1,215 +0,0 @@
-package jsoniter
-
-import (
-	"fmt"
-	"unicode/utf16"
-)
-
-// ReadString reads a string from the iterator
-func (iter *Iterator) ReadString() (ret string) {
-	c := iter.nextToken()
-	if c == '"' {
-		for i := iter.head; i < iter.tail; i++ {
-			c := iter.buf[i]
-			if c == '"' {
-				ret = string(iter.buf[iter.head:i])
-				iter.head = i + 1
-				return ret
-			} else if c == '\\' {
-				break
-			} else if c < ' ' {
-				iter.ReportError("ReadString",
-					fmt.Sprintf(`invalid control character found: %d`, c))
-				return
-			}
-		}
-		return iter.readStringSlowPath()
-	} else if c == 'n' {
-		iter.skipThreeBytes('u', 'l', 'l')
-		return ""
-	}
-	iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c}))
-	return
-}
-
-func (iter *Iterator) readStringSlowPath() (ret string) {
-	var str []byte
-	var c byte
-	for iter.Error == nil {
-		c = iter.readByte()
-		if c == '"' {
-			return string(str)
-		}
-		if c == '\\' {
-			c = iter.readByte()
-			str = iter.readEscapedChar(c, str)
-		} else {
-			str = append(str, c)
-		}
-	}
-	iter.ReportError("readStringSlowPath", "unexpected end of input")
-	return
-}
-
-func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte {
-	switch c {
-	case 'u':
-		r := iter.readU4()
-		if utf16.IsSurrogate(r) {
-			c = iter.readByte()
-			if iter.Error != nil {
-				return nil
-			}
-			if c != '\\' {
-				iter.unreadByte()
-				str = appendRune(str, r)
-				return str
-			}
-			c = iter.readByte()
-			if iter.Error != nil {
-				return nil
-			}
-			if c != 'u' {
-				str = appendRune(str, r)
-				return iter.readEscapedChar(c, str)
-			}
-			r2 := iter.readU4()
-			if iter.Error != nil {
-				return nil
-			}
-			combined := utf16.DecodeRune(r, r2)
-			if combined == '\uFFFD' {
-				str = appendRune(str, r)
-				str = appendRune(str, r2)
-			} else {
-				str = appendRune(str, combined)
-			}
-		} else {
-			str = appendRune(str, r)
-		}
-	case '"':
-		str = append(str, '"')
-	case '\\':
-		str = append(str, '\\')
-	case '/':
-		str = append(str, '/')
-	case 'b':
-		str = append(str, '\b')
-	case 'f':
-		str = append(str, '\f')
-	case 'n':
-		str = append(str, '\n')
-	case 'r':
-		str = append(str, '\r')
-	case 't':
-		str = append(str, '\t')
-	default:
-		iter.ReportError("readEscapedChar",
-			`invalid escape char after \`)
-		return nil
-	}
-	return str
-}
-
-// ReadStringAsSlice reads a string from the iterator without copying it into string form.
-// The []byte can not be kept, as it will change after the next iterator call.
-func (iter *Iterator) ReadStringAsSlice() (ret []byte) {
-	c := iter.nextToken()
-	if c == '"' {
-		for i := iter.head; i < iter.tail; i++ {
-			// require ascii string and no escape
-			// for: field name, base64, number
-			if iter.buf[i] == '"' {
-				// fast path: reuse the underlying buffer
-				ret = iter.buf[iter.head:i]
-				iter.head = i + 1
-				return ret
-			}
-		}
-		readLen := iter.tail - iter.head
-		copied := make([]byte, readLen, readLen*2)
-		copy(copied, iter.buf[iter.head:iter.tail])
-		iter.head = iter.tail
-		for iter.Error == nil {
-			c := iter.readByte()
-			if c == '"' {
-				return copied
-			}
-			copied = append(copied, c)
-		}
-		return copied
-	}
-	iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c}))
-	return
-}
-
-func (iter *Iterator) readU4() (ret rune) {
-	for i := 0; i < 4; i++ {
-		c := iter.readByte()
-		if iter.Error != nil {
-			return
-		}
-		if c >= '0' && c <= '9' {
-			ret = ret*16 + rune(c-'0')
-		} else if c >= 'a' && c <= 'f' {
-			ret = ret*16 + rune(c-'a'+10)
-		} else if c >= 'A' && c <= 'F' {
-			ret = ret*16 + rune(c-'A'+10)
-		} else {
-			iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c}))
-			return
-		}
-	}
-	return ret
-}
-
-const (
-	t1 = 0x00 // 0000 0000
-	tx = 0x80 // 1000 0000
-	t2 = 0xC0 // 1100 0000
-	t3 = 0xE0 // 1110 0000
-	t4 = 0xF0 // 1111 0000
-	t5 = 0xF8 // 1111 1000
-
-	maskx = 0x3F // 0011 1111
-	mask2 = 0x1F // 0001 1111
-	mask3 = 0x0F // 0000 1111
-	mask4 = 0x07 // 0000 0111
-
-	rune1Max = 1<<7 - 1
-	rune2Max = 1<<11 - 1
-	rune3Max = 1<<16 - 1
-
-	surrogateMin = 0xD800
-	surrogateMax = 0xDFFF
-
-	maxRune   = '\U0010FFFF' // Maximum valid Unicode code point.
-	runeError = '\uFFFD'     // the "error" Rune or "Unicode replacement character"
-)
-
-func appendRune(p []byte, r rune) []byte {
-	// Negative values are erroneous. Making it unsigned addresses the problem.
-	switch i := uint32(r); {
-	case i <= rune1Max:
-		p = append(p, byte(r))
-		return p
-	case i <= rune2Max:
-		p = append(p, t2|byte(r>>6))
-		p = append(p, tx|byte(r)&maskx)
-		return p
-	case i > maxRune, surrogateMin <= i && i <= surrogateMax:
-		r = runeError
-		fallthrough
-	case i <= rune3Max:
-		p = append(p, t3|byte(r>>12))
-		p = append(p, tx|byte(r>>6)&maskx)
-		p = append(p, tx|byte(r)&maskx)
-		return p
-	default:
-		p = append(p, t4|byte(r>>18))
-		p = append(p, tx|byte(r>>12)&maskx)
-		p = append(p, tx|byte(r>>6)&maskx)
-		p = append(p, tx|byte(r)&maskx)
-		return p
-	}
-}
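
The escape handling above combines UTF-16 surrogate pairs via utf16.DecodeRune and re-encodes the result with appendRune; a quick check (assuming `ConfigDefault`):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// \ud83d\ude00 is a surrogate pair; readEscapedChar combines it into U+1F600.
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `"\ud83d\ude00"`)
	s := iter.ReadString()
	fmt.Printf("%s %U\n", s, []rune(s)[0]) // 😀 U+1F600
}
```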
diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go
deleted file mode 100644
index c2934f9..0000000
--- a/vendor/github.com/json-iterator/go/jsoniter.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Package jsoniter implements encoding and decoding of JSON as defined in
-// RFC 4627 and provides interfaces with syntax identical to the standard library's encoding/json.
-// Converting from encoding/json to jsoniter requires no more than replacing the package import
-// and variable type declarations (if any).
-// The jsoniter interfaces give 100% compatibility with code using the standard library.
-//
-// "JSON and Go"
-// (https://golang.org/doc/articles/json_and_go.html)
-// gives a description of how Marshal/Unmarshal operate
-// between arbitrary or predefined json objects and bytes,
-// and it applies to jsoniter.Marshal/Unmarshal as well.
-//
-// In addition, jsoniter.Iterator provides a different set of interfaces
-// for iterating over given bytes/string/reader
-// and yielding parsed elements one by one.
-// This set of interfaces reads input only as required and gives
-// better performance.
-package jsoniter
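
The package doc's drop-in claim in practice (a minimal sketch; `ConfigCompatibleWithStandardLibrary` is the exported config intended for this):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// One import swap plus this declaration is the documented migration path.
var json = jsoniter.ConfigCompatibleWithStandardLibrary

func main() {
	type user struct {
		Name string `json:"name"`
	}
	out, _ := json.Marshal(user{Name: "gopher"})
	fmt.Println(string(out)) // {"name":"gopher"}
}
```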
diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go
deleted file mode 100644
index e2389b5..0000000
--- a/vendor/github.com/json-iterator/go/pool.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package jsoniter
-
-import (
-	"io"
-)
-
-// IteratorPool is a thread-safe pool of iterators with the same configuration
-type IteratorPool interface {
-	BorrowIterator(data []byte) *Iterator
-	ReturnIterator(iter *Iterator)
-}
-
-// StreamPool is a thread-safe pool of streams with the same configuration
-type StreamPool interface {
-	BorrowStream(writer io.Writer) *Stream
-	ReturnStream(stream *Stream)
-}
-
-func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream {
-	stream := cfg.streamPool.Get().(*Stream)
-	stream.Reset(writer)
-	return stream
-}
-
-func (cfg *frozenConfig) ReturnStream(stream *Stream) {
-	stream.out = nil
-	stream.Error = nil
-	stream.Attachment = nil
-	cfg.streamPool.Put(stream)
-}
-
-func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator {
-	iter := cfg.iteratorPool.Get().(*Iterator)
-	iter.ResetBytes(data)
-	return iter
-}
-
-func (cfg *frozenConfig) ReturnIterator(iter *Iterator) {
-	iter.Error = nil
-	iter.Attachment = nil
-	cfg.iteratorPool.Put(iter)
-}
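
A sketch of the borrow/return discipline these pools expect; note the buffer must be copied before the stream goes back to the pool (assuming `ConfigDefault`):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	cfg := jsoniter.ConfigDefault
	stream := cfg.BorrowStream(nil)
	defer cfg.ReturnStream(stream) // clears out, Error, Attachment before pooling

	stream.WriteVal([]int{1, 2, 3})
	out := append([]byte(nil), stream.Buffer()...) // copy: the buffer is reused
	fmt.Println(string(out))                       // [1,2,3]
}
```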
diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go
deleted file mode 100644
index 4459e20..0000000
--- a/vendor/github.com/json-iterator/go/reflect.go
+++ /dev/null
@@ -1,332 +0,0 @@
-package jsoniter
-
-import (
-	"fmt"
-	"reflect"
-	"unsafe"
-
-	"github.com/modern-go/reflect2"
-)
-
-// ValDecoder is an internal type registered to cache as needed.
-// Don't confuse jsoniter.ValDecoder with json.Decoder.
-// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link).
-//
-// Reflection on the type is used to create decoders, which are then cached.
-// Reflection on values is avoided as much as possible, since reflect.Value itself allocates, with the following exceptions:
-// 1. creating an instance of a new value, for example *int will need an int to be allocated
-// 2. appending to a slice, if the existing cap is not enough, allocation will be done using reflect.New
-// 3. assignment to a map, where both key and value will be reflect.Value
-// For a simple struct binding, decoding will be reflect.Value free and allocation free.
-type ValDecoder interface {
-	Decode(ptr unsafe.Pointer, iter *Iterator)
-}
-
-// ValEncoder is an internal type registered to cache as needed.
-// Don't confuse jsoniter.ValEncoder with json.Encoder.
-// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link).
-type ValEncoder interface {
-	IsEmpty(ptr unsafe.Pointer) bool
-	Encode(ptr unsafe.Pointer, stream *Stream)
-}
-
-type checkIsEmpty interface {
-	IsEmpty(ptr unsafe.Pointer) bool
-}
-
-type ctx struct {
-	*frozenConfig
-	prefix   string
-	encoders map[reflect2.Type]ValEncoder
-	decoders map[reflect2.Type]ValDecoder
-}
-
-func (b *ctx) caseSensitive() bool {
-	if b.frozenConfig == nil {
-		// default is case-insensitive
-		return false
-	}
-	return b.frozenConfig.caseSensitive
-}
-
-func (b *ctx) append(prefix string) *ctx {
-	return &ctx{
-		frozenConfig: b.frozenConfig,
-		prefix:       b.prefix + " " + prefix,
-		encoders:     b.encoders,
-		decoders:     b.decoders,
-	}
-}
-
-// ReadVal copies the underlying JSON into the go interface, same as json.Unmarshal
-func (iter *Iterator) ReadVal(obj interface{}) {
-	cacheKey := reflect2.RTypeOf(obj)
-	decoder := iter.cfg.getDecoderFromCache(cacheKey)
-	if decoder == nil {
-		typ := reflect2.TypeOf(obj)
-		if typ.Kind() != reflect.Ptr {
-			iter.ReportError("ReadVal", "can only unmarshal into pointer")
-			return
-		}
-		decoder = iter.cfg.DecoderOf(typ)
-	}
-	ptr := reflect2.PtrOf(obj)
-	if ptr == nil {
-		iter.ReportError("ReadVal", "can not read into nil pointer")
-		return
-	}
-	decoder.Decode(ptr, iter)
-}
-
-// WriteVal copies the go interface into the underlying JSON, same as json.Marshal
-func (stream *Stream) WriteVal(val interface{}) {
-	if nil == val {
-		stream.WriteNil()
-		return
-	}
-	cacheKey := reflect2.RTypeOf(val)
-	encoder := stream.cfg.getEncoderFromCache(cacheKey)
-	if encoder == nil {
-		typ := reflect2.TypeOf(val)
-		encoder = stream.cfg.EncoderOf(typ)
-	}
-	encoder.Encode(reflect2.PtrOf(val), stream)
-}
-
-func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder {
-	cacheKey := typ.RType()
-	decoder := cfg.getDecoderFromCache(cacheKey)
-	if decoder != nil {
-		return decoder
-	}
-	ctx := &ctx{
-		frozenConfig: cfg,
-		prefix:       "",
-		decoders:     map[reflect2.Type]ValDecoder{},
-		encoders:     map[reflect2.Type]ValEncoder{},
-	}
-	ptrType := typ.(*reflect2.UnsafePtrType)
-	decoder = decoderOfType(ctx, ptrType.Elem())
-	cfg.addDecoderToCache(cacheKey, decoder)
-	return decoder
-}
-
-func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
-	decoder := getTypeDecoderFromExtension(ctx, typ)
-	if decoder != nil {
-		return decoder
-	}
-	decoder = createDecoderOfType(ctx, typ)
-	for _, extension := range extensions {
-		decoder = extension.DecorateDecoder(typ, decoder)
-	}
-	decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
-	for _, extension := range ctx.extraExtensions {
-		decoder = extension.DecorateDecoder(typ, decoder)
-	}
-	return decoder
-}
-
-func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
-	decoder := ctx.decoders[typ]
-	if decoder != nil {
-		return decoder
-	}
-	placeholder := &placeholderDecoder{}
-	ctx.decoders[typ] = placeholder
-	decoder = _createDecoderOfType(ctx, typ)
-	placeholder.decoder = decoder
-	return decoder
-}
-
-func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
-	decoder := createDecoderOfJsonRawMessage(ctx, typ)
-	if decoder != nil {
-		return decoder
-	}
-	decoder = createDecoderOfJsonNumber(ctx, typ)
-	if decoder != nil {
-		return decoder
-	}
-	decoder = createDecoderOfMarshaler(ctx, typ)
-	if decoder != nil {
-		return decoder
-	}
-	decoder = createDecoderOfAny(ctx, typ)
-	if decoder != nil {
-		return decoder
-	}
-	decoder = createDecoderOfNative(ctx, typ)
-	if decoder != nil {
-		return decoder
-	}
-	switch typ.Kind() {
-	case reflect.Interface:
-		ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType)
-		if isIFace {
-			return &ifaceDecoder{valType: ifaceType}
-		}
-		return &efaceDecoder{}
-	case reflect.Struct:
-		return decoderOfStruct(ctx, typ)
-	case reflect.Array:
-		return decoderOfArray(ctx, typ)
-	case reflect.Slice:
-		return decoderOfSlice(ctx, typ)
-	case reflect.Map:
-		return decoderOfMap(ctx, typ)
-	case reflect.Ptr:
-		return decoderOfOptional(ctx, typ)
-	default:
-		return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
-	}
-}
-
-func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder {
-	cacheKey := typ.RType()
-	encoder := cfg.getEncoderFromCache(cacheKey)
-	if encoder != nil {
-		return encoder
-	}
-	ctx := &ctx{
-		frozenConfig: cfg,
-		prefix:       "",
-		decoders:     map[reflect2.Type]ValDecoder{},
-		encoders:     map[reflect2.Type]ValEncoder{},
-	}
-	encoder = encoderOfType(ctx, typ)
-	if typ.LikePtr() {
-		encoder = &onePtrEncoder{encoder}
-	}
-	cfg.addEncoderToCache(cacheKey, encoder)
-	return encoder
-}
-
-type onePtrEncoder struct {
-	encoder ValEncoder
-}
-
-func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
-}
-
-func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
-}
-
-func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
-	encoder := getTypeEncoderFromExtension(ctx, typ)
-	if encoder != nil {
-		return encoder
-	}
-	encoder = createEncoderOfType(ctx, typ)
-	for _, extension := range extensions {
-		encoder = extension.DecorateEncoder(typ, encoder)
-	}
-	encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
-	for _, extension := range ctx.extraExtensions {
-		encoder = extension.DecorateEncoder(typ, encoder)
-	}
-	return encoder
-}
-
-func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
-	encoder := ctx.encoders[typ]
-	if encoder != nil {
-		return encoder
-	}
-	placeholder := &placeholderEncoder{}
-	ctx.encoders[typ] = placeholder
-	encoder = _createEncoderOfType(ctx, typ)
-	placeholder.encoder = encoder
-	return encoder
-}
-func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
-	encoder := createEncoderOfJsonRawMessage(ctx, typ)
-	if encoder != nil {
-		return encoder
-	}
-	encoder = createEncoderOfJsonNumber(ctx, typ)
-	if encoder != nil {
-		return encoder
-	}
-	encoder = createEncoderOfMarshaler(ctx, typ)
-	if encoder != nil {
-		return encoder
-	}
-	encoder = createEncoderOfAny(ctx, typ)
-	if encoder != nil {
-		return encoder
-	}
-	encoder = createEncoderOfNative(ctx, typ)
-	if encoder != nil {
-		return encoder
-	}
-	kind := typ.Kind()
-	switch kind {
-	case reflect.Interface:
-		return &dynamicEncoder{typ}
-	case reflect.Struct:
-		return encoderOfStruct(ctx, typ)
-	case reflect.Array:
-		return encoderOfArray(ctx, typ)
-	case reflect.Slice:
-		return encoderOfSlice(ctx, typ)
-	case reflect.Map:
-		return encoderOfMap(ctx, typ)
-	case reflect.Ptr:
-		return encoderOfOptional(ctx, typ)
-	default:
-		return &lazyErrorEncoder{err: fmt.Errorf("%s%s is an unsupported type", ctx.prefix, typ.String())}
-	}
-}
-
-type lazyErrorDecoder struct {
-	err error
-}
-
-func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if iter.WhatIsNext() != NilValue {
-		if iter.Error == nil {
-			iter.Error = decoder.err
-		}
-	} else {
-		iter.Skip()
-	}
-}
-
-type lazyErrorEncoder struct {
-	err error
-}
-
-func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	if ptr == nil {
-		stream.WriteNil()
-	} else if stream.Error == nil {
-		stream.Error = encoder.err
-	}
-}
-
-func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return false
-}
-
-type placeholderDecoder struct {
-	decoder ValDecoder
-}
-
-func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	decoder.decoder.Decode(ptr, iter)
-}
-
-type placeholderEncoder struct {
-	encoder ValEncoder
-}
-
-func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	encoder.encoder.Encode(ptr, stream)
-}
-
-func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return encoder.encoder.IsEmpty(ptr)
-}
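
Note on the placeholder pattern above: createDecoderOfType and createEncoderOfType register a placeholder in the ctx map before recursing, so self-referential types resolve without infinite recursion. A minimal sketch of the kind of type this enables, assuming the package-level Marshal helper that jsoniter defines outside this hunk:

    package main

    import (
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    // Node is self-referential; without the placeholder registered first,
    // building its encoder/decoder would recurse forever.
    type Node struct {
        Val  int   `json:"val"`
        Next *Node `json:"next"`
    }

    func main() {
        out, err := jsoniter.Marshal(&Node{Val: 1, Next: &Node{Val: 2}})
        fmt.Println(string(out), err) // {"val":1,"next":{"val":2,"next":null}} <nil>
    }
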
diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go
deleted file mode 100644
index 13a0b7b..0000000
--- a/vendor/github.com/json-iterator/go/reflect_array.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package jsoniter
-
-import (
-	"fmt"
-	"github.com/modern-go/reflect2"
-	"io"
-	"unsafe"
-)
-
-func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder {
-	arrayType := typ.(*reflect2.UnsafeArrayType)
-	decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
-	return &arrayDecoder{arrayType, decoder}
-}
-
-func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder {
-	arrayType := typ.(*reflect2.UnsafeArrayType)
-	if arrayType.Len() == 0 {
-		return emptyArrayEncoder{}
-	}
-	encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
-	return &arrayEncoder{arrayType, encoder}
-}
-
-type emptyArrayEncoder struct{}
-
-func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteEmptyArray()
-}
-
-func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return true
-}
-
-type arrayEncoder struct {
-	arrayType   *reflect2.UnsafeArrayType
-	elemEncoder ValEncoder
-}
-
-func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteArrayStart()
-	elemPtr := unsafe.Pointer(ptr)
-	encoder.elemEncoder.Encode(elemPtr, stream)
-	for i := 1; i < encoder.arrayType.Len(); i++ {
-		stream.WriteMore()
-		elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i)
-		encoder.elemEncoder.Encode(elemPtr, stream)
-	}
-	stream.WriteArrayEnd()
-	if stream.Error != nil && stream.Error != io.EOF {
-		stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error())
-	}
-}
-
-func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return false
-}
-
-type arrayDecoder struct {
-	arrayType   *reflect2.UnsafeArrayType
-	elemDecoder ValDecoder
-}
-
-func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	decoder.doDecode(ptr, iter)
-	if iter.Error != nil && iter.Error != io.EOF {
-		iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error())
-	}
-}
-
-func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
-	c := iter.nextToken()
-	arrayType := decoder.arrayType
-	if c == 'n' {
-		iter.skipThreeBytes('u', 'l', 'l')
-		return
-	}
-	if c != '[' {
-		iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c}))
-		return
-	}
-	c = iter.nextToken()
-	if c == ']' {
-		return
-	}
-	iter.unreadByte()
-	elemPtr := arrayType.UnsafeGetIndex(ptr, 0)
-	decoder.elemDecoder.Decode(elemPtr, iter)
-	length := 1
-	for c = iter.nextToken(); c == ','; c = iter.nextToken() {
-		if length >= arrayType.Len() {
-			iter.Skip()
-			continue
-		}
-		idx := length
-		length++
-		elemPtr = arrayType.UnsafeGetIndex(ptr, idx)
-		decoder.elemDecoder.Decode(elemPtr, iter)
-	}
-	if c != ']' {
-		iter.ReportError("decode array", "expect ], but found "+string([]byte{c}))
-		return
-	}
-}
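
For reference, arrayDecoder above skips surplus JSON elements and leaves missing trailing elements at their zero value, matching encoding/json. A sketch, assuming the package-level Unmarshal helper:

    package main

    import (
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    func main() {
        var a [2]int
        _ = jsoniter.Unmarshal([]byte(`[1, 2, 3]`), &a)
        fmt.Println(a) // [1 2]  -- the surplus element was skipped

        var b [3]int
        _ = jsoniter.Unmarshal([]byte(`[7]`), &b)
        fmt.Println(b) // [7 0 0] -- missing elements keep their zero value
    }
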
diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go
deleted file mode 100644
index 8b6bc8b..0000000
--- a/vendor/github.com/json-iterator/go/reflect_dynamic.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package jsoniter
-
-import (
-	"github.com/modern-go/reflect2"
-	"reflect"
-	"unsafe"
-)
-
-type dynamicEncoder struct {
-	valType reflect2.Type
-}
-
-func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	obj := encoder.valType.UnsafeIndirect(ptr)
-	stream.WriteVal(obj)
-}
-
-func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return encoder.valType.UnsafeIndirect(ptr) == nil
-}
-
-type efaceDecoder struct {
-}
-
-func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	pObj := (*interface{})(ptr)
-	obj := *pObj
-	if obj == nil {
-		*pObj = iter.Read()
-		return
-	}
-	typ := reflect2.TypeOf(obj)
-	if typ.Kind() != reflect.Ptr {
-		*pObj = iter.Read()
-		return
-	}
-	ptrType := typ.(*reflect2.UnsafePtrType)
-	ptrElemType := ptrType.Elem()
-	if iter.WhatIsNext() == NilValue {
-		if ptrElemType.Kind() != reflect.Ptr {
-			iter.skipFourBytes('n', 'u', 'l', 'l')
-			*pObj = nil
-			return
-		}
-	}
-	if reflect2.IsNil(obj) {
-		obj := ptrElemType.New()
-		iter.ReadVal(obj)
-		*pObj = obj
-		return
-	}
-	iter.ReadVal(obj)
-}
-
-type ifaceDecoder struct {
-	valType *reflect2.UnsafeIFaceType
-}
-
-func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if iter.ReadNil() {
-		decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew())
-		return
-	}
-	obj := decoder.valType.UnsafeIndirect(ptr)
-	if reflect2.IsNil(obj) {
-		iter.ReportError("decode non empty interface", "can not unmarshal into nil")
-		return
-	}
-	iter.ReadVal(obj)
-}
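
efaceDecoder above decodes in place only when the interface{} already holds a non-nil pointer; anything else falls back to iter.Read(), which produces generic values. A sketch, where T is a hypothetical type and Unmarshal is the assumed package-level helper:

    package main

    import (
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    type T struct{ A int }

    func main() {
        var anyVal interface{}
        _ = jsoniter.Unmarshal([]byte(`{"a":1}`), &anyVal)
        fmt.Printf("%T\n", anyVal) // map[string]interface {}

        var target interface{} = &T{}
        _ = jsoniter.Unmarshal([]byte(`{"A":41}`), &target)
        fmt.Println(target.(*T).A) // 41 -- the existing *T was decoded into
    }
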
diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go
deleted file mode 100644
index 05e8fbf..0000000
--- a/vendor/github.com/json-iterator/go/reflect_extension.go
+++ /dev/null
@@ -1,483 +0,0 @@
-package jsoniter
-
-import (
-	"fmt"
-	"github.com/modern-go/reflect2"
-	"reflect"
-	"sort"
-	"strings"
-	"unicode"
-	"unsafe"
-)
-
-var typeDecoders = map[string]ValDecoder{}
-var fieldDecoders = map[string]ValDecoder{}
-var typeEncoders = map[string]ValEncoder{}
-var fieldEncoders = map[string]ValEncoder{}
-var extensions = []Extension{}
-
-// StructDescriptor describes how the struct should be encoded/decoded
-type StructDescriptor struct {
-	Type   reflect2.Type
-	Fields []*Binding
-}
-
-// GetField gets one field from the descriptor by its name.
-// A map is not used here, so that the field order is preserved.
-func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding {
-	for _, binding := range structDescriptor.Fields {
-		if binding.Field.Name() == fieldName {
-			return binding
-		}
-	}
-	return nil
-}
-
-// Binding describes how a struct field should be encoded/decoded
-type Binding struct {
-	levels    []int
-	Field     reflect2.StructField
-	FromNames []string
-	ToNames   []string
-	Encoder   ValEncoder
-	Decoder   ValDecoder
-}
-
-// Extension is the one-for-all SPI. Customize encoding/decoding by specifying an alternate encoder/decoder.
-// Fields can also be renamed via UpdateStructDescriptor.
-type Extension interface {
-	UpdateStructDescriptor(structDescriptor *StructDescriptor)
-	CreateMapKeyDecoder(typ reflect2.Type) ValDecoder
-	CreateMapKeyEncoder(typ reflect2.Type) ValEncoder
-	CreateDecoder(typ reflect2.Type) ValDecoder
-	CreateEncoder(typ reflect2.Type) ValEncoder
-	DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder
-	DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder
-}
-
-// DummyExtension: embed this type to get no-op implementations of all Extension methods
-type DummyExtension struct {
-}
-
-// UpdateStructDescriptor No-op
-func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
-}
-
-// CreateMapKeyDecoder No-op
-func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
-	return nil
-}
-
-// CreateMapKeyEncoder No-op
-func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
-	return nil
-}
-
-// CreateDecoder No-op
-func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
-	return nil
-}
-
-// CreateEncoder No-op
-func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
-	return nil
-}
-
-// DecorateDecoder No-op
-func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
-	return decoder
-}
-
-// DecorateEncoder No-op
-func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
-	return encoder
-}
-
-type EncoderExtension map[reflect2.Type]ValEncoder
-
-// UpdateStructDescriptor No-op
-func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
-}
-
-// CreateDecoder No-op
-func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
-	return nil
-}
-
-// CreateEncoder gets the encoder from the map
-func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
-	return extension[typ]
-}
-
-// CreateMapKeyDecoder No-op
-func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
-	return nil
-}
-
-// CreateMapKeyEncoder No-op
-func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
-	return nil
-}
-
-// DecorateDecoder No-op
-func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
-	return decoder
-}
-
-// DecorateEncoder No-op
-func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
-	return encoder
-}
-
-type DecoderExtension map[reflect2.Type]ValDecoder
-
-// UpdateStructDescriptor No-op
-func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
-}
-
-// CreateMapKeyDecoder No-op
-func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
-	return nil
-}
-
-// CreateMapKeyEncoder No-op
-func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
-	return nil
-}
-
-// CreateDecoder gets the decoder from the map
-func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
-	return extension[typ]
-}
-
-// CreateEncoder No-op
-func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
-	return nil
-}
-
-// DecorateDecoder No-op
-func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
-	return decoder
-}
-
-// DecorateEncoder No-op
-func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
-	return encoder
-}
-
-type funcDecoder struct {
-	fun DecoderFunc
-}
-
-func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	decoder.fun(ptr, iter)
-}
-
-type funcEncoder struct {
-	fun         EncoderFunc
-	isEmptyFunc func(ptr unsafe.Pointer) bool
-}
-
-func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	encoder.fun(ptr, stream)
-}
-
-func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	if encoder.isEmptyFunc == nil {
-		return false
-	}
-	return encoder.isEmptyFunc(ptr)
-}
-
-// DecoderFunc is the function form of a TypeDecoder
-type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator)
-
-// EncoderFunc is the function form of a TypeEncoder
-type EncoderFunc func(ptr unsafe.Pointer, stream *Stream)
-
-// RegisterTypeDecoderFunc registers a TypeDecoder for a type, given as a function
-func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) {
-	typeDecoders[typ] = &funcDecoder{fun}
-}
-
-// RegisterTypeDecoder registers a TypeDecoder for a type
-func RegisterTypeDecoder(typ string, decoder ValDecoder) {
-	typeDecoders[typ] = decoder
-}
-
-// RegisterFieldDecoderFunc registers a decoder for a struct field, given as a function
-func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) {
-	RegisterFieldDecoder(typ, field, &funcDecoder{fun})
-}
-
-// RegisterFieldDecoder registers a decoder for a struct field
-func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) {
-	fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder
-}
-
-// RegisterTypeEncoderFunc registers a TypeEncoder for a type, given as encode/isEmpty functions
-func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
-	typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc}
-}
-
-// RegisterTypeEncoder registers a TypeEncoder for a type
-func RegisterTypeEncoder(typ string, encoder ValEncoder) {
-	typeEncoders[typ] = encoder
-}
-
-// RegisterFieldEncoderFunc registers an encoder for a struct field, given as encode/isEmpty functions
-func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
-	RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc})
-}
-
-// RegisterFieldEncoder registers an encoder for a struct field
-func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) {
-	fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder
-}
-
-// RegisterExtension registers an extension
-func RegisterExtension(extension Extension) {
-	extensions = append(extensions, extension)
-}
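
The Extension SPI above is easiest to see with a concrete example: embed DummyExtension for no-op defaults, override UpdateStructDescriptor to rename fields, and install the extension with RegisterExtension. upperCaseExtension below is hypothetical, not part of the library, and the package-level Marshal helper is assumed from elsewhere in the package:

    package main

    import (
        "fmt"
        "strings"

        jsoniter "github.com/json-iterator/go"
    )

    // upperCaseExtension is a hypothetical example extension.
    type upperCaseExtension struct {
        jsoniter.DummyExtension // no-op defaults for the rest of the SPI
    }

    func (e *upperCaseExtension) UpdateStructDescriptor(sd *jsoniter.StructDescriptor) {
        for _, binding := range sd.Fields {
            name := strings.ToUpper(binding.Field.Name())
            binding.ToNames = []string{name}   // names written when encoding
            binding.FromNames = []string{name} // names accepted when decoding
        }
    }

    func main() {
        jsoniter.RegisterExtension(&upperCaseExtension{})
        out, _ := jsoniter.Marshal(struct{ Name string }{"ann"})
        fmt.Println(string(out)) // {"NAME":"ann"}
    }
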
-
-func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
-	decoder := _getTypeDecoderFromExtension(ctx, typ)
-	if decoder != nil {
-		for _, extension := range extensions {
-			decoder = extension.DecorateDecoder(typ, decoder)
-		}
-		decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
-		for _, extension := range ctx.extraExtensions {
-			decoder = extension.DecorateDecoder(typ, decoder)
-		}
-	}
-	return decoder
-}
-func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
-	for _, extension := range extensions {
-		decoder := extension.CreateDecoder(typ)
-		if decoder != nil {
-			return decoder
-		}
-	}
-	decoder := ctx.decoderExtension.CreateDecoder(typ)
-	if decoder != nil {
-		return decoder
-	}
-	for _, extension := range ctx.extraExtensions {
-		decoder := extension.CreateDecoder(typ)
-		if decoder != nil {
-			return decoder
-		}
-	}
-	typeName := typ.String()
-	decoder = typeDecoders[typeName]
-	if decoder != nil {
-		return decoder
-	}
-	if typ.Kind() == reflect.Ptr {
-		ptrType := typ.(*reflect2.UnsafePtrType)
-		decoder := typeDecoders[ptrType.Elem().String()]
-		if decoder != nil {
-			return &OptionalDecoder{ptrType.Elem(), decoder}
-		}
-	}
-	return nil
-}
-
-func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
-	encoder := _getTypeEncoderFromExtension(ctx, typ)
-	if encoder != nil {
-		for _, extension := range extensions {
-			encoder = extension.DecorateEncoder(typ, encoder)
-		}
-		encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
-		for _, extension := range ctx.extraExtensions {
-			encoder = extension.DecorateEncoder(typ, encoder)
-		}
-	}
-	return encoder
-}
-
-func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
-	for _, extension := range extensions {
-		encoder := extension.CreateEncoder(typ)
-		if encoder != nil {
-			return encoder
-		}
-	}
-	encoder := ctx.encoderExtension.CreateEncoder(typ)
-	if encoder != nil {
-		return encoder
-	}
-	for _, extension := range ctx.extraExtensions {
-		encoder := extension.CreateEncoder(typ)
-		if encoder != nil {
-			return encoder
-		}
-	}
-	typeName := typ.String()
-	encoder = typeEncoders[typeName]
-	if encoder != nil {
-		return encoder
-	}
-	if typ.Kind() == reflect.Ptr {
-		typePtr := typ.(*reflect2.UnsafePtrType)
-		encoder := typeEncoders[typePtr.Elem().String()]
-		if encoder != nil {
-			return &OptionalEncoder{encoder}
-		}
-	}
-	return nil
-}
-
-func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
-	structType := typ.(*reflect2.UnsafeStructType)
-	embeddedBindings := []*Binding{}
-	bindings := []*Binding{}
-	for i := 0; i < structType.NumField(); i++ {
-		field := structType.Field(i)
-		tag, hastag := field.Tag().Lookup(ctx.getTagKey())
-		if ctx.onlyTaggedField && !hastag && !field.Anonymous() {
-			continue
-		}
-		tagParts := strings.Split(tag, ",")
-		if tag == "-" {
-			continue
-		}
-		if field.Anonymous() && (tag == "" || tagParts[0] == "") {
-			if field.Type().Kind() == reflect.Struct {
-				structDescriptor := describeStruct(ctx, field.Type())
-				for _, binding := range structDescriptor.Fields {
-					binding.levels = append([]int{i}, binding.levels...)
-					omitempty := binding.Encoder.(*structFieldEncoder).omitempty
-					binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
-					binding.Decoder = &structFieldDecoder{field, binding.Decoder}
-					embeddedBindings = append(embeddedBindings, binding)
-				}
-				continue
-			} else if field.Type().Kind() == reflect.Ptr {
-				ptrType := field.Type().(*reflect2.UnsafePtrType)
-				if ptrType.Elem().Kind() == reflect.Struct {
-					structDescriptor := describeStruct(ctx, ptrType.Elem())
-					for _, binding := range structDescriptor.Fields {
-						binding.levels = append([]int{i}, binding.levels...)
-						omitempty := binding.Encoder.(*structFieldEncoder).omitempty
-						binding.Encoder = &dereferenceEncoder{binding.Encoder}
-						binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
-						binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder}
-						binding.Decoder = &structFieldDecoder{field, binding.Decoder}
-						embeddedBindings = append(embeddedBindings, binding)
-					}
-					continue
-				}
-			}
-		}
-		fieldNames := calcFieldNames(field.Name(), tagParts[0], tag)
-		fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name())
-		decoder := fieldDecoders[fieldCacheKey]
-		if decoder == nil {
-			decoder = decoderOfType(ctx.append(field.Name()), field.Type())
-		}
-		encoder := fieldEncoders[fieldCacheKey]
-		if encoder == nil {
-			encoder = encoderOfType(ctx.append(field.Name()), field.Type())
-		}
-		binding := &Binding{
-			Field:     field,
-			FromNames: fieldNames,
-			ToNames:   fieldNames,
-			Decoder:   decoder,
-			Encoder:   encoder,
-		}
-		binding.levels = []int{i}
-		bindings = append(bindings, binding)
-	}
-	return createStructDescriptor(ctx, typ, bindings, embeddedBindings)
-}
-func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor {
-	structDescriptor := &StructDescriptor{
-		Type:   typ,
-		Fields: bindings,
-	}
-	for _, extension := range extensions {
-		extension.UpdateStructDescriptor(structDescriptor)
-	}
-	ctx.encoderExtension.UpdateStructDescriptor(structDescriptor)
-	ctx.decoderExtension.UpdateStructDescriptor(structDescriptor)
-	for _, extension := range ctx.extraExtensions {
-		extension.UpdateStructDescriptor(structDescriptor)
-	}
-	processTags(structDescriptor, ctx.frozenConfig)
-	// merge normal and embedded bindings, then sort them in the original field order
-	allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...))
-	sort.Sort(allBindings)
-	structDescriptor.Fields = allBindings
-	return structDescriptor
-}
-
-type sortableBindings []*Binding
-
-func (bindings sortableBindings) Len() int {
-	return len(bindings)
-}
-
-func (bindings sortableBindings) Less(i, j int) bool {
-	left := bindings[i].levels
-	right := bindings[j].levels
-	k := 0
-	for {
-		if left[k] < right[k] {
-			return true
-		} else if left[k] > right[k] {
-			return false
-		}
-		k++
-	}
-}
-
-func (bindings sortableBindings) Swap(i, j int) {
-	bindings[i], bindings[j] = bindings[j], bindings[i]
-}
-
-func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) {
-	for _, binding := range structDescriptor.Fields {
-		shouldOmitEmpty := false
-		tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",")
-		for _, tagPart := range tagParts[1:] {
-			if tagPart == "omitempty" {
-				shouldOmitEmpty = true
-			} else if tagPart == "string" {
-				if binding.Field.Type().Kind() == reflect.String {
-					binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg}
-					binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg}
-				} else {
-					binding.Decoder = &stringModeNumberDecoder{binding.Decoder}
-					binding.Encoder = &stringModeNumberEncoder{binding.Encoder}
-				}
-			}
-		}
-		binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder}
-		binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty}
-	}
-}
-
-func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string {
-	// ignore?
-	if wholeTag == "-" {
-		return []string{}
-	}
-	// rename?
-	var fieldNames []string
-	if tagProvidedFieldName == "" {
-		fieldNames = []string{originalFieldName}
-	} else {
-		fieldNames = []string{tagProvidedFieldName}
-	}
-	// private?
-	isNotExported := unicode.IsLower(rune(originalFieldName[0]))
-	if isNotExported {
-		fieldNames = []string{}
-	}
-	return fieldNames
-}
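
calcFieldNames (together with processTags above) implements the familiar encoding/json tag semantics: `-` drops a field, a non-empty first tag part renames it, omitempty and string are handled in processTags, and unexported fields are never encoded. A hypothetical sketch:

    package main

    import (
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    type User struct {
        ID       int    `json:"id,string"`  // "string" option: number written inside quotes
        Name     string `json:"name"`       // renamed
        Email    string `json:"-"`          // always ignored
        Nick     string `json:",omitempty"` // keeps the Go name, dropped when empty
        internal string // unexported: never encoded
    }

    func main() {
        out, _ := jsoniter.Marshal(User{ID: 7, Name: "Ann", Email: "a@x", Nick: "nn"})
        fmt.Println(string(out)) // {"id":"7","name":"Ann","Nick":"nn"}
    }
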
diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go
deleted file mode 100644
index 98d45c1..0000000
--- a/vendor/github.com/json-iterator/go/reflect_json_number.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package jsoniter
-
-import (
-	"encoding/json"
-	"github.com/modern-go/reflect2"
-	"strconv"
-	"unsafe"
-)
-
-type Number string
-
-// String returns the literal text of the number.
-func (n Number) String() string { return string(n) }
-
-// Float64 returns the number as a float64.
-func (n Number) Float64() (float64, error) {
-	return strconv.ParseFloat(string(n), 64)
-}
-
-// Int64 returns the number as an int64.
-func (n Number) Int64() (int64, error) {
-	return strconv.ParseInt(string(n), 10, 64)
-}
-
-func CastJsonNumber(val interface{}) (string, bool) {
-	switch typedVal := val.(type) {
-	case json.Number:
-		return string(typedVal), true
-	case Number:
-		return string(typedVal), true
-	}
-	return "", false
-}
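
Number above mirrors json.Number, and CastJsonNumber normalizes either form to its literal text. For example:

    package main

    import (
        "encoding/json"
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    func main() {
        f, _ := jsoniter.Number("3.14").Float64()
        i, _ := jsoniter.Number("42").Int64()
        s, ok := jsoniter.CastJsonNumber(json.Number("1e3"))
        fmt.Println(f, i, s, ok) // 3.14 42 1e3 true
    }
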
-
-var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem()
-var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem()
-
-func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder {
-	if typ.AssignableTo(jsonNumberType) {
-		return &jsonNumberCodec{}
-	}
-	if typ.AssignableTo(jsoniterNumberType) {
-		return &jsoniterNumberCodec{}
-	}
-	return nil
-}
-
-func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder {
-	if typ.AssignableTo(jsonNumberType) {
-		return &jsonNumberCodec{}
-	}
-	if typ.AssignableTo(jsoniterNumberType) {
-		return &jsoniterNumberCodec{}
-	}
-	return nil
-}
-
-type jsonNumberCodec struct {
-}
-
-func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	switch iter.WhatIsNext() {
-	case StringValue:
-		*((*json.Number)(ptr)) = json.Number(iter.ReadString())
-	case NilValue:
-		iter.skipFourBytes('n', 'u', 'l', 'l')
-		*((*json.Number)(ptr)) = ""
-	default:
-		*((*json.Number)(ptr)) = json.Number(iter.readNumberAsString())
-	}
-}
-
-func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
-	number := *((*json.Number)(ptr))
-	if len(number) == 0 {
-		stream.writeByte('0')
-	} else {
-		stream.WriteRaw(string(number))
-	}
-}
-
-func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
-	return len(*((*json.Number)(ptr))) == 0
-}
-
-type jsoniterNumberCodec struct {
-}
-
-func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	switch iter.WhatIsNext() {
-	case StringValue:
-		*((*Number)(ptr)) = Number(iter.ReadString())
-	case NilValue:
-		iter.skipFourBytes('n', 'u', 'l', 'l')
-		*((*Number)(ptr)) = ""
-	default:
-		*((*Number)(ptr)) = Number(iter.readNumberAsString())
-	}
-}
-
-func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
-	number := *((*Number)(ptr))
-	if len(number) == 0 {
-		stream.writeByte('0')
-	} else {
-		stream.WriteRaw(string(number))
-	}
-}
-
-func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
-	return len(*((*Number)(ptr))) == 0
-}
diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
deleted file mode 100644
index f261993..0000000
--- a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package jsoniter
-
-import (
-	"encoding/json"
-	"github.com/modern-go/reflect2"
-	"unsafe"
-)
-
-var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()
-var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()
-
-func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder {
-	if typ == jsonRawMessageType {
-		return &jsonRawMessageCodec{}
-	}
-	if typ == jsoniterRawMessageType {
-		return &jsoniterRawMessageCodec{}
-	}
-	return nil
-}
-
-func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder {
-	if typ == jsonRawMessageType {
-		return &jsonRawMessageCodec{}
-	}
-	if typ == jsoniterRawMessageType {
-		return &jsoniterRawMessageCodec{}
-	}
-	return nil
-}
-
-type jsonRawMessageCodec struct {
-}
-
-func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	*((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes())
-}
-
-func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
-}
-
-func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
-	return len(*((*json.RawMessage)(ptr))) == 0
-}
-
-type jsoniterRawMessageCodec struct {
-}
-
-func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	*((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes())
-}
-
-func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteRaw(string(*((*RawMessage)(ptr))))
-}
-
-func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
-	return len(*((*RawMessage)(ptr))) == 0
-}
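
Both raw-message codecs simply capture and re-emit raw bytes, which makes RawMessage useful for deferring part of a decode. A sketch with a hypothetical Envelope type, assuming the package-level Marshal/Unmarshal helpers:

    package main

    import (
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    type Envelope struct {
        Kind    string              `json:"kind"`
        Payload jsoniter.RawMessage `json:"payload"`
    }

    func main() {
        var env Envelope
        _ = jsoniter.Unmarshal([]byte(`{"kind":"point","payload":{"x":1,"y":2}}`), &env)
        fmt.Println(string(env.Payload)) // {"x":1,"y":2} -- kept verbatim
        out, _ := jsoniter.Marshal(env)  // re-emits the payload bytes unchanged
        fmt.Println(string(out))
    }
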
diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go
deleted file mode 100644
index 547b442..0000000
--- a/vendor/github.com/json-iterator/go/reflect_map.go
+++ /dev/null
@@ -1,338 +0,0 @@
-package jsoniter
-
-import (
-	"fmt"
-	"github.com/modern-go/reflect2"
-	"io"
-	"reflect"
-	"sort"
-	"unsafe"
-)
-
-func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder {
-	mapType := typ.(*reflect2.UnsafeMapType)
-	keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key())
-	elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem())
-	return &mapDecoder{
-		mapType:     mapType,
-		keyType:     mapType.Key(),
-		elemType:    mapType.Elem(),
-		keyDecoder:  keyDecoder,
-		elemDecoder: elemDecoder,
-	}
-}
-
-func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder {
-	mapType := typ.(*reflect2.UnsafeMapType)
-	if ctx.sortMapKeys {
-		return &sortKeysMapEncoder{
-			mapType:     mapType,
-			keyEncoder:  encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
-			elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
-		}
-	}
-	return &mapEncoder{
-		mapType:     mapType,
-		keyEncoder:  encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
-		elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
-	}
-}
-
-func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
-	decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ)
-	if decoder != nil {
-		return decoder
-	}
-	for _, extension := range ctx.extraExtensions {
-		decoder := extension.CreateMapKeyDecoder(typ)
-		if decoder != nil {
-			return decoder
-		}
-	}
-	switch typ.Kind() {
-	case reflect.String:
-		return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
-	case reflect.Bool,
-		reflect.Uint8, reflect.Int8,
-		reflect.Uint16, reflect.Int16,
-		reflect.Uint32, reflect.Int32,
-		reflect.Uint64, reflect.Int64,
-		reflect.Uint, reflect.Int,
-		reflect.Float32, reflect.Float64,
-		reflect.Uintptr:
-		typ = reflect2.DefaultTypeOfKind(typ.Kind())
-		return &numericMapKeyDecoder{decoderOfType(ctx, typ)}
-	default:
-		ptrType := reflect2.PtrTo(typ)
-		if ptrType.Implements(unmarshalerType) {
-			return &referenceDecoder{
-				&unmarshalerDecoder{
-					valType: ptrType,
-				},
-			}
-		}
-		if typ.Implements(unmarshalerType) {
-			return &unmarshalerDecoder{
-				valType: typ,
-			}
-		}
-		if ptrType.Implements(textUnmarshalerType) {
-			return &referenceDecoder{
-				&textUnmarshalerDecoder{
-					valType: ptrType,
-				},
-			}
-		}
-		if typ.Implements(textUnmarshalerType) {
-			return &textUnmarshalerDecoder{
-				valType: typ,
-			}
-		}
-		return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
-	}
-}
-
-func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
-	encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ)
-	if encoder != nil {
-		return encoder
-	}
-	for _, extension := range ctx.extraExtensions {
-		encoder := extension.CreateMapKeyEncoder(typ)
-		if encoder != nil {
-			return encoder
-		}
-	}
-	switch typ.Kind() {
-	case reflect.String:
-		return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
-	case reflect.Bool,
-		reflect.Uint8, reflect.Int8,
-		reflect.Uint16, reflect.Int16,
-		reflect.Uint32, reflect.Int32,
-		reflect.Uint64, reflect.Int64,
-		reflect.Uint, reflect.Int,
-		reflect.Float32, reflect.Float64,
-		reflect.Uintptr:
-		typ = reflect2.DefaultTypeOfKind(typ.Kind())
-		return &numericMapKeyEncoder{encoderOfType(ctx, typ)}
-	default:
-		if typ == textMarshalerType {
-			return &directTextMarshalerEncoder{
-				stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
-			}
-		}
-		if typ.Implements(textMarshalerType) {
-			return &textMarshalerEncoder{
-				valType:       typ,
-				stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
-			}
-		}
-		if typ.Kind() == reflect.Interface {
-			return &dynamicMapKeyEncoder{ctx, typ}
-		}
-		return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
-	}
-}
-
-type mapDecoder struct {
-	mapType     *reflect2.UnsafeMapType
-	keyType     reflect2.Type
-	elemType    reflect2.Type
-	keyDecoder  ValDecoder
-	elemDecoder ValDecoder
-}
-
-func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	mapType := decoder.mapType
-	c := iter.nextToken()
-	if c == 'n' {
-		iter.skipThreeBytes('u', 'l', 'l')
-		*(*unsafe.Pointer)(ptr) = nil
-		mapType.UnsafeSet(ptr, mapType.UnsafeNew())
-		return
-	}
-	if mapType.UnsafeIsNil(ptr) {
-		mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0))
-	}
-	if c != '{' {
-		iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
-		return
-	}
-	c = iter.nextToken()
-	if c == '}' {
-		return
-	}
-	if c != '"' {
-		iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c}))
-		return
-	}
-	iter.unreadByte()
-	key := decoder.keyType.UnsafeNew()
-	decoder.keyDecoder.Decode(key, iter)
-	c = iter.nextToken()
-	if c != ':' {
-		iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
-		return
-	}
-	elem := decoder.elemType.UnsafeNew()
-	decoder.elemDecoder.Decode(elem, iter)
-	decoder.mapType.UnsafeSetIndex(ptr, key, elem)
-	for c = iter.nextToken(); c == ','; c = iter.nextToken() {
-		key := decoder.keyType.UnsafeNew()
-		decoder.keyDecoder.Decode(key, iter)
-		c = iter.nextToken()
-		if c != ':' {
-			iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
-			return
-		}
-		elem := decoder.elemType.UnsafeNew()
-		decoder.elemDecoder.Decode(elem, iter)
-		decoder.mapType.UnsafeSetIndex(ptr, key, elem)
-	}
-	if c != '}' {
-		iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c}))
-	}
-}
-
-type numericMapKeyDecoder struct {
-	decoder ValDecoder
-}
-
-func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	c := iter.nextToken()
-	if c != '"' {
-		iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
-		return
-	}
-	decoder.decoder.Decode(ptr, iter)
-	c = iter.nextToken()
-	if c != '"' {
-		iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
-		return
-	}
-}
-
-type numericMapKeyEncoder struct {
-	encoder ValEncoder
-}
-
-func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.writeByte('"')
-	encoder.encoder.Encode(ptr, stream)
-	stream.writeByte('"')
-}
-
-func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return false
-}
-
-type dynamicMapKeyEncoder struct {
-	ctx     *ctx
-	valType reflect2.Type
-}
-
-func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	obj := encoder.valType.UnsafeIndirect(ptr)
-	encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream)
-}
-
-func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	obj := encoder.valType.UnsafeIndirect(ptr)
-	return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj))
-}
-
-type mapEncoder struct {
-	mapType     *reflect2.UnsafeMapType
-	keyEncoder  ValEncoder
-	elemEncoder ValEncoder
-}
-
-func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteObjectStart()
-	iter := encoder.mapType.UnsafeIterate(ptr)
-	for i := 0; iter.HasNext(); i++ {
-		if i != 0 {
-			stream.WriteMore()
-		}
-		key, elem := iter.UnsafeNext()
-		encoder.keyEncoder.Encode(key, stream)
-		if stream.indention > 0 {
-			stream.writeTwoBytes(byte(':'), byte(' '))
-		} else {
-			stream.writeByte(':')
-		}
-		encoder.elemEncoder.Encode(elem, stream)
-	}
-	stream.WriteObjectEnd()
-}
-
-func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	iter := encoder.mapType.UnsafeIterate(ptr)
-	return !iter.HasNext()
-}
-
-type sortKeysMapEncoder struct {
-	mapType     *reflect2.UnsafeMapType
-	keyEncoder  ValEncoder
-	elemEncoder ValEncoder
-}
-
-func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	if *(*unsafe.Pointer)(ptr) == nil {
-		stream.WriteNil()
-		return
-	}
-	stream.WriteObjectStart()
-	mapIter := encoder.mapType.UnsafeIterate(ptr)
-	subStream := stream.cfg.BorrowStream(nil)
-	subIter := stream.cfg.BorrowIterator(nil)
-	keyValues := encodedKeyValues{}
-	for mapIter.HasNext() {
-		subStream.buf = make([]byte, 0, 64)
-		key, elem := mapIter.UnsafeNext()
-		encoder.keyEncoder.Encode(key, subStream)
-		if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil {
-			stream.Error = subStream.Error
-		}
-		encodedKey := subStream.Buffer()
-		subIter.ResetBytes(encodedKey)
-		decodedKey := subIter.ReadString()
-		if stream.indention > 0 {
-			subStream.writeTwoBytes(byte(':'), byte(' '))
-		} else {
-			subStream.writeByte(':')
-		}
-		encoder.elemEncoder.Encode(elem, subStream)
-		keyValues = append(keyValues, encodedKV{
-			key:      decodedKey,
-			keyValue: subStream.Buffer(),
-		})
-	}
-	sort.Sort(keyValues)
-	for i, keyValue := range keyValues {
-		if i != 0 {
-			stream.WriteMore()
-		}
-		stream.Write(keyValue.keyValue)
-	}
-	stream.WriteObjectEnd()
-	stream.cfg.ReturnStream(subStream)
-	stream.cfg.ReturnIterator(subIter)
-}
-
-func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	iter := encoder.mapType.UnsafeIterate(ptr)
-	return !iter.HasNext()
-}
-
-type encodedKeyValues []encodedKV
-
-type encodedKV struct {
-	key      string
-	keyValue []byte
-}
-
-func (sv encodedKeyValues) Len() int           { return len(sv) }
-func (sv encodedKeyValues) Swap(i, j int)      { sv[i], sv[j] = sv[j], sv[i] }
-func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key }
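
Two behaviors of the map encoders above are worth illustrating: numericMapKeyEncoder wraps numeric keys in quotes (JSON object keys must be strings), and sortKeysMapEncoder is selected when the config enables sorted keys. The Config/Froze API is assumed from the package's config file, which is outside this hunk:

    package main

    import (
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    func main() {
        api := jsoniter.Config{SortMapKeys: true}.Froze() // assumed config API
        out, _ := api.Marshal(map[int]string{3: "c", 1: "a", 2: "b"})
        fmt.Println(string(out)) // {"1":"a","2":"b","3":"c"} -- keys quoted and sorted
    }
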
diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go
deleted file mode 100644
index fea5071..0000000
--- a/vendor/github.com/json-iterator/go/reflect_marshaler.go
+++ /dev/null
@@ -1,217 +0,0 @@
-package jsoniter
-
-import (
-	"encoding"
-	"encoding/json"
-	"github.com/modern-go/reflect2"
-	"unsafe"
-)
-
-var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem()
-var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem()
-var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem()
-var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem()
-
-func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder {
-	ptrType := reflect2.PtrTo(typ)
-	if ptrType.Implements(unmarshalerType) {
-		return &referenceDecoder{
-			&unmarshalerDecoder{ptrType},
-		}
-	}
-	if ptrType.Implements(textUnmarshalerType) {
-		return &referenceDecoder{
-			&textUnmarshalerDecoder{ptrType},
-		}
-	}
-	return nil
-}
-
-func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder {
-	if typ == marshalerType {
-		checkIsEmpty := createCheckIsEmpty(ctx, typ)
-		var encoder ValEncoder = &directMarshalerEncoder{
-			checkIsEmpty: checkIsEmpty,
-		}
-		return encoder
-	}
-	if typ.Implements(marshalerType) {
-		checkIsEmpty := createCheckIsEmpty(ctx, typ)
-		var encoder ValEncoder = &marshalerEncoder{
-			valType:      typ,
-			checkIsEmpty: checkIsEmpty,
-		}
-		return encoder
-	}
-	ptrType := reflect2.PtrTo(typ)
-	if ctx.prefix != "" && ptrType.Implements(marshalerType) {
-		checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
-		var encoder ValEncoder = &marshalerEncoder{
-			valType:      ptrType,
-			checkIsEmpty: checkIsEmpty,
-		}
-		return &referenceEncoder{encoder}
-	}
-	if typ == textMarshalerType {
-		checkIsEmpty := createCheckIsEmpty(ctx, typ)
-		var encoder ValEncoder = &directTextMarshalerEncoder{
-			checkIsEmpty:  checkIsEmpty,
-			stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
-		}
-		return encoder
-	}
-	if typ.Implements(textMarshalerType) {
-		checkIsEmpty := createCheckIsEmpty(ctx, typ)
-		var encoder ValEncoder = &textMarshalerEncoder{
-			valType:       typ,
-			stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
-			checkIsEmpty:  checkIsEmpty,
-		}
-		return encoder
-	}
-	// if the prefix is empty the type is the root type, so the pointer-receiver fallback below applies only to non-root types
-	if ctx.prefix != "" && ptrType.Implements(textMarshalerType) {
-		checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
-		var encoder ValEncoder = &textMarshalerEncoder{
-			valType:       ptrType,
-			stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
-			checkIsEmpty:  checkIsEmpty,
-		}
-		return &referenceEncoder{encoder}
-	}
-	return nil
-}
-
-type marshalerEncoder struct {
-	checkIsEmpty checkIsEmpty
-	valType      reflect2.Type
-}
-
-func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	obj := encoder.valType.UnsafeIndirect(ptr)
-	if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
-		stream.WriteNil()
-		return
-	}
-	bytes, err := json.Marshal(obj)
-	if err != nil {
-		stream.Error = err
-	} else {
-		stream.Write(bytes)
-	}
-}
-
-func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return encoder.checkIsEmpty.IsEmpty(ptr)
-}
-
-type directMarshalerEncoder struct {
-	checkIsEmpty checkIsEmpty
-}
-
-func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	marshaler := *(*json.Marshaler)(ptr)
-	if marshaler == nil {
-		stream.WriteNil()
-		return
-	}
-	bytes, err := marshaler.MarshalJSON()
-	if err != nil {
-		stream.Error = err
-	} else {
-		stream.Write(bytes)
-	}
-}
-
-func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return encoder.checkIsEmpty.IsEmpty(ptr)
-}
-
-type textMarshalerEncoder struct {
-	valType       reflect2.Type
-	stringEncoder ValEncoder
-	checkIsEmpty  checkIsEmpty
-}
-
-func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	obj := encoder.valType.UnsafeIndirect(ptr)
-	if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
-		stream.WriteNil()
-		return
-	}
-	marshaler := (obj).(encoding.TextMarshaler)
-	bytes, err := marshaler.MarshalText()
-	if err != nil {
-		stream.Error = err
-	} else {
-		str := string(bytes)
-		encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
-	}
-}
-
-func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return encoder.checkIsEmpty.IsEmpty(ptr)
-}
-
-type directTextMarshalerEncoder struct {
-	stringEncoder ValEncoder
-	checkIsEmpty  checkIsEmpty
-}
-
-func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	marshaler := *(*encoding.TextMarshaler)(ptr)
-	if marshaler == nil {
-		stream.WriteNil()
-		return
-	}
-	bytes, err := marshaler.MarshalText()
-	if err != nil {
-		stream.Error = err
-	} else {
-		str := string(bytes)
-		encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
-	}
-}
-
-func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return encoder.checkIsEmpty.IsEmpty(ptr)
-}
-
-type unmarshalerDecoder struct {
-	valType reflect2.Type
-}
-
-func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	valType := decoder.valType
-	obj := valType.UnsafeIndirect(ptr)
-	unmarshaler := obj.(json.Unmarshaler)
-	iter.nextToken()
-	iter.unreadByte() // nextToken + unreadByte skips any leading whitespace
-	bytes := iter.SkipAndReturnBytes()
-	err := unmarshaler.UnmarshalJSON(bytes)
-	if err != nil {
-		iter.ReportError("unmarshalerDecoder", err.Error())
-	}
-}
-
-type textUnmarshalerDecoder struct {
-	valType reflect2.Type
-}
-
-func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	valType := decoder.valType
-	obj := valType.UnsafeIndirect(ptr)
-	if reflect2.IsNil(obj) {
-		ptrType := valType.(*reflect2.UnsafePtrType)
-		elemType := ptrType.Elem()
-		elem := elemType.UnsafeNew()
-		ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem))
-		obj = valType.UnsafeIndirect(ptr)
-	}
-	unmarshaler := (obj).(encoding.TextUnmarshaler)
-	str := iter.ReadString()
-	err := unmarshaler.UnmarshalText([]byte(str))
-	if err != nil {
-		iter.ReportError("textUnmarshalerDecoder", err.Error())
-	}
-}
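
createEncoderOfMarshaler above gives json.Marshaler and encoding.TextMarshaler implementations the same treatment encoding/json provides; note the ctx.prefix != "" guards, which restrict the pointer-receiver fallback to non-root types. A sketch, where Celsius is a hypothetical type and Marshal is the assumed package-level helper:

    package main

    import (
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    // Celsius is used only for illustration.
    type Celsius float64

    // MarshalText makes Celsius an encoding.TextMarshaler, so it is
    // encoded as a JSON string via textMarshalerEncoder.
    func (c Celsius) MarshalText() ([]byte, error) {
        return []byte(fmt.Sprintf("%.1fC", float64(c))), nil
    }

    func main() {
        out, err := jsoniter.Marshal(Celsius(21.5))
        fmt.Println(string(out), err) // "21.5C" <nil>
    }
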
diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go
deleted file mode 100644
index 9042eb0..0000000
--- a/vendor/github.com/json-iterator/go/reflect_native.go
+++ /dev/null
@@ -1,451 +0,0 @@
-package jsoniter
-
-import (
-	"encoding/base64"
-	"reflect"
-	"strconv"
-	"unsafe"
-
-	"github.com/modern-go/reflect2"
-)
-
-// ptrSize is the pointer size in bits: 32 on 32-bit platforms, 64 on 64-bit platforms.
-const ptrSize = 32 << uintptr(^uintptr(0)>>63)
-
-func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder {
-	if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
-		sliceDecoder := decoderOfSlice(ctx, typ)
-		return &base64Codec{sliceDecoder: sliceDecoder}
-	}
-	typeName := typ.String()
-	kind := typ.Kind()
-	switch kind {
-	case reflect.String:
-		if typeName != "string" {
-			return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
-		}
-		return &stringCodec{}
-	case reflect.Int:
-		if typeName != "int" {
-			return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
-		}
-		if strconv.IntSize == 32 {
-			return &int32Codec{}
-		}
-		return &int64Codec{}
-	case reflect.Int8:
-		if typeName != "int8" {
-			return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
-		}
-		return &int8Codec{}
-	case reflect.Int16:
-		if typeName != "int16" {
-			return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
-		}
-		return &int16Codec{}
-	case reflect.Int32:
-		if typeName != "int32" {
-			return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
-		}
-		return &int32Codec{}
-	case reflect.Int64:
-		if typeName != "int64" {
-			return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
-		}
-		return &int64Codec{}
-	case reflect.Uint:
-		if typeName != "uint" {
-			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
-		}
-		if strconv.IntSize == 32 {
-			return &uint32Codec{}
-		}
-		return &uint64Codec{}
-	case reflect.Uint8:
-		if typeName != "uint8" {
-			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
-		}
-		return &uint8Codec{}
-	case reflect.Uint16:
-		if typeName != "uint16" {
-			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
-		}
-		return &uint16Codec{}
-	case reflect.Uint32:
-		if typeName != "uint32" {
-			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
-		}
-		return &uint32Codec{}
-	case reflect.Uintptr:
-		if typeName != "uintptr" {
-			return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
-		}
-		if ptrSize == 32 {
-			return &uint32Codec{}
-		}
-		return &uint64Codec{}
-	case reflect.Uint64:
-		if typeName != "uint64" {
-			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
-		}
-		return &uint64Codec{}
-	case reflect.Float32:
-		if typeName != "float32" {
-			return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
-		}
-		return &float32Codec{}
-	case reflect.Float64:
-		if typeName != "float64" {
-			return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
-		}
-		return &float64Codec{}
-	case reflect.Bool:
-		if typeName != "bool" {
-			return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
-		}
-		return &boolCodec{}
-	}
-	return nil
-}
-
-func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder {
-	if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
-		sliceDecoder := decoderOfSlice(ctx, typ)
-		return &base64Codec{sliceDecoder: sliceDecoder}
-	}
-	typeName := typ.String()
-	switch typ.Kind() {
-	case reflect.String:
-		if typeName != "string" {
-			return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
-		}
-		return &stringCodec{}
-	case reflect.Int:
-		if typeName != "int" {
-			return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
-		}
-		if strconv.IntSize == 32 {
-			return &int32Codec{}
-		}
-		return &int64Codec{}
-	case reflect.Int8:
-		if typeName != "int8" {
-			return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
-		}
-		return &int8Codec{}
-	case reflect.Int16:
-		if typeName != "int16" {
-			return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
-		}
-		return &int16Codec{}
-	case reflect.Int32:
-		if typeName != "int32" {
-			return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
-		}
-		return &int32Codec{}
-	case reflect.Int64:
-		if typeName != "int64" {
-			return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
-		}
-		return &int64Codec{}
-	case reflect.Uint:
-		if typeName != "uint" {
-			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
-		}
-		if strconv.IntSize == 32 {
-			return &uint32Codec{}
-		}
-		return &uint64Codec{}
-	case reflect.Uint8:
-		if typeName != "uint8" {
-			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
-		}
-		return &uint8Codec{}
-	case reflect.Uint16:
-		if typeName != "uint16" {
-			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
-		}
-		return &uint16Codec{}
-	case reflect.Uint32:
-		if typeName != "uint32" {
-			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
-		}
-		return &uint32Codec{}
-	case reflect.Uintptr:
-		if typeName != "uintptr" {
-			return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
-		}
-		if ptrSize == 32 {
-			return &uint32Codec{}
-		}
-		return &uint64Codec{}
-	case reflect.Uint64:
-		if typeName != "uint64" {
-			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
-		}
-		return &uint64Codec{}
-	case reflect.Float32:
-		if typeName != "float32" {
-			return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
-		}
-		return &float32Codec{}
-	case reflect.Float64:
-		if typeName != "float64" {
-			return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
-		}
-		return &float64Codec{}
-	case reflect.Bool:
-		if typeName != "bool" {
-			return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
-		}
-		return &boolCodec{}
-	}
-	return nil
-}
-
-type stringCodec struct {
-}
-
-func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	*((*string)(ptr)) = iter.ReadString()
-}
-
-func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
-	str := *((*string)(ptr))
-	stream.WriteString(str)
-}
-
-func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool {
-	return *((*string)(ptr)) == ""
-}
-
-type int8Codec struct {
-}
-
-func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.ReadNil() {
-		*((*int8)(ptr)) = iter.ReadInt8()
-	}
-}
-
-func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteInt8(*((*int8)(ptr)))
-}
-
-func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool {
-	return *((*int8)(ptr)) == 0
-}
-
-type int16Codec struct {
-}
-
-func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.ReadNil() {
-		*((*int16)(ptr)) = iter.ReadInt16()
-	}
-}
-
-func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteInt16(*((*int16)(ptr)))
-}
-
-func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool {
-	return *((*int16)(ptr)) == 0
-}
-
-type int32Codec struct {
-}
-
-func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.ReadNil() {
-		*((*int32)(ptr)) = iter.ReadInt32()
-	}
-}
-
-func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteInt32(*((*int32)(ptr)))
-}
-
-func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool {
-	return *((*int32)(ptr)) == 0
-}
-
-type int64Codec struct {
-}
-
-func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.ReadNil() {
-		*((*int64)(ptr)) = iter.ReadInt64()
-	}
-}
-
-func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteInt64(*((*int64)(ptr)))
-}
-
-func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool {
-	return *((*int64)(ptr)) == 0
-}
-
-type uint8Codec struct {
-}
-
-func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.ReadNil() {
-		*((*uint8)(ptr)) = iter.ReadUint8()
-	}
-}
-
-func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteUint8(*((*uint8)(ptr)))
-}
-
-func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool {
-	return *((*uint8)(ptr)) == 0
-}
-
-type uint16Codec struct {
-}
-
-func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.ReadNil() {
-		*((*uint16)(ptr)) = iter.ReadUint16()
-	}
-}
-
-func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteUint16(*((*uint16)(ptr)))
-}
-
-func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool {
-	return *((*uint16)(ptr)) == 0
-}
-
-type uint32Codec struct {
-}
-
-func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.ReadNil() {
-		*((*uint32)(ptr)) = iter.ReadUint32()
-	}
-}
-
-func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteUint32(*((*uint32)(ptr)))
-}
-
-func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool {
-	return *((*uint32)(ptr)) == 0
-}
-
-type uint64Codec struct {
-}
-
-func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.ReadNil() {
-		*((*uint64)(ptr)) = iter.ReadUint64()
-	}
-}
-
-func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteUint64(*((*uint64)(ptr)))
-}
-
-func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool {
-	return *((*uint64)(ptr)) == 0
-}
-
-type float32Codec struct {
-}
-
-func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.ReadNil() {
-		*((*float32)(ptr)) = iter.ReadFloat32()
-	}
-}
-
-func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteFloat32(*((*float32)(ptr)))
-}
-
-func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool {
-	return *((*float32)(ptr)) == 0
-}
-
-type float64Codec struct {
-}
-
-func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.ReadNil() {
-		*((*float64)(ptr)) = iter.ReadFloat64()
-	}
-}
-
-func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteFloat64(*((*float64)(ptr)))
-}
-
-func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool {
-	return *((*float64)(ptr)) == 0
-}
-
-type boolCodec struct {
-}
-
-func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.ReadNil() {
-		*((*bool)(ptr)) = iter.ReadBool()
-	}
-}
-
-func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteBool(*((*bool)(ptr)))
-}
-
-func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool {
-	return !(*((*bool)(ptr)))
-}
-
-type base64Codec struct {
-	sliceType    *reflect2.UnsafeSliceType
-	sliceDecoder ValDecoder
-}
-
-func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if iter.ReadNil() {
-		codec.sliceType.UnsafeSetNil(ptr)
-		return
-	}
-	switch iter.WhatIsNext() {
-	case StringValue:
-		src := iter.ReadString()
-		dst, err := base64.StdEncoding.DecodeString(src)
-		if err != nil {
-			iter.ReportError("decode base64", err.Error())
-		} else {
-			codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst))
-		}
-	case ArrayValue:
-		codec.sliceDecoder.Decode(ptr, iter)
-	default:
-		iter.ReportError("base64Codec", "invalid input")
-	}
-}
-
-func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
-	src := *((*[]byte)(ptr))
-	if len(src) == 0 {
-		stream.WriteNil()
-		return
-	}
-	encoding := base64.StdEncoding
-	stream.writeByte('"')
-	size := encoding.EncodedLen(len(src))
-	buf := make([]byte, size)
-	encoding.Encode(buf, src)
-	stream.buf = append(stream.buf, buf...)
-	stream.writeByte('"')
-}
-
-func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool {
-	return len(*((*[]byte)(ptr))) == 0
-}
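
base64Codec mirrors encoding/json: a []byte value is written as a standard-base64 JSON string, and on decode either a base64 string or a plain JSON array is accepted. A sketch, assuming the package-level Marshal/Unmarshal helpers:

    package main

    import (
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    func main() {
        out, _ := jsoniter.Marshal([]byte("hi"))
        fmt.Println(string(out)) // "aGk="

        var b []byte
        _ = jsoniter.Unmarshal([]byte(`"aGk="`), &b)    // base64 string form
        _ = jsoniter.Unmarshal([]byte(`[104,105]`), &b) // plain array form also accepted
        fmt.Println(string(b)) // hi
    }
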
diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go
deleted file mode 100644
index 43ec71d..0000000
--- a/vendor/github.com/json-iterator/go/reflect_optional.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package jsoniter
-
-import (
-	"github.com/modern-go/reflect2"
-	"reflect"
-	"unsafe"
-)
-
-func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder {
-	ptrType := typ.(*reflect2.UnsafePtrType)
-	elemType := ptrType.Elem()
-	decoder := decoderOfType(ctx, elemType)
-	if ctx.prefix == "" && elemType.Kind() == reflect.Ptr {
-		return &dereferenceDecoder{elemType, decoder}
-	}
-	return &OptionalDecoder{elemType, decoder}
-}
-
-func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder {
-	ptrType := typ.(*reflect2.UnsafePtrType)
-	elemType := ptrType.Elem()
-	elemEncoder := encoderOfType(ctx, elemType)
-	encoder := &OptionalEncoder{elemEncoder}
-	return encoder
-}
-
-type OptionalDecoder struct {
-	ValueType    reflect2.Type
-	ValueDecoder ValDecoder
-}
-
-func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if iter.ReadNil() {
-		*((*unsafe.Pointer)(ptr)) = nil
-	} else {
-		if *((*unsafe.Pointer)(ptr)) == nil {
-			// the pointer is nil, so allocate memory to hold the value
-			newPtr := decoder.ValueType.UnsafeNew()
-			decoder.ValueDecoder.Decode(newPtr, iter)
-			*((*unsafe.Pointer)(ptr)) = newPtr
-		} else {
-			//reuse existing instance
-			decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
-		}
-	}
-}
-
-type dereferenceDecoder struct {
-	// used only to dereference a pointer
-	valueType    reflect2.Type
-	valueDecoder ValDecoder
-}
-
-func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if *((*unsafe.Pointer)(ptr)) == nil {
-		// pointer is nil; we have to allocate memory to hold the value
-		newPtr := decoder.valueType.UnsafeNew()
-		decoder.valueDecoder.Decode(newPtr, iter)
-		*((*unsafe.Pointer)(ptr)) = newPtr
-	} else {
-		// reuse the existing instance
-		decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
-	}
-}
-
-type OptionalEncoder struct {
-	ValueEncoder ValEncoder
-}
-
-func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	if *((*unsafe.Pointer)(ptr)) == nil {
-		stream.WriteNil()
-	} else {
-		encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
-	}
-}
-
-func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return *((*unsafe.Pointer)(ptr)) == nil
-}
-
-type dereferenceEncoder struct {
-	ValueEncoder ValEncoder
-}
-
-func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	if *((*unsafe.Pointer)(ptr)) == nil {
-		stream.WriteNil()
-	} else {
-		encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
-	}
-}
-
-func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	dePtr := *((*unsafe.Pointer)(ptr))
-	if dePtr == nil {
-		return true
-	}
-	return encoder.ValueEncoder.IsEmpty(dePtr)
-}
-
-func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
-	deReferenced := *((*unsafe.Pointer)(ptr))
-	if deReferenced == nil {
-		return true
-	}
-	isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil)
-	if !converted {
-		return false
-	}
-	fieldPtr := unsafe.Pointer(deReferenced)
-	return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
-}
-
-type referenceEncoder struct {
-	encoder ValEncoder
-}
-
-func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
-}
-
-func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
-}
-
-type referenceDecoder struct {
-	decoder ValDecoder
-}
-
-func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	decoder.decoder.Decode(unsafe.Pointer(&ptr), iter)
-}
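
The optional decoder above encodes three rules: null nils out the pointer, decoding into a nil pointer allocates fresh storage, and decoding into a non-nil pointer reuses the pointee. A small sketch of those rules as observed through the public API; the expected output is inferred from the code above:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	n := 42
	p := &n

	// null sets the pointer itself to nil.
	_ = jsoniter.Unmarshal([]byte(`null`), &p)
	fmt.Println(p == nil) // true

	// Decoding into a nil pointer allocates new storage ...
	_ = jsoniter.Unmarshal([]byte(`7`), &p)
	fmt.Println(*p, n) // 7 42

	// ... while a non-nil pointer has its existing pointee overwritten.
	_ = jsoniter.Unmarshal([]byte(`8`), &p)
	fmt.Println(*p, n) // 8 42
}
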
diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go
deleted file mode 100644
index 9441d79..0000000
--- a/vendor/github.com/json-iterator/go/reflect_slice.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package jsoniter
-
-import (
-	"fmt"
-	"github.com/modern-go/reflect2"
-	"io"
-	"unsafe"
-)
-
-func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder {
-	sliceType := typ.(*reflect2.UnsafeSliceType)
-	decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
-	return &sliceDecoder{sliceType, decoder}
-}
-
-func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder {
-	sliceType := typ.(*reflect2.UnsafeSliceType)
-	encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
-	return &sliceEncoder{sliceType, encoder}
-}
-
-type sliceEncoder struct {
-	sliceType   *reflect2.UnsafeSliceType
-	elemEncoder ValEncoder
-}
-
-func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	if encoder.sliceType.UnsafeIsNil(ptr) {
-		stream.WriteNil()
-		return
-	}
-	length := encoder.sliceType.UnsafeLengthOf(ptr)
-	if length == 0 {
-		stream.WriteEmptyArray()
-		return
-	}
-	stream.WriteArrayStart()
-	encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream)
-	for i := 1; i < length; i++ {
-		stream.WriteMore()
-		elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i)
-		encoder.elemEncoder.Encode(elemPtr, stream)
-	}
-	stream.WriteArrayEnd()
-	if stream.Error != nil && stream.Error != io.EOF {
-		stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error())
-	}
-}
-
-func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return encoder.sliceType.UnsafeLengthOf(ptr) == 0
-}
-
-type sliceDecoder struct {
-	sliceType   *reflect2.UnsafeSliceType
-	elemDecoder ValDecoder
-}
-
-func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	decoder.doDecode(ptr, iter)
-	if iter.Error != nil && iter.Error != io.EOF {
-		iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error())
-	}
-}
-
-func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
-	c := iter.nextToken()
-	sliceType := decoder.sliceType
-	if c == 'n' {
-		iter.skipThreeBytes('u', 'l', 'l')
-		sliceType.UnsafeSetNil(ptr)
-		return
-	}
-	if c != '[' {
-		iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c}))
-		return
-	}
-	c = iter.nextToken()
-	if c == ']' {
-		sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0))
-		return
-	}
-	iter.unreadByte()
-	sliceType.UnsafeGrow(ptr, 1)
-	elemPtr := sliceType.UnsafeGetIndex(ptr, 0)
-	decoder.elemDecoder.Decode(elemPtr, iter)
-	length := 1
-	for c = iter.nextToken(); c == ','; c = iter.nextToken() {
-		idx := length
-		length += 1
-		sliceType.UnsafeGrow(ptr, length)
-		elemPtr = sliceType.UnsafeGetIndex(ptr, idx)
-		decoder.elemDecoder.Decode(elemPtr, iter)
-	}
-	if c != ']' {
-		iter.ReportError("decode slice", "expect ], but found "+string([]byte{c}))
-		return
-	}
-}
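
sliceDecoder.doDecode distinguishes null (the slice becomes nil) from [] (the slice becomes empty but non-nil), and otherwise grows the slice one element at a time with UnsafeGrow. A short sketch of the resulting user-visible behavior, assumed from the code above:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	s := []int{1, 2, 3}

	// null -> nil slice (UnsafeSetNil).
	_ = jsoniter.Unmarshal([]byte(`null`), &s)
	fmt.Println(s == nil) // true

	// [] -> empty but non-nil slice (UnsafeMakeSlice(0, 0)).
	_ = jsoniter.Unmarshal([]byte(`[]`), &s)
	fmt.Println(s == nil, len(s)) // false 0

	// Elements are decoded one at a time as the slice is grown.
	_ = jsoniter.Unmarshal([]byte(`[10, 20, 30]`), &s)
	fmt.Println(s) // [10 20 30]
}
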
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
deleted file mode 100644
index 355d2d1..0000000
--- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
+++ /dev/null
@@ -1,1048 +0,0 @@
-package jsoniter
-
-import (
-	"fmt"
-	"io"
-	"strings"
-	"unsafe"
-
-	"github.com/modern-go/reflect2"
-)
-
-func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder {
-	bindings := map[string]*Binding{}
-	structDescriptor := describeStruct(ctx, typ)
-	for _, binding := range structDescriptor.Fields {
-		for _, fromName := range binding.FromNames {
-			old := bindings[fromName]
-			if old == nil {
-				bindings[fromName] = binding
-				continue
-			}
-			ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding)
-			if ignoreOld {
-				delete(bindings, fromName)
-			}
-			if !ignoreNew {
-				bindings[fromName] = binding
-			}
-		}
-	}
-	fields := map[string]*structFieldDecoder{}
-	for k, binding := range bindings {
-		fields[k] = binding.Decoder.(*structFieldDecoder)
-	}
-
-	if !ctx.caseSensitive() {
-		for k, binding := range bindings {
-			if _, found := fields[strings.ToLower(k)]; !found {
-				fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder)
-			}
-		}
-	}
-
-	return createStructDecoder(ctx, typ, fields)
-}
-
-func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder {
-	if ctx.disallowUnknownFields {
-		return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true}
-	}
-	knownHash := map[int64]struct{}{
-		0: {},
-	}
-
-	switch len(fields) {
-	case 0:
-		return &skipObjectDecoder{typ}
-	case 1:
-		for fieldName, fieldDecoder := range fields {
-			fieldHash := calcHash(fieldName, ctx.caseSensitive())
-			_, known := knownHash[fieldHash]
-			if known {
-				return &generalStructDecoder{typ, fields, false}
-			}
-			knownHash[fieldHash] = struct{}{}
-			return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}
-		}
-	case 2:
-		var fieldHash1 int64
-		var fieldHash2 int64
-		var fieldDecoder1 *structFieldDecoder
-		var fieldDecoder2 *structFieldDecoder
-		for fieldName, fieldDecoder := range fields {
-			fieldHash := calcHash(fieldName, ctx.caseSensitive())
-			_, known := knownHash[fieldHash]
-			if known {
-				return &generalStructDecoder{typ, fields, false}
-			}
-			knownHash[fieldHash] = struct{}{}
-			if fieldHash1 == 0 {
-				fieldHash1 = fieldHash
-				fieldDecoder1 = fieldDecoder
-			} else {
-				fieldHash2 = fieldHash
-				fieldDecoder2 = fieldDecoder
-			}
-		}
-		return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}
-	case 3:
-		var fieldName1 int64
-		var fieldName2 int64
-		var fieldName3 int64
-		var fieldDecoder1 *structFieldDecoder
-		var fieldDecoder2 *structFieldDecoder
-		var fieldDecoder3 *structFieldDecoder
-		for fieldName, fieldDecoder := range fields {
-			fieldHash := calcHash(fieldName, ctx.caseSensitive())
-			_, known := knownHash[fieldHash]
-			if known {
-				return &generalStructDecoder{typ, fields, false}
-			}
-			knownHash[fieldHash] = struct{}{}
-			if fieldName1 == 0 {
-				fieldName1 = fieldHash
-				fieldDecoder1 = fieldDecoder
-			} else if fieldName2 == 0 {
-				fieldName2 = fieldHash
-				fieldDecoder2 = fieldDecoder
-			} else {
-				fieldName3 = fieldHash
-				fieldDecoder3 = fieldDecoder
-			}
-		}
-		return &threeFieldsStructDecoder{typ,
-			fieldName1, fieldDecoder1,
-			fieldName2, fieldDecoder2,
-			fieldName3, fieldDecoder3}
-	case 4:
-		var fieldName1 int64
-		var fieldName2 int64
-		var fieldName3 int64
-		var fieldName4 int64
-		var fieldDecoder1 *structFieldDecoder
-		var fieldDecoder2 *structFieldDecoder
-		var fieldDecoder3 *structFieldDecoder
-		var fieldDecoder4 *structFieldDecoder
-		for fieldName, fieldDecoder := range fields {
-			fieldHash := calcHash(fieldName, ctx.caseSensitive())
-			_, known := knownHash[fieldHash]
-			if known {
-				return &generalStructDecoder{typ, fields, false}
-			}
-			knownHash[fieldHash] = struct{}{}
-			if fieldName1 == 0 {
-				fieldName1 = fieldHash
-				fieldDecoder1 = fieldDecoder
-			} else if fieldName2 == 0 {
-				fieldName2 = fieldHash
-				fieldDecoder2 = fieldDecoder
-			} else if fieldName3 == 0 {
-				fieldName3 = fieldHash
-				fieldDecoder3 = fieldDecoder
-			} else {
-				fieldName4 = fieldHash
-				fieldDecoder4 = fieldDecoder
-			}
-		}
-		return &fourFieldsStructDecoder{typ,
-			fieldName1, fieldDecoder1,
-			fieldName2, fieldDecoder2,
-			fieldName3, fieldDecoder3,
-			fieldName4, fieldDecoder4}
-	case 5:
-		var fieldName1 int64
-		var fieldName2 int64
-		var fieldName3 int64
-		var fieldName4 int64
-		var fieldName5 int64
-		var fieldDecoder1 *structFieldDecoder
-		var fieldDecoder2 *structFieldDecoder
-		var fieldDecoder3 *structFieldDecoder
-		var fieldDecoder4 *structFieldDecoder
-		var fieldDecoder5 *structFieldDecoder
-		for fieldName, fieldDecoder := range fields {
-			fieldHash := calcHash(fieldName, ctx.caseSensitive())
-			_, known := knownHash[fieldHash]
-			if known {
-				return &generalStructDecoder{typ, fields, false}
-			}
-			knownHash[fieldHash] = struct{}{}
-			if fieldName1 == 0 {
-				fieldName1 = fieldHash
-				fieldDecoder1 = fieldDecoder
-			} else if fieldName2 == 0 {
-				fieldName2 = fieldHash
-				fieldDecoder2 = fieldDecoder
-			} else if fieldName3 == 0 {
-				fieldName3 = fieldHash
-				fieldDecoder3 = fieldDecoder
-			} else if fieldName4 == 0 {
-				fieldName4 = fieldHash
-				fieldDecoder4 = fieldDecoder
-			} else {
-				fieldName5 = fieldHash
-				fieldDecoder5 = fieldDecoder
-			}
-		}
-		return &fiveFieldsStructDecoder{typ,
-			fieldName1, fieldDecoder1,
-			fieldName2, fieldDecoder2,
-			fieldName3, fieldDecoder3,
-			fieldName4, fieldDecoder4,
-			fieldName5, fieldDecoder5}
-	case 6:
-		var fieldName1 int64
-		var fieldName2 int64
-		var fieldName3 int64
-		var fieldName4 int64
-		var fieldName5 int64
-		var fieldName6 int64
-		var fieldDecoder1 *structFieldDecoder
-		var fieldDecoder2 *structFieldDecoder
-		var fieldDecoder3 *structFieldDecoder
-		var fieldDecoder4 *structFieldDecoder
-		var fieldDecoder5 *structFieldDecoder
-		var fieldDecoder6 *structFieldDecoder
-		for fieldName, fieldDecoder := range fields {
-			fieldHash := calcHash(fieldName, ctx.caseSensitive())
-			_, known := knownHash[fieldHash]
-			if known {
-				return &generalStructDecoder{typ, fields, false}
-			}
-			knownHash[fieldHash] = struct{}{}
-			if fieldName1 == 0 {
-				fieldName1 = fieldHash
-				fieldDecoder1 = fieldDecoder
-			} else if fieldName2 == 0 {
-				fieldName2 = fieldHash
-				fieldDecoder2 = fieldDecoder
-			} else if fieldName3 == 0 {
-				fieldName3 = fieldHash
-				fieldDecoder3 = fieldDecoder
-			} else if fieldName4 == 0 {
-				fieldName4 = fieldHash
-				fieldDecoder4 = fieldDecoder
-			} else if fieldName5 == 0 {
-				fieldName5 = fieldHash
-				fieldDecoder5 = fieldDecoder
-			} else {
-				fieldName6 = fieldHash
-				fieldDecoder6 = fieldDecoder
-			}
-		}
-		return &sixFieldsStructDecoder{typ,
-			fieldName1, fieldDecoder1,
-			fieldName2, fieldDecoder2,
-			fieldName3, fieldDecoder3,
-			fieldName4, fieldDecoder4,
-			fieldName5, fieldDecoder5,
-			fieldName6, fieldDecoder6}
-	case 7:
-		var fieldName1 int64
-		var fieldName2 int64
-		var fieldName3 int64
-		var fieldName4 int64
-		var fieldName5 int64
-		var fieldName6 int64
-		var fieldName7 int64
-		var fieldDecoder1 *structFieldDecoder
-		var fieldDecoder2 *structFieldDecoder
-		var fieldDecoder3 *structFieldDecoder
-		var fieldDecoder4 *structFieldDecoder
-		var fieldDecoder5 *structFieldDecoder
-		var fieldDecoder6 *structFieldDecoder
-		var fieldDecoder7 *structFieldDecoder
-		for fieldName, fieldDecoder := range fields {
-			fieldHash := calcHash(fieldName, ctx.caseSensitive())
-			_, known := knownHash[fieldHash]
-			if known {
-				return &generalStructDecoder{typ, fields, false}
-			}
-			knownHash[fieldHash] = struct{}{}
-			if fieldName1 == 0 {
-				fieldName1 = fieldHash
-				fieldDecoder1 = fieldDecoder
-			} else if fieldName2 == 0 {
-				fieldName2 = fieldHash
-				fieldDecoder2 = fieldDecoder
-			} else if fieldName3 == 0 {
-				fieldName3 = fieldHash
-				fieldDecoder3 = fieldDecoder
-			} else if fieldName4 == 0 {
-				fieldName4 = fieldHash
-				fieldDecoder4 = fieldDecoder
-			} else if fieldName5 == 0 {
-				fieldName5 = fieldHash
-				fieldDecoder5 = fieldDecoder
-			} else if fieldName6 == 0 {
-				fieldName6 = fieldHash
-				fieldDecoder6 = fieldDecoder
-			} else {
-				fieldName7 = fieldHash
-				fieldDecoder7 = fieldDecoder
-			}
-		}
-		return &sevenFieldsStructDecoder{typ,
-			fieldName1, fieldDecoder1,
-			fieldName2, fieldDecoder2,
-			fieldName3, fieldDecoder3,
-			fieldName4, fieldDecoder4,
-			fieldName5, fieldDecoder5,
-			fieldName6, fieldDecoder6,
-			fieldName7, fieldDecoder7}
-	case 8:
-		var fieldName1 int64
-		var fieldName2 int64
-		var fieldName3 int64
-		var fieldName4 int64
-		var fieldName5 int64
-		var fieldName6 int64
-		var fieldName7 int64
-		var fieldName8 int64
-		var fieldDecoder1 *structFieldDecoder
-		var fieldDecoder2 *structFieldDecoder
-		var fieldDecoder3 *structFieldDecoder
-		var fieldDecoder4 *structFieldDecoder
-		var fieldDecoder5 *structFieldDecoder
-		var fieldDecoder6 *structFieldDecoder
-		var fieldDecoder7 *structFieldDecoder
-		var fieldDecoder8 *structFieldDecoder
-		for fieldName, fieldDecoder := range fields {
-			fieldHash := calcHash(fieldName, ctx.caseSensitive())
-			_, known := knownHash[fieldHash]
-			if known {
-				return &generalStructDecoder{typ, fields, false}
-			}
-			knownHash[fieldHash] = struct{}{}
-			if fieldName1 == 0 {
-				fieldName1 = fieldHash
-				fieldDecoder1 = fieldDecoder
-			} else if fieldName2 == 0 {
-				fieldName2 = fieldHash
-				fieldDecoder2 = fieldDecoder
-			} else if fieldName3 == 0 {
-				fieldName3 = fieldHash
-				fieldDecoder3 = fieldDecoder
-			} else if fieldName4 == 0 {
-				fieldName4 = fieldHash
-				fieldDecoder4 = fieldDecoder
-			} else if fieldName5 == 0 {
-				fieldName5 = fieldHash
-				fieldDecoder5 = fieldDecoder
-			} else if fieldName6 == 0 {
-				fieldName6 = fieldHash
-				fieldDecoder6 = fieldDecoder
-			} else if fieldName7 == 0 {
-				fieldName7 = fieldHash
-				fieldDecoder7 = fieldDecoder
-			} else {
-				fieldName8 = fieldHash
-				fieldDecoder8 = fieldDecoder
-			}
-		}
-		return &eightFieldsStructDecoder{typ,
-			fieldName1, fieldDecoder1,
-			fieldName2, fieldDecoder2,
-			fieldName3, fieldDecoder3,
-			fieldName4, fieldDecoder4,
-			fieldName5, fieldDecoder5,
-			fieldName6, fieldDecoder6,
-			fieldName7, fieldDecoder7,
-			fieldName8, fieldDecoder8}
-	case 9:
-		var fieldName1 int64
-		var fieldName2 int64
-		var fieldName3 int64
-		var fieldName4 int64
-		var fieldName5 int64
-		var fieldName6 int64
-		var fieldName7 int64
-		var fieldName8 int64
-		var fieldName9 int64
-		var fieldDecoder1 *structFieldDecoder
-		var fieldDecoder2 *structFieldDecoder
-		var fieldDecoder3 *structFieldDecoder
-		var fieldDecoder4 *structFieldDecoder
-		var fieldDecoder5 *structFieldDecoder
-		var fieldDecoder6 *structFieldDecoder
-		var fieldDecoder7 *structFieldDecoder
-		var fieldDecoder8 *structFieldDecoder
-		var fieldDecoder9 *structFieldDecoder
-		for fieldName, fieldDecoder := range fields {
-			fieldHash := calcHash(fieldName, ctx.caseSensitive())
-			_, known := knownHash[fieldHash]
-			if known {
-				return &generalStructDecoder{typ, fields, false}
-			}
-			knownHash[fieldHash] = struct{}{}
-			if fieldName1 == 0 {
-				fieldName1 = fieldHash
-				fieldDecoder1 = fieldDecoder
-			} else if fieldName2 == 0 {
-				fieldName2 = fieldHash
-				fieldDecoder2 = fieldDecoder
-			} else if fieldName3 == 0 {
-				fieldName3 = fieldHash
-				fieldDecoder3 = fieldDecoder
-			} else if fieldName4 == 0 {
-				fieldName4 = fieldHash
-				fieldDecoder4 = fieldDecoder
-			} else if fieldName5 == 0 {
-				fieldName5 = fieldHash
-				fieldDecoder5 = fieldDecoder
-			} else if fieldName6 == 0 {
-				fieldName6 = fieldHash
-				fieldDecoder6 = fieldDecoder
-			} else if fieldName7 == 0 {
-				fieldName7 = fieldHash
-				fieldDecoder7 = fieldDecoder
-			} else if fieldName8 == 0 {
-				fieldName8 = fieldHash
-				fieldDecoder8 = fieldDecoder
-			} else {
-				fieldName9 = fieldHash
-				fieldDecoder9 = fieldDecoder
-			}
-		}
-		return &nineFieldsStructDecoder{typ,
-			fieldName1, fieldDecoder1,
-			fieldName2, fieldDecoder2,
-			fieldName3, fieldDecoder3,
-			fieldName4, fieldDecoder4,
-			fieldName5, fieldDecoder5,
-			fieldName6, fieldDecoder6,
-			fieldName7, fieldDecoder7,
-			fieldName8, fieldDecoder8,
-			fieldName9, fieldDecoder9}
-	case 10:
-		var fieldName1 int64
-		var fieldName2 int64
-		var fieldName3 int64
-		var fieldName4 int64
-		var fieldName5 int64
-		var fieldName6 int64
-		var fieldName7 int64
-		var fieldName8 int64
-		var fieldName9 int64
-		var fieldName10 int64
-		var fieldDecoder1 *structFieldDecoder
-		var fieldDecoder2 *structFieldDecoder
-		var fieldDecoder3 *structFieldDecoder
-		var fieldDecoder4 *structFieldDecoder
-		var fieldDecoder5 *structFieldDecoder
-		var fieldDecoder6 *structFieldDecoder
-		var fieldDecoder7 *structFieldDecoder
-		var fieldDecoder8 *structFieldDecoder
-		var fieldDecoder9 *structFieldDecoder
-		var fieldDecoder10 *structFieldDecoder
-		for fieldName, fieldDecoder := range fields {
-			fieldHash := calcHash(fieldName, ctx.caseSensitive())
-			_, known := knownHash[fieldHash]
-			if known {
-				return &generalStructDecoder{typ, fields, false}
-			}
-			knownHash[fieldHash] = struct{}{}
-			if fieldName1 == 0 {
-				fieldName1 = fieldHash
-				fieldDecoder1 = fieldDecoder
-			} else if fieldName2 == 0 {
-				fieldName2 = fieldHash
-				fieldDecoder2 = fieldDecoder
-			} else if fieldName3 == 0 {
-				fieldName3 = fieldHash
-				fieldDecoder3 = fieldDecoder
-			} else if fieldName4 == 0 {
-				fieldName4 = fieldHash
-				fieldDecoder4 = fieldDecoder
-			} else if fieldName5 == 0 {
-				fieldName5 = fieldHash
-				fieldDecoder5 = fieldDecoder
-			} else if fieldName6 == 0 {
-				fieldName6 = fieldHash
-				fieldDecoder6 = fieldDecoder
-			} else if fieldName7 == 0 {
-				fieldName7 = fieldHash
-				fieldDecoder7 = fieldDecoder
-			} else if fieldName8 == 0 {
-				fieldName8 = fieldHash
-				fieldDecoder8 = fieldDecoder
-			} else if fieldName9 == 0 {
-				fieldName9 = fieldHash
-				fieldDecoder9 = fieldDecoder
-			} else {
-				fieldName10 = fieldHash
-				fieldDecoder10 = fieldDecoder
-			}
-		}
-		return &tenFieldsStructDecoder{typ,
-			fieldName1, fieldDecoder1,
-			fieldName2, fieldDecoder2,
-			fieldName3, fieldDecoder3,
-			fieldName4, fieldDecoder4,
-			fieldName5, fieldDecoder5,
-			fieldName6, fieldDecoder6,
-			fieldName7, fieldDecoder7,
-			fieldName8, fieldDecoder8,
-			fieldName9, fieldDecoder9,
-			fieldName10, fieldDecoder10}
-	}
-	return &generalStructDecoder{typ, fields, false}
-}
-
-type generalStructDecoder struct {
-	typ                   reflect2.Type
-	fields                map[string]*structFieldDecoder
-	disallowUnknownFields bool
-}
-
-func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.readObjectStart() {
-		return
-	}
-	var c byte
-	for c = ','; c == ','; c = iter.nextToken() {
-		decoder.decodeOneField(ptr, iter)
-	}
-	if iter.Error != nil && iter.Error != io.EOF {
-		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
-	}
-	if c != '}' {
-		iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c}))
-	}
-}
-
-func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) {
-	var field string
-	var fieldDecoder *structFieldDecoder
-	if iter.cfg.objectFieldMustBeSimpleString {
-		fieldBytes := iter.ReadStringAsSlice()
-		field = *(*string)(unsafe.Pointer(&fieldBytes))
-		fieldDecoder = decoder.fields[field]
-		if fieldDecoder == nil && !iter.cfg.caseSensitive {
-			fieldDecoder = decoder.fields[strings.ToLower(field)]
-		}
-	} else {
-		field = iter.ReadString()
-		fieldDecoder = decoder.fields[field]
-		if fieldDecoder == nil && !iter.cfg.caseSensitive {
-			fieldDecoder = decoder.fields[strings.ToLower(field)]
-		}
-	}
-	if fieldDecoder == nil {
-		msg := "found unknown field: " + field
-		if decoder.disallowUnknownFields {
-			iter.ReportError("ReadObject", msg)
-		}
-		c := iter.nextToken()
-		if c != ':' {
-			iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
-		}
-		iter.Skip()
-		return
-	}
-	c := iter.nextToken()
-	if c != ':' {
-		iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
-	}
-	fieldDecoder.Decode(ptr, iter)
-}
-
-type skipObjectDecoder struct {
-	typ reflect2.Type
-}
-
-func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	valueType := iter.WhatIsNext()
-	if valueType != ObjectValue && valueType != NilValue {
-		iter.ReportError("skipObjectDecoder", "expect object or null")
-		return
-	}
-	iter.Skip()
-}
-
-type oneFieldStructDecoder struct {
-	typ          reflect2.Type
-	fieldHash    int64
-	fieldDecoder *structFieldDecoder
-}
-
-func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.readObjectStart() {
-		return
-	}
-	for {
-		if iter.readFieldHash() == decoder.fieldHash {
-			decoder.fieldDecoder.Decode(ptr, iter)
-		} else {
-			iter.Skip()
-		}
-		if iter.isObjectEnd() {
-			break
-		}
-	}
-	if iter.Error != nil && iter.Error != io.EOF {
-		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
-	}
-}
-
-type twoFieldsStructDecoder struct {
-	typ           reflect2.Type
-	fieldHash1    int64
-	fieldDecoder1 *structFieldDecoder
-	fieldHash2    int64
-	fieldDecoder2 *structFieldDecoder
-}
-
-func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.readObjectStart() {
-		return
-	}
-	for {
-		switch iter.readFieldHash() {
-		case decoder.fieldHash1:
-			decoder.fieldDecoder1.Decode(ptr, iter)
-		case decoder.fieldHash2:
-			decoder.fieldDecoder2.Decode(ptr, iter)
-		default:
-			iter.Skip()
-		}
-		if iter.isObjectEnd() {
-			break
-		}
-	}
-	if iter.Error != nil && iter.Error != io.EOF {
-		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
-	}
-}
-
-type threeFieldsStructDecoder struct {
-	typ           reflect2.Type
-	fieldHash1    int64
-	fieldDecoder1 *structFieldDecoder
-	fieldHash2    int64
-	fieldDecoder2 *structFieldDecoder
-	fieldHash3    int64
-	fieldDecoder3 *structFieldDecoder
-}
-
-func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.readObjectStart() {
-		return
-	}
-	for {
-		switch iter.readFieldHash() {
-		case decoder.fieldHash1:
-			decoder.fieldDecoder1.Decode(ptr, iter)
-		case decoder.fieldHash2:
-			decoder.fieldDecoder2.Decode(ptr, iter)
-		case decoder.fieldHash3:
-			decoder.fieldDecoder3.Decode(ptr, iter)
-		default:
-			iter.Skip()
-		}
-		if iter.isObjectEnd() {
-			break
-		}
-	}
-	if iter.Error != nil && iter.Error != io.EOF {
-		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
-	}
-}
-
-type fourFieldsStructDecoder struct {
-	typ           reflect2.Type
-	fieldHash1    int64
-	fieldDecoder1 *structFieldDecoder
-	fieldHash2    int64
-	fieldDecoder2 *structFieldDecoder
-	fieldHash3    int64
-	fieldDecoder3 *structFieldDecoder
-	fieldHash4    int64
-	fieldDecoder4 *structFieldDecoder
-}
-
-func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.readObjectStart() {
-		return
-	}
-	for {
-		switch iter.readFieldHash() {
-		case decoder.fieldHash1:
-			decoder.fieldDecoder1.Decode(ptr, iter)
-		case decoder.fieldHash2:
-			decoder.fieldDecoder2.Decode(ptr, iter)
-		case decoder.fieldHash3:
-			decoder.fieldDecoder3.Decode(ptr, iter)
-		case decoder.fieldHash4:
-			decoder.fieldDecoder4.Decode(ptr, iter)
-		default:
-			iter.Skip()
-		}
-		if iter.isObjectEnd() {
-			break
-		}
-	}
-	if iter.Error != nil && iter.Error != io.EOF {
-		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
-	}
-}
-
-type fiveFieldsStructDecoder struct {
-	typ           reflect2.Type
-	fieldHash1    int64
-	fieldDecoder1 *structFieldDecoder
-	fieldHash2    int64
-	fieldDecoder2 *structFieldDecoder
-	fieldHash3    int64
-	fieldDecoder3 *structFieldDecoder
-	fieldHash4    int64
-	fieldDecoder4 *structFieldDecoder
-	fieldHash5    int64
-	fieldDecoder5 *structFieldDecoder
-}
-
-func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.readObjectStart() {
-		return
-	}
-	for {
-		switch iter.readFieldHash() {
-		case decoder.fieldHash1:
-			decoder.fieldDecoder1.Decode(ptr, iter)
-		case decoder.fieldHash2:
-			decoder.fieldDecoder2.Decode(ptr, iter)
-		case decoder.fieldHash3:
-			decoder.fieldDecoder3.Decode(ptr, iter)
-		case decoder.fieldHash4:
-			decoder.fieldDecoder4.Decode(ptr, iter)
-		case decoder.fieldHash5:
-			decoder.fieldDecoder5.Decode(ptr, iter)
-		default:
-			iter.Skip()
-		}
-		if iter.isObjectEnd() {
-			break
-		}
-	}
-	if iter.Error != nil && iter.Error != io.EOF {
-		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
-	}
-}
-
-type sixFieldsStructDecoder struct {
-	typ           reflect2.Type
-	fieldHash1    int64
-	fieldDecoder1 *structFieldDecoder
-	fieldHash2    int64
-	fieldDecoder2 *structFieldDecoder
-	fieldHash3    int64
-	fieldDecoder3 *structFieldDecoder
-	fieldHash4    int64
-	fieldDecoder4 *structFieldDecoder
-	fieldHash5    int64
-	fieldDecoder5 *structFieldDecoder
-	fieldHash6    int64
-	fieldDecoder6 *structFieldDecoder
-}
-
-func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.readObjectStart() {
-		return
-	}
-	for {
-		switch iter.readFieldHash() {
-		case decoder.fieldHash1:
-			decoder.fieldDecoder1.Decode(ptr, iter)
-		case decoder.fieldHash2:
-			decoder.fieldDecoder2.Decode(ptr, iter)
-		case decoder.fieldHash3:
-			decoder.fieldDecoder3.Decode(ptr, iter)
-		case decoder.fieldHash4:
-			decoder.fieldDecoder4.Decode(ptr, iter)
-		case decoder.fieldHash5:
-			decoder.fieldDecoder5.Decode(ptr, iter)
-		case decoder.fieldHash6:
-			decoder.fieldDecoder6.Decode(ptr, iter)
-		default:
-			iter.Skip()
-		}
-		if iter.isObjectEnd() {
-			break
-		}
-	}
-	if iter.Error != nil && iter.Error != io.EOF {
-		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
-	}
-}
-
-type sevenFieldsStructDecoder struct {
-	typ           reflect2.Type
-	fieldHash1    int64
-	fieldDecoder1 *structFieldDecoder
-	fieldHash2    int64
-	fieldDecoder2 *structFieldDecoder
-	fieldHash3    int64
-	fieldDecoder3 *structFieldDecoder
-	fieldHash4    int64
-	fieldDecoder4 *structFieldDecoder
-	fieldHash5    int64
-	fieldDecoder5 *structFieldDecoder
-	fieldHash6    int64
-	fieldDecoder6 *structFieldDecoder
-	fieldHash7    int64
-	fieldDecoder7 *structFieldDecoder
-}
-
-func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.readObjectStart() {
-		return
-	}
-	for {
-		switch iter.readFieldHash() {
-		case decoder.fieldHash1:
-			decoder.fieldDecoder1.Decode(ptr, iter)
-		case decoder.fieldHash2:
-			decoder.fieldDecoder2.Decode(ptr, iter)
-		case decoder.fieldHash3:
-			decoder.fieldDecoder3.Decode(ptr, iter)
-		case decoder.fieldHash4:
-			decoder.fieldDecoder4.Decode(ptr, iter)
-		case decoder.fieldHash5:
-			decoder.fieldDecoder5.Decode(ptr, iter)
-		case decoder.fieldHash6:
-			decoder.fieldDecoder6.Decode(ptr, iter)
-		case decoder.fieldHash7:
-			decoder.fieldDecoder7.Decode(ptr, iter)
-		default:
-			iter.Skip()
-		}
-		if iter.isObjectEnd() {
-			break
-		}
-	}
-	if iter.Error != nil && iter.Error != io.EOF {
-		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
-	}
-}
-
-type eightFieldsStructDecoder struct {
-	typ           reflect2.Type
-	fieldHash1    int64
-	fieldDecoder1 *structFieldDecoder
-	fieldHash2    int64
-	fieldDecoder2 *structFieldDecoder
-	fieldHash3    int64
-	fieldDecoder3 *structFieldDecoder
-	fieldHash4    int64
-	fieldDecoder4 *structFieldDecoder
-	fieldHash5    int64
-	fieldDecoder5 *structFieldDecoder
-	fieldHash6    int64
-	fieldDecoder6 *structFieldDecoder
-	fieldHash7    int64
-	fieldDecoder7 *structFieldDecoder
-	fieldHash8    int64
-	fieldDecoder8 *structFieldDecoder
-}
-
-func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.readObjectStart() {
-		return
-	}
-	for {
-		switch iter.readFieldHash() {
-		case decoder.fieldHash1:
-			decoder.fieldDecoder1.Decode(ptr, iter)
-		case decoder.fieldHash2:
-			decoder.fieldDecoder2.Decode(ptr, iter)
-		case decoder.fieldHash3:
-			decoder.fieldDecoder3.Decode(ptr, iter)
-		case decoder.fieldHash4:
-			decoder.fieldDecoder4.Decode(ptr, iter)
-		case decoder.fieldHash5:
-			decoder.fieldDecoder5.Decode(ptr, iter)
-		case decoder.fieldHash6:
-			decoder.fieldDecoder6.Decode(ptr, iter)
-		case decoder.fieldHash7:
-			decoder.fieldDecoder7.Decode(ptr, iter)
-		case decoder.fieldHash8:
-			decoder.fieldDecoder8.Decode(ptr, iter)
-		default:
-			iter.Skip()
-		}
-		if iter.isObjectEnd() {
-			break
-		}
-	}
-	if iter.Error != nil && iter.Error != io.EOF {
-		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
-	}
-}
-
-type nineFieldsStructDecoder struct {
-	typ           reflect2.Type
-	fieldHash1    int64
-	fieldDecoder1 *structFieldDecoder
-	fieldHash2    int64
-	fieldDecoder2 *structFieldDecoder
-	fieldHash3    int64
-	fieldDecoder3 *structFieldDecoder
-	fieldHash4    int64
-	fieldDecoder4 *structFieldDecoder
-	fieldHash5    int64
-	fieldDecoder5 *structFieldDecoder
-	fieldHash6    int64
-	fieldDecoder6 *structFieldDecoder
-	fieldHash7    int64
-	fieldDecoder7 *structFieldDecoder
-	fieldHash8    int64
-	fieldDecoder8 *structFieldDecoder
-	fieldHash9    int64
-	fieldDecoder9 *structFieldDecoder
-}
-
-func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.readObjectStart() {
-		return
-	}
-	for {
-		switch iter.readFieldHash() {
-		case decoder.fieldHash1:
-			decoder.fieldDecoder1.Decode(ptr, iter)
-		case decoder.fieldHash2:
-			decoder.fieldDecoder2.Decode(ptr, iter)
-		case decoder.fieldHash3:
-			decoder.fieldDecoder3.Decode(ptr, iter)
-		case decoder.fieldHash4:
-			decoder.fieldDecoder4.Decode(ptr, iter)
-		case decoder.fieldHash5:
-			decoder.fieldDecoder5.Decode(ptr, iter)
-		case decoder.fieldHash6:
-			decoder.fieldDecoder6.Decode(ptr, iter)
-		case decoder.fieldHash7:
-			decoder.fieldDecoder7.Decode(ptr, iter)
-		case decoder.fieldHash8:
-			decoder.fieldDecoder8.Decode(ptr, iter)
-		case decoder.fieldHash9:
-			decoder.fieldDecoder9.Decode(ptr, iter)
-		default:
-			iter.Skip()
-		}
-		if iter.isObjectEnd() {
-			break
-		}
-	}
-	if iter.Error != nil && iter.Error != io.EOF {
-		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
-	}
-}
-
-type tenFieldsStructDecoder struct {
-	typ            reflect2.Type
-	fieldHash1     int64
-	fieldDecoder1  *structFieldDecoder
-	fieldHash2     int64
-	fieldDecoder2  *structFieldDecoder
-	fieldHash3     int64
-	fieldDecoder3  *structFieldDecoder
-	fieldHash4     int64
-	fieldDecoder4  *structFieldDecoder
-	fieldHash5     int64
-	fieldDecoder5  *structFieldDecoder
-	fieldHash6     int64
-	fieldDecoder6  *structFieldDecoder
-	fieldHash7     int64
-	fieldDecoder7  *structFieldDecoder
-	fieldHash8     int64
-	fieldDecoder8  *structFieldDecoder
-	fieldHash9     int64
-	fieldDecoder9  *structFieldDecoder
-	fieldHash10    int64
-	fieldDecoder10 *structFieldDecoder
-}
-
-func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	if !iter.readObjectStart() {
-		return
-	}
-	for {
-		switch iter.readFieldHash() {
-		case decoder.fieldHash1:
-			decoder.fieldDecoder1.Decode(ptr, iter)
-		case decoder.fieldHash2:
-			decoder.fieldDecoder2.Decode(ptr, iter)
-		case decoder.fieldHash3:
-			decoder.fieldDecoder3.Decode(ptr, iter)
-		case decoder.fieldHash4:
-			decoder.fieldDecoder4.Decode(ptr, iter)
-		case decoder.fieldHash5:
-			decoder.fieldDecoder5.Decode(ptr, iter)
-		case decoder.fieldHash6:
-			decoder.fieldDecoder6.Decode(ptr, iter)
-		case decoder.fieldHash7:
-			decoder.fieldDecoder7.Decode(ptr, iter)
-		case decoder.fieldHash8:
-			decoder.fieldDecoder8.Decode(ptr, iter)
-		case decoder.fieldHash9:
-			decoder.fieldDecoder9.Decode(ptr, iter)
-		case decoder.fieldHash10:
-			decoder.fieldDecoder10.Decode(ptr, iter)
-		default:
-			iter.Skip()
-		}
-		if iter.isObjectEnd() {
-			break
-		}
-	}
-	if iter.Error != nil && iter.Error != io.EOF {
-		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
-	}
-}
-
-type structFieldDecoder struct {
-	field        reflect2.StructField
-	fieldDecoder ValDecoder
-}
-
-func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	fieldPtr := decoder.field.UnsafeGet(ptr)
-	decoder.fieldDecoder.Decode(fieldPtr, iter)
-	if iter.Error != nil && iter.Error != io.EOF {
-		iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error())
-	}
-}
-
-type stringModeStringDecoder struct {
-	elemDecoder ValDecoder
-	cfg         *frozenConfig
-}
-
-func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	decoder.elemDecoder.Decode(ptr, iter)
-	str := *((*string)(ptr))
-	tempIter := decoder.cfg.BorrowIterator([]byte(str))
-	defer decoder.cfg.ReturnIterator(tempIter)
-	*((*string)(ptr)) = tempIter.ReadString()
-}
-
-type stringModeNumberDecoder struct {
-	elemDecoder ValDecoder
-}
-
-func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
-	c := iter.nextToken()
-	if c != '"' {
-		iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
-		return
-	}
-	decoder.elemDecoder.Decode(ptr, iter)
-	if iter.Error != nil {
-		return
-	}
-	c = iter.readByte()
-	if c != '"' {
-		iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
-		return
-	}
-}
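
The specialized one- to ten-field decoders above all follow the same pattern: hash the incoming field name once, then dispatch with a switch on precomputed hashes instead of a map lookup, falling back to generalStructDecoder whenever two names collide (or a hash happens to be the reserved value 0). A simplified, self-contained model of that dispatch idea — hashName with FNV-1a here merely stands in for the library's calcHash, which is defined elsewhere:

package main

import (
	"fmt"
	"hash/fnv"
)

func hashName(s string) int64 {
	h := fnv.New64a()
	h.Write([]byte(s))
	return int64(h.Sum64())
}

func main() {
	idHash, nameHash := hashName("id"), hashName("name")
	for _, key := range []string{"id", "name", "unknown"} {
		// One switch on a precomputed hash replaces a map[string] lookup.
		switch hashName(key) {
		case idHash:
			fmt.Println("decode field id")
		case nameHash:
			fmt.Println("decode field name")
		default:
			fmt.Println("skip value") // mirrors iter.Skip() above
		}
	}
}
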
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
deleted file mode 100644
index d0759cf..0000000
--- a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package jsoniter
-
-import (
-	"fmt"
-	"github.com/modern-go/reflect2"
-	"io"
-	"reflect"
-	"unsafe"
-)
-
-func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder {
-	type bindingTo struct {
-		binding *Binding
-		toName  string
-		ignored bool
-	}
-	orderedBindings := []*bindingTo{}
-	structDescriptor := describeStruct(ctx, typ)
-	for _, binding := range structDescriptor.Fields {
-		for _, toName := range binding.ToNames {
-			new := &bindingTo{
-				binding: binding,
-				toName:  toName,
-			}
-			for _, old := range orderedBindings {
-				if old.toName != toName {
-					continue
-				}
-				old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding)
-			}
-			orderedBindings = append(orderedBindings, new)
-		}
-	}
-	if len(orderedBindings) == 0 {
-		return &emptyStructEncoder{}
-	}
-	finalOrderedFields := []structFieldTo{}
-	for _, bindingTo := range orderedBindings {
-		if !bindingTo.ignored {
-			finalOrderedFields = append(finalOrderedFields, structFieldTo{
-				encoder: bindingTo.binding.Encoder.(*structFieldEncoder),
-				toName:  bindingTo.toName,
-			})
-		}
-	}
-	return &structEncoder{typ, finalOrderedFields}
-}
-
-func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty {
-	encoder := createEncoderOfNative(ctx, typ)
-	if encoder != nil {
-		return encoder
-	}
-	kind := typ.Kind()
-	switch kind {
-	case reflect.Interface:
-		return &dynamicEncoder{typ}
-	case reflect.Struct:
-		return &structEncoder{typ: typ}
-	case reflect.Array:
-		return &arrayEncoder{}
-	case reflect.Slice:
-		return &sliceEncoder{}
-	case reflect.Map:
-		return encoderOfMap(ctx, typ)
-	case reflect.Ptr:
-		return &OptionalEncoder{}
-	default:
-		return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)}
-	}
-}
-
-func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) {
-	newTagged := new.Field.Tag().Get(cfg.getTagKey()) != ""
-	oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != ""
-	if newTagged {
-		if oldTagged {
-			if len(old.levels) > len(new.levels) {
-				return true, false
-			} else if len(new.levels) > len(old.levels) {
-				return false, true
-			} else {
-				return true, true
-			}
-		} else {
-			return true, false
-		}
-	} else {
-		if oldTagged {
-			return true, false
-		}
-		if len(old.levels) > len(new.levels) {
-			return true, false
-		} else if len(new.levels) > len(old.levels) {
-			return false, true
-		} else {
-			return true, true
-		}
-	}
-}
-
-type structFieldEncoder struct {
-	field        reflect2.StructField
-	fieldEncoder ValEncoder
-	omitempty    bool
-}
-
-func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	fieldPtr := encoder.field.UnsafeGet(ptr)
-	encoder.fieldEncoder.Encode(fieldPtr, stream)
-	if stream.Error != nil && stream.Error != io.EOF {
-		stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error())
-	}
-}
-
-func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	fieldPtr := encoder.field.UnsafeGet(ptr)
-	return encoder.fieldEncoder.IsEmpty(fieldPtr)
-}
-
-func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
-	isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil)
-	if !converted {
-		return false
-	}
-	fieldPtr := encoder.field.UnsafeGet(ptr)
-	return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
-}
-
-type IsEmbeddedPtrNil interface {
-	IsEmbeddedPtrNil(ptr unsafe.Pointer) bool
-}
-
-type structEncoder struct {
-	typ    reflect2.Type
-	fields []structFieldTo
-}
-
-type structFieldTo struct {
-	encoder *structFieldEncoder
-	toName  string
-}
-
-func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteObjectStart()
-	isNotFirst := false
-	for _, field := range encoder.fields {
-		if field.encoder.omitempty && field.encoder.IsEmpty(ptr) {
-			continue
-		}
-		if field.encoder.IsEmbeddedPtrNil(ptr) {
-			continue
-		}
-		if isNotFirst {
-			stream.WriteMore()
-		}
-		stream.WriteObjectField(field.toName)
-		field.encoder.Encode(ptr, stream)
-		isNotFirst = true
-	}
-	stream.WriteObjectEnd()
-	if stream.Error != nil && stream.Error != io.EOF {
-		stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error())
-	}
-}
-
-func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return false
-}
-
-type emptyStructEncoder struct {
-}
-
-func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteEmptyObject()
-}
-
-func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return false
-}
-
-type stringModeNumberEncoder struct {
-	elemEncoder ValEncoder
-}
-
-func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.writeByte('"')
-	encoder.elemEncoder.Encode(ptr, stream)
-	stream.writeByte('"')
-}
-
-func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return encoder.elemEncoder.IsEmpty(ptr)
-}
-
-type stringModeStringEncoder struct {
-	elemEncoder ValEncoder
-	cfg         *frozenConfig
-}
-
-func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	tempStream := encoder.cfg.BorrowStream(nil)
-	defer encoder.cfg.ReturnStream(tempStream)
-	encoder.elemEncoder.Encode(ptr, tempStream)
-	stream.WriteString(string(tempStream.Buffer()))
-}
-
-func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return encoder.elemEncoder.IsEmpty(ptr)
-}
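
structEncoder consults structFieldEncoder.IsEmpty before writing each field, which is what makes the omitempty tag work. A minimal example of the observable effect; the output comments are inferred from the code above:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type user struct {
	Name  string `json:"name"`
	Email string `json:"email,omitempty"`
}

func main() {
	// Empty Email is skipped because its field encoder reports IsEmpty.
	out, _ := jsoniter.Marshal(user{Name: "alice"})
	fmt.Println(string(out)) // {"name":"alice"}

	out, _ = jsoniter.Marshal(user{Name: "alice", Email: "a@example.com"})
	fmt.Println(string(out)) // {"name":"alice","email":"a@example.com"}
}
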
diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go
deleted file mode 100644
index 17662fd..0000000
--- a/vendor/github.com/json-iterator/go/stream.go
+++ /dev/null
@@ -1,211 +0,0 @@
-package jsoniter
-
-import (
-	"io"
-)
-
-// Stream is an io.Writer-like object with JSON-specific write functions.
-// Errors are not returned as return values; they are stored in the Error field of the stream instance.
-type Stream struct {
-	cfg        *frozenConfig
-	out        io.Writer
-	buf        []byte
-	Error      error
-	indention  int
-	Attachment interface{} // open for customized encoders
-}
-
-// NewStream creates a new stream instance.
-// cfg can be jsoniter.ConfigDefault.
-// out can be nil to write only to the internal buffer.
-// bufSize is the initial size for the internal buffer in bytes.
-func NewStream(cfg API, out io.Writer, bufSize int) *Stream {
-	return &Stream{
-		cfg:       cfg.(*frozenConfig),
-		out:       out,
-		buf:       make([]byte, 0, bufSize),
-		Error:     nil,
-		indention: 0,
-	}
-}
-
-// Pool returns a pool that can provide more streams with the same configuration
-func (stream *Stream) Pool() StreamPool {
-	return stream.cfg
-}
-
-// Reset reuses this stream instance by assigning a new writer
-func (stream *Stream) Reset(out io.Writer) {
-	stream.out = out
-	stream.buf = stream.buf[:0]
-}
-
-// Available returns how many bytes are unused in the buffer.
-func (stream *Stream) Available() int {
-	return cap(stream.buf) - len(stream.buf)
-}
-
-// Buffered returns the number of bytes that have been written into the current buffer.
-func (stream *Stream) Buffered() int {
-	return len(stream.buf)
-}
-
-// Buffer returns the internal buffer; if the writer is nil, use this method to take the result
-func (stream *Stream) Buffer() []byte {
-	return stream.buf
-}
-
-// SetBuffer allows appending to the internal buffer directly
-func (stream *Stream) SetBuffer(buf []byte) {
-	stream.buf = buf
-}
-
-// Write writes the contents of p into the buffer.
-// It returns the number of bytes written.
-// If nn < len(p), it also returns an error explaining
-// why the write is short.
-func (stream *Stream) Write(p []byte) (nn int, err error) {
-	stream.buf = append(stream.buf, p...)
-	if stream.out != nil {
-		nn, err = stream.out.Write(stream.buf)
-		stream.buf = stream.buf[nn:]
-		return
-	}
-	return len(p), nil
-}
-
-// writeByte writes a single byte.
-func (stream *Stream) writeByte(c byte) {
-	stream.buf = append(stream.buf, c)
-}
-
-func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) {
-	stream.buf = append(stream.buf, c1, c2)
-}
-
-func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) {
-	stream.buf = append(stream.buf, c1, c2, c3)
-}
-
-func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) {
-	stream.buf = append(stream.buf, c1, c2, c3, c4)
-}
-
-func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) {
-	stream.buf = append(stream.buf, c1, c2, c3, c4, c5)
-}
-
-// Flush writes any buffered data to the underlying io.Writer.
-func (stream *Stream) Flush() error {
-	if stream.out == nil {
-		return nil
-	}
-	if stream.Error != nil {
-		return stream.Error
-	}
-	n, err := stream.out.Write(stream.buf)
-	if err != nil {
-		if stream.Error == nil {
-			stream.Error = err
-		}
-		return err
-	}
-	stream.buf = stream.buf[n:]
-	return nil
-}
-
-// WriteRaw writes the string out without quotes, just like a raw []byte
-func (stream *Stream) WriteRaw(s string) {
-	stream.buf = append(stream.buf, s...)
-}
-
-// WriteNil writes null to the stream
-func (stream *Stream) WriteNil() {
-	stream.writeFourBytes('n', 'u', 'l', 'l')
-}
-
-// WriteTrue writes true to the stream
-func (stream *Stream) WriteTrue() {
-	stream.writeFourBytes('t', 'r', 'u', 'e')
-}
-
-// WriteFalse writes false to the stream
-func (stream *Stream) WriteFalse() {
-	stream.writeFiveBytes('f', 'a', 'l', 's', 'e')
-}
-
-// WriteBool writes true or false to the stream
-func (stream *Stream) WriteBool(val bool) {
-	if val {
-		stream.WriteTrue()
-	} else {
-		stream.WriteFalse()
-	}
-}
-
-// WriteObjectStart writes { with optional indentation
-func (stream *Stream) WriteObjectStart() {
-	stream.indention += stream.cfg.indentionStep
-	stream.writeByte('{')
-	stream.writeIndention(0)
-}
-
-// WriteObjectField write "field": with possible indention
-func (stream *Stream) WriteObjectField(field string) {
-	stream.WriteString(field)
-	if stream.indention > 0 {
-		stream.writeTwoBytes(':', ' ')
-	} else {
-		stream.writeByte(':')
-	}
-}
-
-// WriteObjectEnd writes } with optional indentation
-func (stream *Stream) WriteObjectEnd() {
-	stream.writeIndention(stream.cfg.indentionStep)
-	stream.indention -= stream.cfg.indentionStep
-	stream.writeByte('}')
-}
-
-// WriteEmptyObject writes {}
-func (stream *Stream) WriteEmptyObject() {
-	stream.writeByte('{')
-	stream.writeByte('}')
-}
-
-// WriteMore writes , with optional indentation
-func (stream *Stream) WriteMore() {
-	stream.writeByte(',')
-	stream.writeIndention(0)
-	stream.Flush()
-}
-
-// WriteArrayStart writes [ with optional indentation
-func (stream *Stream) WriteArrayStart() {
-	stream.indention += stream.cfg.indentionStep
-	stream.writeByte('[')
-	stream.writeIndention(0)
-}
-
-// WriteEmptyArray writes []
-func (stream *Stream) WriteEmptyArray() {
-	stream.writeTwoBytes('[', ']')
-}
-
-// WriteArrayEnd writes ] with optional indentation
-func (stream *Stream) WriteArrayEnd() {
-	stream.writeIndention(stream.cfg.indentionStep)
-	stream.indention -= stream.cfg.indentionStep
-	stream.writeByte(']')
-}
-
-func (stream *Stream) writeIndention(delta int) {
-	if stream.indention == 0 {
-		return
-	}
-	stream.writeByte('\n')
-	toWrite := stream.indention - delta
-	for i := 0; i < toWrite; i++ {
-		stream.buf = append(stream.buf, ' ')
-	}
-}
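
When constructed with a nil writer, the Stream accumulates everything in its internal buffer, which Buffer() then exposes; with a non-nil writer, Flush() (also invoked from WriteMore) pushes the buffer out. A small usage sketch built only from functions defined in this file and in stream_int.go; the expected output is inferred from the code:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// nil writer: the encoded bytes stay in the internal buffer.
	stream := jsoniter.NewStream(jsoniter.ConfigDefault, nil, 64)
	stream.WriteObjectStart()
	stream.WriteObjectField("count")
	stream.WriteInt(3)
	stream.WriteObjectEnd()
	fmt.Println(string(stream.Buffer())) // {"count":3}
}
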
diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go
deleted file mode 100644
index f318d2c..0000000
--- a/vendor/github.com/json-iterator/go/stream_float.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package jsoniter
-
-import (
-	"math"
-	"strconv"
-)
-
-var pow10 []uint64
-
-func init() {
-	pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000}
-}
-
-// WriteFloat32 writes float32 to the stream
-func (stream *Stream) WriteFloat32(val float32) {
-	abs := math.Abs(float64(val))
-	fmt := byte('f')
-	// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
-	if abs != 0 {
-		if float32(abs) < 1e-6 || float32(abs) >= 1e21 {
-			fmt = 'e'
-		}
-	}
-	stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32)
-}
-
-// WriteFloat32Lossy writes float32 to the stream with only 6 digits of precision; lossy, but much faster
-func (stream *Stream) WriteFloat32Lossy(val float32) {
-	if val < 0 {
-		stream.writeByte('-')
-		val = -val
-	}
-	if val > 0x4ffffff {
-		stream.WriteFloat32(val)
-		return
-	}
-	precision := 6
-	exp := uint64(1000000) // 6
-	lval := uint64(float64(val)*float64(exp) + 0.5)
-	stream.WriteUint64(lval / exp)
-	fval := lval % exp
-	if fval == 0 {
-		return
-	}
-	stream.writeByte('.')
-	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
-		stream.writeByte('0')
-	}
-	stream.WriteUint64(fval)
-	for stream.buf[len(stream.buf)-1] == '0' {
-		stream.buf = stream.buf[:len(stream.buf)-1]
-	}
-}
-
-// WriteFloat64 writes float64 to the stream
-func (stream *Stream) WriteFloat64(val float64) {
-	abs := math.Abs(val)
-	fmt := byte('f')
-	// Note: Must use float64 comparisons for the underlying float64 value to get precise cutoffs right.
-	if abs != 0 {
-		if abs < 1e-6 || abs >= 1e21 {
-			fmt = 'e'
-		}
-	}
-	stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64)
-}
-
-// WriteFloat64Lossy writes float64 to the stream with only 6 digits of precision; lossy, but much faster
-func (stream *Stream) WriteFloat64Lossy(val float64) {
-	if val < 0 {
-		stream.writeByte('-')
-		val = -val
-	}
-	if val > 0x4ffffff {
-		stream.WriteFloat64(val)
-		return
-	}
-	precision := 6
-	exp := uint64(1000000) // 6
-	lval := uint64(val*float64(exp) + 0.5)
-	stream.WriteUint64(lval / exp)
-	fval := lval % exp
-	if fval == 0 {
-		return
-	}
-	stream.writeByte('.')
-	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
-		stream.writeByte('0')
-	}
-	stream.WriteUint64(fval)
-	for stream.buf[len(stream.buf)-1] == '0' {
-		stream.buf = stream.buf[:len(stream.buf)-1]
-	}
-}
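
Both lossy writers use the same fixed-point trick: scale by 10^6, round by adding 0.5, then emit the integer part, a dot, and the fractional digits with leading zeros restored from the pow10 table and trailing zeros trimmed. A worked example of the core arithmetic:

package main

import "fmt"

func main() {
	val := 3.14159265
	exp := uint64(1000000) // 10^6, i.e. 6 digits of precision
	lval := uint64(val*float64(exp) + 0.5)
	fmt.Println(lval)     // 3141593 (rounded, not truncated)
	fmt.Println(lval/exp) // 3       -> integer part
	fmt.Println(lval%exp) // 141593  -> fractional digits, printed after "."
}
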
diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go
deleted file mode 100644
index d1059ee..0000000
--- a/vendor/github.com/json-iterator/go/stream_int.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package jsoniter
-
-var digits []uint32
-
-func init() {
-	digits = make([]uint32, 1000)
-	for i := uint32(0); i < 1000; i++ {
-		digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
-		if i < 10 {
-			digits[i] += 2 << 24
-		} else if i < 100 {
-			digits[i] += 1 << 24
-		}
-	}
-}
-
-func writeFirstBuf(space []byte, v uint32) []byte {
-	start := v >> 24
-	if start == 0 {
-		space = append(space, byte(v>>16), byte(v>>8))
-	} else if start == 1 {
-		space = append(space, byte(v>>8))
-	}
-	space = append(space, byte(v))
-	return space
-}
-
-func writeBuf(buf []byte, v uint32) []byte {
-	return append(buf, byte(v>>16), byte(v>>8), byte(v))
-}
-
-// WriteUint8 writes uint8 to the stream
-func (stream *Stream) WriteUint8(val uint8) {
-	stream.buf = writeFirstBuf(stream.buf, digits[val])
-}
-
-// WriteInt8 writes int8 to the stream
-func (stream *Stream) WriteInt8(nval int8) {
-	var val uint8
-	if nval < 0 {
-		val = uint8(-nval)
-		stream.buf = append(stream.buf, '-')
-	} else {
-		val = uint8(nval)
-	}
-	stream.buf = writeFirstBuf(stream.buf, digits[val])
-}
-
-// WriteUint16 writes uint16 to the stream
-func (stream *Stream) WriteUint16(val uint16) {
-	q1 := val / 1000
-	if q1 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[val])
-		return
-	}
-	r1 := val - q1*1000
-	stream.buf = writeFirstBuf(stream.buf, digits[q1])
-	stream.buf = writeBuf(stream.buf, digits[r1])
-	return
-}
-
-// WriteInt16 writes int16 to the stream
-func (stream *Stream) WriteInt16(nval int16) {
-	var val uint16
-	if nval < 0 {
-		val = uint16(-nval)
-		stream.buf = append(stream.buf, '-')
-	} else {
-		val = uint16(nval)
-	}
-	stream.WriteUint16(val)
-}
-
-// WriteUint32 writes uint32 to the stream
-func (stream *Stream) WriteUint32(val uint32) {
-	q1 := val / 1000
-	if q1 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[val])
-		return
-	}
-	r1 := val - q1*1000
-	q2 := q1 / 1000
-	if q2 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[q1])
-		stream.buf = writeBuf(stream.buf, digits[r1])
-		return
-	}
-	r2 := q1 - q2*1000
-	q3 := q2 / 1000
-	if q3 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[q2])
-	} else {
-		r3 := q2 - q3*1000
-		stream.buf = append(stream.buf, byte(q3+'0'))
-		stream.buf = writeBuf(stream.buf, digits[r3])
-	}
-	stream.buf = writeBuf(stream.buf, digits[r2])
-	stream.buf = writeBuf(stream.buf, digits[r1])
-}
-
-// WriteInt32 writes int32 to the stream
-func (stream *Stream) WriteInt32(nval int32) {
-	var val uint32
-	if nval < 0 {
-		val = uint32(-nval)
-		stream.buf = append(stream.buf, '-')
-	} else {
-		val = uint32(nval)
-	}
-	stream.WriteUint32(val)
-}
-
-// WriteUint64 writes uint64 to the stream
-func (stream *Stream) WriteUint64(val uint64) {
-	q1 := val / 1000
-	if q1 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[val])
-		return
-	}
-	r1 := val - q1*1000
-	q2 := q1 / 1000
-	if q2 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[q1])
-		stream.buf = writeBuf(stream.buf, digits[r1])
-		return
-	}
-	r2 := q1 - q2*1000
-	q3 := q2 / 1000
-	if q3 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[q2])
-		stream.buf = writeBuf(stream.buf, digits[r2])
-		stream.buf = writeBuf(stream.buf, digits[r1])
-		return
-	}
-	r3 := q2 - q3*1000
-	q4 := q3 / 1000
-	if q4 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[q3])
-		stream.buf = writeBuf(stream.buf, digits[r3])
-		stream.buf = writeBuf(stream.buf, digits[r2])
-		stream.buf = writeBuf(stream.buf, digits[r1])
-		return
-	}
-	r4 := q3 - q4*1000
-	q5 := q4 / 1000
-	if q5 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[q4])
-		stream.buf = writeBuf(stream.buf, digits[r4])
-		stream.buf = writeBuf(stream.buf, digits[r3])
-		stream.buf = writeBuf(stream.buf, digits[r2])
-		stream.buf = writeBuf(stream.buf, digits[r1])
-		return
-	}
-	r5 := q4 - q5*1000
-	q6 := q5 / 1000
-	if q6 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[q5])
-	} else {
-		stream.buf = writeFirstBuf(stream.buf, digits[q6])
-		r6 := q5 - q6*1000
-		stream.buf = writeBuf(stream.buf, digits[r6])
-	}
-	stream.buf = writeBuf(stream.buf, digits[r5])
-	stream.buf = writeBuf(stream.buf, digits[r4])
-	stream.buf = writeBuf(stream.buf, digits[r3])
-	stream.buf = writeBuf(stream.buf, digits[r2])
-	stream.buf = writeBuf(stream.buf, digits[r1])
-}
-
-// WriteInt64 writes int64 to the stream
-func (stream *Stream) WriteInt64(nval int64) {
-	var val uint64
-	if nval < 0 {
-		val = uint64(-nval)
-		stream.buf = append(stream.buf, '-')
-	} else {
-		val = uint64(nval)
-	}
-	stream.WriteUint64(val)
-}
-
-// WriteInt writes int to the stream
-func (stream *Stream) WriteInt(val int) {
-	stream.WriteInt64(int64(val))
-}
-
-// WriteUint writes uint to the stream
-func (stream *Stream) WriteUint(val uint) {
-	stream.WriteUint64(uint64(val))
-}
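
The digits table built in init() packs, for each i < 1000, the three ASCII digit bytes of i into the low three bytes of a uint32 and a skip count into the top byte, so writeFirstBuf knows how many leading zeros to drop. A worked decode of one entry, reusing the exact expression from init():

package main

import "fmt"

func main() {
	i := uint32(42)
	v := (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
	v += 1 << 24 // 10 <= i < 100: one leading zero to skip
	// top byte = skip count, low three bytes = ASCII digits "042"
	fmt.Printf("skip=%d digits=%c%c%c\n", v>>24, byte(v>>16), byte(v>>8), byte(v))
	// skip=1 digits=042 -> writeFirstBuf emits "42"
}
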
diff --git a/vendor/github.com/json-iterator/go/stream_str.go b/vendor/github.com/json-iterator/go/stream_str.go
deleted file mode 100644
index 54c2ba0..0000000
--- a/vendor/github.com/json-iterator/go/stream_str.go
+++ /dev/null
@@ -1,372 +0,0 @@
-package jsoniter
-
-import (
-	"unicode/utf8"
-)
-
-// htmlSafeSet holds the value true if the ASCII character with the given
-// array position can be safely represented inside a JSON string, embedded
-// inside of HTML <script> tags, without any additional escaping.
-//
-// All values are true except for the ASCII control characters (0-31), the
-// double quote ("), the backslash character ("\"), HTML opening and closing
-// tags ("<" and ">"), and the ampersand ("&").
-var htmlSafeSet = [utf8.RuneSelf]bool{
-	' ':      true,
-	'!':      true,
-	'"':      false,
-	'#':      true,
-	'$':      true,
-	'%':      true,
-	'&':      false,
-	'\'':     true,
-	'(':      true,
-	')':      true,
-	'*':      true,
-	'+':      true,
-	',':      true,
-	'-':      true,
-	'.':      true,
-	'/':      true,
-	'0':      true,
-	'1':      true,
-	'2':      true,
-	'3':      true,
-	'4':      true,
-	'5':      true,
-	'6':      true,
-	'7':      true,
-	'8':      true,
-	'9':      true,
-	':':      true,
-	';':      true,
-	'<':      false,
-	'=':      true,
-	'>':      false,
-	'?':      true,
-	'@':      true,
-	'A':      true,
-	'B':      true,
-	'C':      true,
-	'D':      true,
-	'E':      true,
-	'F':      true,
-	'G':      true,
-	'H':      true,
-	'I':      true,
-	'J':      true,
-	'K':      true,
-	'L':      true,
-	'M':      true,
-	'N':      true,
-	'O':      true,
-	'P':      true,
-	'Q':      true,
-	'R':      true,
-	'S':      true,
-	'T':      true,
-	'U':      true,
-	'V':      true,
-	'W':      true,
-	'X':      true,
-	'Y':      true,
-	'Z':      true,
-	'[':      true,
-	'\\':     false,
-	']':      true,
-	'^':      true,
-	'_':      true,
-	'`':      true,
-	'a':      true,
-	'b':      true,
-	'c':      true,
-	'd':      true,
-	'e':      true,
-	'f':      true,
-	'g':      true,
-	'h':      true,
-	'i':      true,
-	'j':      true,
-	'k':      true,
-	'l':      true,
-	'm':      true,
-	'n':      true,
-	'o':      true,
-	'p':      true,
-	'q':      true,
-	'r':      true,
-	's':      true,
-	't':      true,
-	'u':      true,
-	'v':      true,
-	'w':      true,
-	'x':      true,
-	'y':      true,
-	'z':      true,
-	'{':      true,
-	'|':      true,
-	'}':      true,
-	'~':      true,
-	'\u007f': true,
-}
-
-// safeSet holds the value true if the ASCII character with the given array
-// position can be represented inside a JSON string without any further
-// escaping.
-//
-// All values are true except for the ASCII control characters (0-31), the
-// double quote ("), and the backslash character ("\").
-var safeSet = [utf8.RuneSelf]bool{
-	' ':      true,
-	'!':      true,
-	'"':      false,
-	'#':      true,
-	'$':      true,
-	'%':      true,
-	'&':      true,
-	'\'':     true,
-	'(':      true,
-	')':      true,
-	'*':      true,
-	'+':      true,
-	',':      true,
-	'-':      true,
-	'.':      true,
-	'/':      true,
-	'0':      true,
-	'1':      true,
-	'2':      true,
-	'3':      true,
-	'4':      true,
-	'5':      true,
-	'6':      true,
-	'7':      true,
-	'8':      true,
-	'9':      true,
-	':':      true,
-	';':      true,
-	'<':      true,
-	'=':      true,
-	'>':      true,
-	'?':      true,
-	'@':      true,
-	'A':      true,
-	'B':      true,
-	'C':      true,
-	'D':      true,
-	'E':      true,
-	'F':      true,
-	'G':      true,
-	'H':      true,
-	'I':      true,
-	'J':      true,
-	'K':      true,
-	'L':      true,
-	'M':      true,
-	'N':      true,
-	'O':      true,
-	'P':      true,
-	'Q':      true,
-	'R':      true,
-	'S':      true,
-	'T':      true,
-	'U':      true,
-	'V':      true,
-	'W':      true,
-	'X':      true,
-	'Y':      true,
-	'Z':      true,
-	'[':      true,
-	'\\':     false,
-	']':      true,
-	'^':      true,
-	'_':      true,
-	'`':      true,
-	'a':      true,
-	'b':      true,
-	'c':      true,
-	'd':      true,
-	'e':      true,
-	'f':      true,
-	'g':      true,
-	'h':      true,
-	'i':      true,
-	'j':      true,
-	'k':      true,
-	'l':      true,
-	'm':      true,
-	'n':      true,
-	'o':      true,
-	'p':      true,
-	'q':      true,
-	'r':      true,
-	's':      true,
-	't':      true,
-	'u':      true,
-	'v':      true,
-	'w':      true,
-	'x':      true,
-	'y':      true,
-	'z':      true,
-	'{':      true,
-	'|':      true,
-	'}':      true,
-	'~':      true,
-	'\u007f': true,
-}
-
-var hex = "0123456789abcdef"
-
-// WriteStringWithHTMLEscaped writes a string to the stream with HTML special characters escaped
-func (stream *Stream) WriteStringWithHTMLEscaped(s string) {
-	valLen := len(s)
-	stream.buf = append(stream.buf, '"')
-	// fast path: copy bytes that need no UTF-8 decoding and no escaping
-	i := 0
-	for ; i < valLen; i++ {
-		c := s[i]
-		if c < utf8.RuneSelf && htmlSafeSet[c] {
-			stream.buf = append(stream.buf, c)
-		} else {
-			break
-		}
-	}
-	if i == valLen {
-		stream.buf = append(stream.buf, '"')
-		return
-	}
-	writeStringSlowPathWithHTMLEscaped(stream, i, s, valLen)
-}
-
-func writeStringSlowPathWithHTMLEscaped(stream *Stream, i int, s string, valLen int) {
-	start := i
-	// process the remainder character by character
-	for i < valLen {
-		if b := s[i]; b < utf8.RuneSelf {
-			if htmlSafeSet[b] {
-				i++
-				continue
-			}
-			if start < i {
-				stream.WriteRaw(s[start:i])
-			}
-			switch b {
-			case '\\', '"':
-				stream.writeTwoBytes('\\', b)
-			case '\n':
-				stream.writeTwoBytes('\\', 'n')
-			case '\r':
-				stream.writeTwoBytes('\\', 'r')
-			case '\t':
-				stream.writeTwoBytes('\\', 't')
-			default:
-				// This encodes bytes < 0x20 except for \t, \n and \r.
-				// If escapeHTML is set, it also escapes <, >, and &
-				// because they can lead to security holes when
-				// user-controlled strings are rendered into JSON
-				// and served to some browsers.
-				stream.WriteRaw(`\u00`)
-				stream.writeTwoBytes(hex[b>>4], hex[b&0xF])
-			}
-			i++
-			start = i
-			continue
-		}
-		c, size := utf8.DecodeRuneInString(s[i:])
-		if c == utf8.RuneError && size == 1 {
-			if start < i {
-				stream.WriteRaw(s[start:i])
-			}
-			stream.WriteRaw(`\ufffd`)
-			i++
-			start = i
-			continue
-		}
-		// U+2028 is LINE SEPARATOR.
-		// U+2029 is PARAGRAPH SEPARATOR.
-		// They are both technically valid characters in JSON strings,
-		// but don't work in JSONP, which has to be evaluated as JavaScript,
-		// and can lead to security holes there. It is valid JSON to
-		// escape them, so we do so unconditionally.
-		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
-		if c == '\u2028' || c == '\u2029' {
-			if start < i {
-				stream.WriteRaw(s[start:i])
-			}
-			stream.WriteRaw(`\u202`)
-			stream.writeByte(hex[c&0xF])
-			i += size
-			start = i
-			continue
-		}
-		i += size
-	}
-	if start < len(s) {
-		stream.WriteRaw(s[start:])
-	}
-	stream.writeByte('"')
-}
-
-// WriteString writes a string to the stream without HTML escaping
-func (stream *Stream) WriteString(s string) {
-	valLen := len(s)
-	stream.buf = append(stream.buf, '"')
-	// fast path: copy bytes that need no UTF-8 decoding and no escaping
-	i := 0
-	for ; i < valLen; i++ {
-		c := s[i]
-		if c > 31 && c != '"' && c != '\\' {
-			stream.buf = append(stream.buf, c)
-		} else {
-			break
-		}
-	}
-	if i == valLen {
-		stream.buf = append(stream.buf, '"')
-		return
-	}
-	writeStringSlowPath(stream, i, s, valLen)
-}
-
-func writeStringSlowPath(stream *Stream, i int, s string, valLen int) {
-	start := i
-	// process the remainder character by character
-	for i < valLen {
-		if b := s[i]; b < utf8.RuneSelf {
-			if safeSet[b] {
-				i++
-				continue
-			}
-			if start < i {
-				stream.WriteRaw(s[start:i])
-			}
-			switch b {
-			case '\\', '"':
-				stream.writeTwoBytes('\\', b)
-			case '\n':
-				stream.writeTwoBytes('\\', 'n')
-			case '\r':
-				stream.writeTwoBytes('\\', 'r')
-			case '\t':
-				stream.writeTwoBytes('\\', 't')
-			default:
-				// This encodes bytes < 0x20 except for \t, \n and \r
-				// as \u00XX escapes. Unlike the HTML-escaping variant
-				// above, this path leaves <, >, and & alone.
-				stream.WriteRaw(`\u00`)
-				stream.writeTwoBytes(hex[b>>4], hex[b&0xF])
-			}
-			i++
-			start = i
-			continue
-		}
-		i++
-		continue
-	}
-	if start < len(s) {
-		stream.WriteRaw(s[start:])
-	}
-	stream.writeByte('"')
-}
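-
-// A minimal sketch of the difference between the two writers above,
-// assuming the package's NewStream constructor and Buffer accessor:
-//
-//	s := NewStream(ConfigDefault, nil, 64)
-//	s.WriteString("a<b")                // appends "a<b" (quotes included; < left alone)
-//	s.WriteStringWithHTMLEscaped("a<b") // appends "a\u003cb"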
diff --git a/vendor/github.com/json-iterator/go/test.sh b/vendor/github.com/json-iterator/go/test.sh
deleted file mode 100755
index f4e7c0b..0000000
--- a/vendor/github.com/json-iterator/go/test.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-echo "" > coverage.txt
-
-for d in $(go list ./... | grep -v vendor); do
-    go test -coverprofile=profile.out -coverpkg=github.com/json-iterator/go $d
-    if [ -f profile.out ]; then
-        cat profile.out >> coverage.txt
-        rm profile.out
-    fi
-done
diff --git a/vendor/github.com/modern-go/concurrent/.gitignore b/vendor/github.com/modern-go/concurrent/.gitignore
deleted file mode 100644
index 3f2bc47..0000000
--- a/vendor/github.com/modern-go/concurrent/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-/coverage.txt
diff --git a/vendor/github.com/modern-go/concurrent/.travis.yml b/vendor/github.com/modern-go/concurrent/.travis.yml
deleted file mode 100644
index 449e67c..0000000
--- a/vendor/github.com/modern-go/concurrent/.travis.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-language: go
-
-go:
-  - 1.8.x
-  - 1.x
-
-before_install:
-  - go get -t -v ./...
-
-script:
-  - ./test.sh
-
-after_success:
-  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/modern-go/concurrent/LICENSE b/vendor/github.com/modern-go/concurrent/LICENSE
deleted file mode 100644
index 261eeb9..0000000
--- a/vendor/github.com/modern-go/concurrent/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/github.com/modern-go/concurrent/README.md b/vendor/github.com/modern-go/concurrent/README.md
deleted file mode 100644
index acab320..0000000
--- a/vendor/github.com/modern-go/concurrent/README.md
+++ /dev/null
@@ -1,49 +0,0 @@
-# concurrent
-
-[![Sourcegraph](https://sourcegraph.com/github.com/modern-go/concurrent/-/badge.svg)](https://sourcegraph.com/github.com/modern-go/concurrent?badge)
-[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/modern-go/concurrent)
-[![Build Status](https://travis-ci.org/modern-go/concurrent.svg?branch=master)](https://travis-ci.org/modern-go/concurrent)
-[![codecov](https://codecov.io/gh/modern-go/concurrent/branch/master/graph/badge.svg)](https://codecov.io/gh/modern-go/concurrent)
-[![rcard](https://goreportcard.com/badge/github.com/modern-go/concurrent)](https://goreportcard.com/report/github.com/modern-go/concurrent)
-[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://raw.githubusercontent.com/modern-go/concurrent/master/LICENSE)
-
-* concurrent.Map: backport sync.Map for go below 1.9
-* concurrent.Executor: goroutines with explicit ownership and cancellation
-
-# concurrent.Map
-
-Because sync.Map is only available in Go 1.9 and later, concurrent.Map can be used to keep code portable across Go versions.
-
-```go
-m := concurrent.NewMap()
-m.Store("hello", "world")
-elem, found := m.Load("hello")
-// elem will be "world"
-// found will be true
-```
-
-# concurrent.Executor
-
-```go
-executor := concurrent.NewUnboundedExecutor()
-executor.Go(func(ctx context.Context) {
-    everyMillisecond := time.NewTicker(time.Millisecond)
-    for {
-        select {
-        case <-ctx.Done():
-            fmt.Println("goroutine exited")
-            return
-        case <-everyMillisecond.C:
-            // do something
-        }
-    }
-})
-time.Sleep(time.Second)
-executor.StopAndWaitForever()
-fmt.Println("executor stopped")
-```
-
-Attaching goroutines to an executor instance lets us:
-
-* cancel them by stopping the executor with Stop/StopAndWait/StopAndWaitForever
-* handle panics via a callback: by default, a panic no longer crashes your application (see the sketch below)
\ No newline at end of file
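-
-As a sketch of the panic callback, using the exported `HandlePanic` field of
-`UnboundedExecutor` shown in this package:
-
-```go
-executor := concurrent.NewUnboundedExecutor()
-executor.HandlePanic = func(recovered interface{}, funcName string) {
-    fmt.Println("recovered from", funcName, ":", recovered)
-}
-executor.Go(func(ctx context.Context) {
-    panic("boom") // handled by the callback instead of crashing the process
-})
-executor.StopAndWaitForever()
-```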
diff --git a/vendor/github.com/modern-go/concurrent/executor.go b/vendor/github.com/modern-go/concurrent/executor.go
deleted file mode 100644
index 623dba1..0000000
--- a/vendor/github.com/modern-go/concurrent/executor.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package concurrent
-
-import "context"
-
-// Executor replaces the go keyword for starting new goroutines.
-// The goroutine should cancel itself if the context passed in has been cancelled.
-// Goroutines started by an executor are owned by that executor,
-// so all of them can be cancelled just by stopping the executor itself.
-// However, the Executor interface has no Stop method; whoever starts and owns
-// the executor should use its concrete type instead of this interface.
-type Executor interface {
-	// Go starts a new goroutine controlled by the context
-	Go(handler func(ctx context.Context))
-}
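-
-// A minimal sketch: any type with a matching Go method satisfies Executor.
-// A trivial, non-tracking implementation could look like:
-//
-//	type plainExecutor struct{ ctx context.Context }
-//
-//	func (e plainExecutor) Go(handler func(ctx context.Context)) {
-//		go handler(e.ctx)
-//	}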
diff --git a/vendor/github.com/modern-go/concurrent/go_above_19.go b/vendor/github.com/modern-go/concurrent/go_above_19.go
deleted file mode 100644
index aeabf8c..0000000
--- a/vendor/github.com/modern-go/concurrent/go_above_19.go
+++ /dev/null
@@ -1,15 +0,0 @@
-//+build go1.9
-
-package concurrent
-
-import "sync"
-
-// Map is a wrapper for sync.Map introduced in go1.9
-type Map struct {
-	sync.Map
-}
-
-// NewMap creates a thread-safe Map
-func NewMap() *Map {
-	return &Map{}
-}
diff --git a/vendor/github.com/modern-go/concurrent/go_below_19.go b/vendor/github.com/modern-go/concurrent/go_below_19.go
deleted file mode 100644
index b9c8df7..0000000
--- a/vendor/github.com/modern-go/concurrent/go_below_19.go
+++ /dev/null
@@ -1,33 +0,0 @@
-//+build !go1.9
-
-package concurrent
-
-import "sync"
-
-// Map implements a thread-safe map for Go versions below 1.9 using a mutex
-type Map struct {
-	lock sync.RWMutex
-	data map[interface{}]interface{}
-}
-
-// NewMap creates a thread-safe map
-func NewMap() *Map {
-	return &Map{
-		data: make(map[interface{}]interface{}, 32),
-	}
-}
-
-// Load is the same as sync.Map's Load
-func (m *Map) Load(key interface{}) (elem interface{}, found bool) {
-	m.lock.RLock()
-	elem, found = m.data[key]
-	m.lock.RUnlock()
-	return
-}
-
-// Store is the same as sync.Map's Store
-func (m *Map) Store(key interface{}, elem interface{}) {
-	m.lock.Lock()
-	m.data[key] = elem
-	m.lock.Unlock()
-}
diff --git a/vendor/github.com/modern-go/concurrent/log.go b/vendor/github.com/modern-go/concurrent/log.go
deleted file mode 100644
index 9756fcc..0000000
--- a/vendor/github.com/modern-go/concurrent/log.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package concurrent
-
-import (
-	"os"
-	"log"
-	"io/ioutil"
-)
-
-// ErrorLogger is used to print errors; it can be set to a writer other than stderr
-var ErrorLogger = log.New(os.Stderr, "", 0)
-
-// InfoLogger is used to print informational messages; it defaults to discard (off)
-var InfoLogger = log.New(ioutil.Discard, "", 0)
\ No newline at end of file
diff --git a/vendor/github.com/modern-go/concurrent/test.sh b/vendor/github.com/modern-go/concurrent/test.sh
deleted file mode 100755
index d1e6b2e..0000000
--- a/vendor/github.com/modern-go/concurrent/test.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-echo "" > coverage.txt
-
-for d in $(go list ./... | grep -v vendor); do
-    go test -coverprofile=profile.out -coverpkg=github.com/modern-go/concurrent $d
-    if [ -f profile.out ]; then
-        cat profile.out >> coverage.txt
-        rm profile.out
-    fi
-done
diff --git a/vendor/github.com/modern-go/concurrent/unbounded_executor.go b/vendor/github.com/modern-go/concurrent/unbounded_executor.go
deleted file mode 100644
index 05a77dc..0000000
--- a/vendor/github.com/modern-go/concurrent/unbounded_executor.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package concurrent
-
-import (
-	"context"
-	"fmt"
-	"runtime"
-	"runtime/debug"
-	"sync"
-	"time"
-	"reflect"
-)
-
-// HandlePanic logs goroutine panics by default
-var HandlePanic = func(recovered interface{}, funcName string) {
-	ErrorLogger.Println(fmt.Sprintf("%s panic: %v", funcName, recovered))
-	ErrorLogger.Println(string(debug.Stack()))
-}
-
-// UnboundedExecutor is an executor with no limit on the number of live goroutines.
-// It tracks the goroutines it starts and can cancel them on shutdown.
-type UnboundedExecutor struct {
-	ctx                   context.Context
-	cancel                context.CancelFunc
-	activeGoroutinesMutex *sync.Mutex
-	activeGoroutines      map[string]int
-	HandlePanic           func(recovered interface{}, funcName string)
-}
-
-// GlobalUnboundedExecutor has the life cycle of the program itself.
-// Any goroutine that needs to be shut down before main exits can be started from this executor.
-// GlobalUnboundedExecutor expects the main function to call Stop;
-// it does not magically know when the main function exits.
-var GlobalUnboundedExecutor = NewUnboundedExecutor()
-
-// NewUnboundedExecutor creates a new UnboundedExecutor.
-// An UnboundedExecutor cannot be created with &UnboundedExecutor{};
-// its HandlePanic field can be set with a callback to override the global HandlePanic.
-func NewUnboundedExecutor() *UnboundedExecutor {
-	ctx, cancel := context.WithCancel(context.TODO())
-	return &UnboundedExecutor{
-		ctx:                   ctx,
-		cancel:                cancel,
-		activeGoroutinesMutex: &sync.Mutex{},
-		activeGoroutines:      map[string]int{},
-	}
-}
-
-// Go starts a new goroutine and tracks its lifecycle.
-// Panics are recovered and logged automatically, except for StopSignal.
-func (executor *UnboundedExecutor) Go(handler func(ctx context.Context)) {
-	pc := reflect.ValueOf(handler).Pointer()
-	f := runtime.FuncForPC(pc)
-	funcName := f.Name()
-	file, line := f.FileLine(pc)
-	executor.activeGoroutinesMutex.Lock()
-	defer executor.activeGoroutinesMutex.Unlock()
-	startFrom := fmt.Sprintf("%s:%d", file, line)
-	executor.activeGoroutines[startFrom] += 1
-	go func() {
-		defer func() {
-			recovered := recover()
-			// to quit a goroutine without triggering HandlePanic,
-			// use runtime.Goexit() instead of panicking
-			if recovered != nil {
-				if executor.HandlePanic == nil {
-					HandlePanic(recovered, funcName)
-				} else {
-					executor.HandlePanic(recovered, funcName)
-				}
-			}
-			executor.activeGoroutinesMutex.Lock()
-			executor.activeGoroutines[startFrom] -= 1
-			executor.activeGoroutinesMutex.Unlock()
-		}()
-		handler(executor.ctx)
-	}()
-}
-
-// Stop cancels all goroutines started by this executor without waiting
-func (executor *UnboundedExecutor) Stop() {
-	executor.cancel()
-}
-
-// StopAndWaitForever cancels all goroutines started by this executor and
-// waits until all of them have exited
-func (executor *UnboundedExecutor) StopAndWaitForever() {
-	executor.StopAndWait(context.Background())
-}
-
-// StopAndWait cancels all goroutines started by this executor and waits for them.
-// The wait itself can be cancelled via the context passed in.
-func (executor *UnboundedExecutor) StopAndWait(ctx context.Context) {
-	executor.cancel()
-	for {
-		oneHundredMilliseconds := time.NewTimer(time.Millisecond * 100)
-		select {
-		case <-oneHundredMilliseconds.C:
-			if executor.checkNoActiveGoroutines() {
-				return
-			}
-		case <-ctx.Done():
-			return
-		}
-	}
-}
-
-func (executor *UnboundedExecutor) checkNoActiveGoroutines() bool {
-	executor.activeGoroutinesMutex.Lock()
-	defer executor.activeGoroutinesMutex.Unlock()
-	for startFrom, count := range executor.activeGoroutines {
-		if count > 0 {
-			InfoLogger.Println("UnboundedExecutor is still waiting for goroutines to quit",
-				"startFrom", startFrom,
-				"count", count)
-			return false
-		}
-	}
-	return true
-}
diff --git a/vendor/github.com/modern-go/reflect2/.gitignore b/vendor/github.com/modern-go/reflect2/.gitignore
deleted file mode 100644
index 7b26c94..0000000
--- a/vendor/github.com/modern-go/reflect2/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/vendor
-/coverage.txt
diff --git a/vendor/github.com/modern-go/reflect2/.travis.yml b/vendor/github.com/modern-go/reflect2/.travis.yml
deleted file mode 100644
index fbb4374..0000000
--- a/vendor/github.com/modern-go/reflect2/.travis.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-language: go
-
-go:
-  - 1.8.x
-  - 1.x
-
-before_install:
-  - go get -t -v ./...
-  - go get -t -v github.com/modern-go/reflect2-tests/...
-
-script:
-  - ./test.sh
-
-after_success:
-  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.lock b/vendor/github.com/modern-go/reflect2/Gopkg.lock
deleted file mode 100644
index 2a3a698..0000000
--- a/vendor/github.com/modern-go/reflect2/Gopkg.lock
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
-  name = "github.com/modern-go/concurrent"
-  packages = ["."]
-  revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
-  version = "1.0.0"
-
-[solve-meta]
-  analyzer-name = "dep"
-  analyzer-version = 1
-  inputs-digest = "daee8a88b3498b61c5640056665b8b9eea062006f5e596bbb6a3ed9119a11ec7"
-  solver-name = "gps-cdcl"
-  solver-version = 1
diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.toml b/vendor/github.com/modern-go/reflect2/Gopkg.toml
deleted file mode 100644
index 2f4f4db..0000000
--- a/vendor/github.com/modern-go/reflect2/Gopkg.toml
+++ /dev/null
@@ -1,35 +0,0 @@
-# Gopkg.toml example
-#
-# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
-# for detailed Gopkg.toml documentation.
-#
-# required = ["github.com/user/thing/cmd/thing"]
-# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
-#
-# [[constraint]]
-#   name = "github.com/user/project"
-#   version = "1.0.0"
-#
-# [[constraint]]
-#   name = "github.com/user/project2"
-#   branch = "dev"
-#   source = "github.com/myfork/project2"
-#
-# [[override]]
-#   name = "github.com/x/y"
-#   version = "2.4.0"
-#
-# [prune]
-#   non-go = false
-#   go-tests = true
-#   unused-packages = true
-
-ignored = []
-
-[[constraint]]
-  name = "github.com/modern-go/concurrent"
-  version = "1.0.0"
-
-[prune]
-  go-tests = true
-  unused-packages = true
diff --git a/vendor/github.com/modern-go/reflect2/LICENSE b/vendor/github.com/modern-go/reflect2/LICENSE
deleted file mode 100644
index 261eeb9..0000000
--- a/vendor/github.com/modern-go/reflect2/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/github.com/modern-go/reflect2/README.md b/vendor/github.com/modern-go/reflect2/README.md
deleted file mode 100644
index 6f968aa..0000000
--- a/vendor/github.com/modern-go/reflect2/README.md
+++ /dev/null
@@ -1,71 +0,0 @@
-# reflect2
-
-[![Sourcegraph](https://sourcegraph.com/github.com/modern-go/reflect2/-/badge.svg)](https://sourcegraph.com/github.com/modern-go/reflect2?badge)
-[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/modern-go/reflect2)
-[![Build Status](https://travis-ci.org/modern-go/reflect2.svg?branch=master)](https://travis-ci.org/modern-go/reflect2)
-[![codecov](https://codecov.io/gh/modern-go/reflect2/branch/master/graph/badge.svg)](https://codecov.io/gh/modern-go/reflect2)
-[![rcard](https://goreportcard.com/badge/github.com/modern-go/reflect2)](https://goreportcard.com/report/github.com/modern-go/reflect2)
-[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://raw.githubusercontent.com/modern-go/reflect2/master/LICENSE)
-
-A reflect API that avoids the runtime cost of reflect.Value.
-
-* reflect get/set interface{}, with type checking
-* reflect get/set unsafe.Pointer, without type checking
-* `reflect2.TypeByName` works like `Class.forName` in Java
-
-[json-iterator](https://github.com/json-iterator/go) uses this package to avoid runtime dispatching cost.
-This package is designed for low-level libraries that need to optimize reflection performance;
-general applications should still use the standard reflect library.
-
-# reflect2.TypeByName
-
-```go
-// given package is github.com/your/awesome-package
-type MyStruct struct {
-	// ...
-}
-
-// will return the type
-reflect2.TypeByName("awesome-package.MyStruct")
-// however, if the type has never been used,
-// it is eliminated by the compiler and cannot be found at runtime
-```
-
-# reflect2 get/set interface{}
-
-```go
-valType := reflect2.TypeOf(1)
-i := 1
-j := 10
-valType.Set(&i, &j)
-// i will be 10
-```
-
-To get or set a value of type `T`, always pass its pointer `*T`.
-
-# reflect2 get/set unsafe.Pointer
-
-```go
-valType := reflect2.TypeOf(1)
-i := 1
-j := 10
-valType.UnsafeSet(unsafe.Pointer(&i), unsafe.Pointer(&j))
-// i will be 10
-```
-
-To get or set a value of type `T`, always pass its pointer `*T`.
-
-# benchmark
-
-Benchmarks are not necessary for this package: it does almost nothing itself,
-being just a thin wrapper that makes go runtime internals public.
-Both `reflect2` and `reflect` call the same functions
-provided by the `runtime` package.
-
-# unsafe safety
-
-Instead of casting `[]byte` to a `sliceHeader` in your application with unsafe,
-you can use reflect2; that way, if `sliceHeader` changes in the future,
-only reflect2 needs to be upgraded (a sketch follows below).
-
-reflect2 tries its best to keep its implementation consistent with reflect (verified by tests).
\ No newline at end of file
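-
-A sketch of that idea with the `SliceType` API from this package (note the
-pointer-passing convention described above):
-
-```go
-sliceType := reflect2.TypeOf([]int{}).(reflect2.SliceType)
-obj := sliceType.MakeSlice(0, 4) // *[]int wrapped in interface{}
-v := 1
-sliceType.Append(obj, &v)        // reflect2 manipulates the slice header internally
-// sliceType.LengthOf(obj) == 1
-```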
diff --git a/vendor/github.com/modern-go/reflect2/go_above_17.go b/vendor/github.com/modern-go/reflect2/go_above_17.go
deleted file mode 100644
index 5c1cea8..0000000
--- a/vendor/github.com/modern-go/reflect2/go_above_17.go
+++ /dev/null
@@ -1,8 +0,0 @@
-//+build go1.7
-
-package reflect2
-
-import "unsafe"
-
-//go:linkname resolveTypeOff reflect.resolveTypeOff
-func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
diff --git a/vendor/github.com/modern-go/reflect2/go_above_19.go b/vendor/github.com/modern-go/reflect2/go_above_19.go
deleted file mode 100644
index c7e3b78..0000000
--- a/vendor/github.com/modern-go/reflect2/go_above_19.go
+++ /dev/null
@@ -1,14 +0,0 @@
-//+build go1.9
-
-package reflect2
-
-import (
-	"unsafe"
-)
-
-//go:linkname makemap reflect.makemap
-func makemap(rtype unsafe.Pointer, cap int) (m unsafe.Pointer)
-
-func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer {
-	return makemap(rtype, cap)
-}
diff --git a/vendor/github.com/modern-go/reflect2/go_below_17.go b/vendor/github.com/modern-go/reflect2/go_below_17.go
deleted file mode 100644
index 65a93c8..0000000
--- a/vendor/github.com/modern-go/reflect2/go_below_17.go
+++ /dev/null
@@ -1,9 +0,0 @@
-//+build !go1.7
-
-package reflect2
-
-import "unsafe"
-
-func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
-	return nil
-}
diff --git a/vendor/github.com/modern-go/reflect2/go_below_19.go b/vendor/github.com/modern-go/reflect2/go_below_19.go
deleted file mode 100644
index b050ef7..0000000
--- a/vendor/github.com/modern-go/reflect2/go_below_19.go
+++ /dev/null
@@ -1,14 +0,0 @@
-//+build !go1.9
-
-package reflect2
-
-import (
-	"unsafe"
-)
-
-//go:linkname makemap reflect.makemap
-func makemap(rtype unsafe.Pointer) (m unsafe.Pointer)
-
-func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer {
-	return makemap(rtype)
-}
diff --git a/vendor/github.com/modern-go/reflect2/reflect2.go b/vendor/github.com/modern-go/reflect2/reflect2.go
deleted file mode 100644
index 63b49c7..0000000
--- a/vendor/github.com/modern-go/reflect2/reflect2.go
+++ /dev/null
@@ -1,298 +0,0 @@
-package reflect2
-
-import (
-	"github.com/modern-go/concurrent"
-	"reflect"
-	"unsafe"
-)
-
-type Type interface {
-	Kind() reflect.Kind
-	// New returns a pointer to newly allocated data of this type
-	New() interface{}
-	// UnsafeNew returns the newly allocated space as an unsafe.Pointer
-	UnsafeNew() unsafe.Pointer
-	// PackEFace casts an unsafe.Pointer into an interface{} backed by this type
-	PackEFace(ptr unsafe.Pointer) interface{}
-	// Indirect dereferences an interface{} holding a pointer to this type
-	Indirect(obj interface{}) interface{}
-	// UnsafeIndirect dereferences an unsafe.Pointer to this type
-	UnsafeIndirect(ptr unsafe.Pointer) interface{}
-	// Type1 returns reflect.Type
-	Type1() reflect.Type
-	Implements(thatType Type) bool
-	String() string
-	RType() uintptr
-	// LikePtr reports whether an interface{} of this type has pointer-like behavior
-	LikePtr() bool
-	IsNullable() bool
-	IsNil(obj interface{}) bool
-	UnsafeIsNil(ptr unsafe.Pointer) bool
-	Set(obj interface{}, val interface{})
-	UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer)
-	AssignableTo(anotherType Type) bool
-}
-
-type ListType interface {
-	Type
-	Elem() Type
-	SetIndex(obj interface{}, index int, elem interface{})
-	UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer)
-	GetIndex(obj interface{}, index int) interface{}
-	UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer
-}
-
-type ArrayType interface {
-	ListType
-	Len() int
-}
-
-type SliceType interface {
-	ListType
-	MakeSlice(length int, cap int) interface{}
-	UnsafeMakeSlice(length int, cap int) unsafe.Pointer
-	Grow(obj interface{}, newLength int)
-	UnsafeGrow(ptr unsafe.Pointer, newLength int)
-	Append(obj interface{}, elem interface{})
-	UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer)
-	LengthOf(obj interface{}) int
-	UnsafeLengthOf(ptr unsafe.Pointer) int
-	SetNil(obj interface{})
-	UnsafeSetNil(ptr unsafe.Pointer)
-	Cap(obj interface{}) int
-	UnsafeCap(ptr unsafe.Pointer) int
-}
-
-type StructType interface {
-	Type
-	NumField() int
-	Field(i int) StructField
-	FieldByName(name string) StructField
-	FieldByIndex(index []int) StructField
-	FieldByNameFunc(match func(string) bool) StructField
-}
-
-type StructField interface {
-	Offset() uintptr
-	Name() string
-	PkgPath() string
-	Type() Type
-	Tag() reflect.StructTag
-	Index() []int
-	Anonymous() bool
-	Set(obj interface{}, value interface{})
-	UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer)
-	Get(obj interface{}) interface{}
-	UnsafeGet(obj unsafe.Pointer) unsafe.Pointer
-}
-
-type MapType interface {
-	Type
-	Key() Type
-	Elem() Type
-	MakeMap(cap int) interface{}
-	UnsafeMakeMap(cap int) unsafe.Pointer
-	SetIndex(obj interface{}, key interface{}, elem interface{})
-	UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer)
-	TryGetIndex(obj interface{}, key interface{}) (interface{}, bool)
-	GetIndex(obj interface{}, key interface{}) interface{}
-	UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer
-	Iterate(obj interface{}) MapIterator
-	UnsafeIterate(obj unsafe.Pointer) MapIterator
-}
-
-type MapIterator interface {
-	HasNext() bool
-	Next() (key interface{}, elem interface{})
-	UnsafeNext() (key unsafe.Pointer, elem unsafe.Pointer)
-}
-
-type PtrType interface {
-	Type
-	Elem() Type
-}
-
-type InterfaceType interface {
-	NumMethod() int
-}
-
-type Config struct {
-	UseSafeImplementation bool
-}
-
-type API interface {
-	TypeOf(obj interface{}) Type
-	Type2(type1 reflect.Type) Type
-}
-
-var ConfigUnsafe = Config{UseSafeImplementation: false}.Froze()
-var ConfigSafe = Config{UseSafeImplementation: true}.Froze()
-
-type frozenConfig struct {
-	useSafeImplementation bool
-	cache                 *concurrent.Map
-}
-
-func (cfg Config) Froze() *frozenConfig {
-	return &frozenConfig{
-		useSafeImplementation: cfg.UseSafeImplementation,
-		cache:                 concurrent.NewMap(),
-	}
-}
-
-func (cfg *frozenConfig) TypeOf(obj interface{}) Type {
-	cacheKey := uintptr(unpackEFace(obj).rtype)
-	typeObj, found := cfg.cache.Load(cacheKey)
-	if found {
-		return typeObj.(Type)
-	}
-	return cfg.Type2(reflect.TypeOf(obj))
-}
-
-func (cfg *frozenConfig) Type2(type1 reflect.Type) Type {
-	if type1 == nil {
-		return nil
-	}
-	cacheKey := uintptr(unpackEFace(type1).data)
-	typeObj, found := cfg.cache.Load(cacheKey)
-	if found {
-		return typeObj.(Type)
-	}
-	type2 := cfg.wrapType(type1)
-	cfg.cache.Store(cacheKey, type2)
-	return type2
-}
-
-func (cfg *frozenConfig) wrapType(type1 reflect.Type) Type {
-	safeType := safeType{Type: type1, cfg: cfg}
-	switch type1.Kind() {
-	case reflect.Struct:
-		if cfg.useSafeImplementation {
-			return &safeStructType{safeType}
-		}
-		return newUnsafeStructType(cfg, type1)
-	case reflect.Array:
-		if cfg.useSafeImplementation {
-			return &safeSliceType{safeType}
-		}
-		return newUnsafeArrayType(cfg, type1)
-	case reflect.Slice:
-		if cfg.useSafeImplementation {
-			return &safeSliceType{safeType}
-		}
-		return newUnsafeSliceType(cfg, type1)
-	case reflect.Map:
-		if cfg.useSafeImplementation {
-			return &safeMapType{safeType}
-		}
-		return newUnsafeMapType(cfg, type1)
-	case reflect.Ptr, reflect.Chan, reflect.Func:
-		if cfg.useSafeImplementation {
-			return &safeMapType{safeType}
-		}
-		return newUnsafePtrType(cfg, type1)
-	case reflect.Interface:
-		if cfg.useSafeImplementation {
-			return &safeMapType{safeType}
-		}
-		if type1.NumMethod() == 0 {
-			return newUnsafeEFaceType(cfg, type1)
-		}
-		return newUnsafeIFaceType(cfg, type1)
-	default:
-		if cfg.useSafeImplementation {
-			return &safeType
-		}
-		return newUnsafeType(cfg, type1)
-	}
-}
-
-func TypeOf(obj interface{}) Type {
-	return ConfigUnsafe.TypeOf(obj)
-}
-
-func TypeOfPtr(obj interface{}) PtrType {
-	return TypeOf(obj).(PtrType)
-}
-
-func Type2(type1 reflect.Type) Type {
-	if type1 == nil {
-		return nil
-	}
-	return ConfigUnsafe.Type2(type1)
-}
-
-func PtrTo(typ Type) Type {
-	return Type2(reflect.PtrTo(typ.Type1()))
-}
-
-func PtrOf(obj interface{}) unsafe.Pointer {
-	return unpackEFace(obj).data
-}
-
-func RTypeOf(obj interface{}) uintptr {
-	return uintptr(unpackEFace(obj).rtype)
-}
-
-func IsNil(obj interface{}) bool {
-	if obj == nil {
-		return true
-	}
-	return unpackEFace(obj).data == nil
-}
-
-func IsNullable(kind reflect.Kind) bool {
-	switch kind {
-	case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Func, reflect.Slice, reflect.Interface:
-		return true
-	}
-	return false
-}
-
-func likePtrKind(kind reflect.Kind) bool {
-	switch kind {
-	case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Func:
-		return true
-	}
-	return false
-}
-
-func likePtrType(typ reflect.Type) bool {
-	if likePtrKind(typ.Kind()) {
-		return true
-	}
-	if typ.Kind() == reflect.Struct {
-		if typ.NumField() != 1 {
-			return false
-		}
-		return likePtrType(typ.Field(0).Type)
-	}
-	if typ.Kind() == reflect.Array {
-		if typ.Len() != 1 {
-			return false
-		}
-		return likePtrType(typ.Elem())
-	}
-	return false
-}
-
-// NoEscape hides a pointer from escape analysis. It is the identity
-// function, but escape analysis doesn't think the output depends on
-// the input. NoEscape is inlined and currently compiles down to zero
-// instructions.
-// USE CAREFULLY!
-//go:nosplit
-func NoEscape(p unsafe.Pointer) unsafe.Pointer {
-	x := uintptr(p)
-	return unsafe.Pointer(x ^ 0)
-}
-
-func UnsafeCastString(str string) []byte {
-	stringHeader := (*reflect.StringHeader)(unsafe.Pointer(&str))
-	sliceHeader := &reflect.SliceHeader{
-		Data: stringHeader.Data,
-		Cap:  stringHeader.Len,
-		Len:  stringHeader.Len,
-	}
-	return *(*[]byte)(unsafe.Pointer(sliceHeader))
-}
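-
-// A usage sketch: the returned slice aliases the string's backing memory,
-// so it must be treated as read-only.
-//
-//	b := UnsafeCastString("hello") // zero-copy view of the string's bytes
-//	_ = b[0]                       // reads are fine; writes are undefined behavior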
diff --git a/vendor/github.com/modern-go/reflect2/reflect2_amd64.s b/vendor/github.com/modern-go/reflect2/reflect2_amd64.s
deleted file mode 100644
index e69de29..0000000
--- a/vendor/github.com/modern-go/reflect2/reflect2_amd64.s
+++ /dev/null
diff --git a/vendor/github.com/modern-go/reflect2/reflect2_kind.go b/vendor/github.com/modern-go/reflect2/reflect2_kind.go
deleted file mode 100644
index 62f299e..0000000
--- a/vendor/github.com/modern-go/reflect2/reflect2_kind.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package reflect2
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-// DefaultTypeOfKind returns the non-aliased default type for the kind
-func DefaultTypeOfKind(kind reflect.Kind) Type {
-	return kindTypes[kind]
-}
-
-var kindTypes = map[reflect.Kind]Type{
-	reflect.Bool:          TypeOf(true),
-	reflect.Uint8:         TypeOf(uint8(0)),
-	reflect.Int8:          TypeOf(int8(0)),
-	reflect.Uint16:        TypeOf(uint16(0)),
-	reflect.Int16:         TypeOf(int16(0)),
-	reflect.Uint32:        TypeOf(uint32(0)),
-	reflect.Int32:         TypeOf(int32(0)),
-	reflect.Uint64:        TypeOf(uint64(0)),
-	reflect.Int64:         TypeOf(int64(0)),
-	reflect.Uint:          TypeOf(uint(0)),
-	reflect.Int:           TypeOf(int(0)),
-	reflect.Float32:       TypeOf(float32(0)),
-	reflect.Float64:       TypeOf(float64(0)),
-	reflect.Uintptr:       TypeOf(uintptr(0)),
-	reflect.String:        TypeOf(""),
-	reflect.UnsafePointer: TypeOf(unsafe.Pointer(nil)),
-}
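A hedged usage sketch for the kind-to-type lookup being deleted here, assuming the reflect2 API exactly as vendored above:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/modern-go/reflect2"
)

func main() {
	// Look up the canonical Type for a kind, then allocate a zero value
	// through it; New returns an interface{} holding a *int here.
	intType := reflect2.DefaultTypeOfKind(reflect.Int)
	p := intType.New().(*int)
	fmt.Println(*p) // 0
}
```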
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_386.s b/vendor/github.com/modern-go/reflect2/relfect2_386.s
deleted file mode 100644
index e69de29..0000000
--- a/vendor/github.com/modern-go/reflect2/relfect2_386.s
+++ /dev/null
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s b/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s
deleted file mode 100644
index e69de29..0000000
--- a/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s
+++ /dev/null
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_arm.s b/vendor/github.com/modern-go/reflect2/relfect2_arm.s
deleted file mode 100644
index e69de29..0000000
--- a/vendor/github.com/modern-go/reflect2/relfect2_arm.s
+++ /dev/null
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_arm64.s b/vendor/github.com/modern-go/reflect2/relfect2_arm64.s
deleted file mode 100644
index e69de29..0000000
--- a/vendor/github.com/modern-go/reflect2/relfect2_arm64.s
+++ /dev/null
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s b/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s
deleted file mode 100644
index e69de29..0000000
--- a/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s
+++ /dev/null
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s b/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s
deleted file mode 100644
index e69de29..0000000
--- a/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s
+++ /dev/null
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s b/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s
deleted file mode 100644
index e69de29..0000000
--- a/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s
+++ /dev/null
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_s390x.s b/vendor/github.com/modern-go/reflect2/relfect2_s390x.s
deleted file mode 100644
index e69de29..0000000
--- a/vendor/github.com/modern-go/reflect2/relfect2_s390x.s
+++ /dev/null
diff --git a/vendor/github.com/modern-go/reflect2/safe_field.go b/vendor/github.com/modern-go/reflect2/safe_field.go
deleted file mode 100644
index d4ba1f4..0000000
--- a/vendor/github.com/modern-go/reflect2/safe_field.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package reflect2
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-type safeField struct {
-	reflect.StructField
-}
-
-func (field *safeField) Offset() uintptr {
-	return field.StructField.Offset
-}
-
-func (field *safeField) Name() string {
-	return field.StructField.Name
-}
-
-func (field *safeField) PkgPath() string {
-	return field.StructField.PkgPath
-}
-
-func (field *safeField) Type() Type {
-	panic("not implemented")
-}
-
-func (field *safeField) Tag() reflect.StructTag {
-	return field.StructField.Tag
-}
-
-func (field *safeField) Index() []int {
-	return field.StructField.Index
-}
-
-func (field *safeField) Anonymous() bool {
-	return field.StructField.Anonymous
-}
-
-func (field *safeField) Set(obj interface{}, value interface{}) {
-	val := reflect.ValueOf(obj).Elem()
-	val.FieldByIndex(field.Index()).Set(reflect.ValueOf(value).Elem())
-}
-
-func (field *safeField) UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer) {
-	panic("unsafe operation is not supported")
-}
-
-func (field *safeField) Get(obj interface{}) interface{} {
-	val := reflect.ValueOf(obj).Elem().FieldByIndex(field.Index())
-	ptr := reflect.New(val.Type())
-	ptr.Elem().Set(val)
-	return ptr.Interface()
-}
-
-func (field *safeField) UnsafeGet(obj unsafe.Pointer) unsafe.Pointer {
-	panic("does not support unsafe operation")
-}
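The safe field implementation does everything through package reflect, panicking on the Unsafe* entry points. A self-contained sketch of the same Set pattern (obj is a pointer to a struct, value a pointer to the new field value), with hypothetical names:

```go
package main

import (
	"fmt"
	"reflect"
)

type user struct {
	Name string
}

// setField mirrors safeField.Set: dereference both pointers via reflect
// and assign into the field located by its index path.
func setField(obj interface{}, index []int, value interface{}) {
	v := reflect.ValueOf(obj).Elem()
	v.FieldByIndex(index).Set(reflect.ValueOf(value).Elem())
}

func main() {
	u := &user{}
	name := "alice"
	setField(u, []int{0}, &name)
	fmt.Println(u.Name) // alice
}
```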
diff --git a/vendor/github.com/modern-go/reflect2/safe_map.go b/vendor/github.com/modern-go/reflect2/safe_map.go
deleted file mode 100644
index 8836220..0000000
--- a/vendor/github.com/modern-go/reflect2/safe_map.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package reflect2
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-type safeMapType struct {
-	safeType
-}
-
-func (type2 *safeMapType) Key() Type {
-	return type2.safeType.cfg.Type2(type2.Type.Key())
-}
-
-func (type2 *safeMapType) MakeMap(cap int) interface{} {
-	ptr := reflect.New(type2.Type)
-	ptr.Elem().Set(reflect.MakeMap(type2.Type))
-	return ptr.Interface()
-}
-
-func (type2 *safeMapType) UnsafeMakeMap(cap int) unsafe.Pointer {
-	panic("does not support unsafe operation")
-}
-
-func (type2 *safeMapType) SetIndex(obj interface{}, key interface{}, elem interface{}) {
-	keyVal := reflect.ValueOf(key)
-	elemVal := reflect.ValueOf(elem)
-	val := reflect.ValueOf(obj)
-	val.Elem().SetMapIndex(keyVal.Elem(), elemVal.Elem())
-}
-
-func (type2 *safeMapType) UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer) {
-	panic("does not support unsafe operation")
-}
-
-func (type2 *safeMapType) TryGetIndex(obj interface{}, key interface{}) (interface{}, bool) {
-	keyVal := reflect.ValueOf(key)
-	if key == nil {
-		keyVal = reflect.New(type2.Type.Key()).Elem()
-	}
-	val := reflect.ValueOf(obj).MapIndex(keyVal)
-	if !val.IsValid() {
-		return nil, false
-	}
-	return val.Interface(), true
-}
-
-func (type2 *safeMapType) GetIndex(obj interface{}, key interface{}) interface{} {
-	val := reflect.ValueOf(obj).Elem()
-	keyVal := reflect.ValueOf(key).Elem()
-	elemVal := val.MapIndex(keyVal)
-	if !elemVal.IsValid() {
-		ptr := reflect.New(reflect.PtrTo(val.Type().Elem()))
-		return ptr.Elem().Interface()
-	}
-	ptr := reflect.New(elemVal.Type())
-	ptr.Elem().Set(elemVal)
-	return ptr.Interface()
-}
-
-func (type2 *safeMapType) UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer {
-	panic("does not support unsafe operation")
-}
-
-func (type2 *safeMapType) Iterate(obj interface{}) MapIterator {
-	m := reflect.ValueOf(obj).Elem()
-	return &safeMapIterator{
-		m:    m,
-		keys: m.MapKeys(),
-	}
-}
-
-func (type2 *safeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
-	panic("does not support unsafe operation")
-}
-
-type safeMapIterator struct {
-	i    int
-	m    reflect.Value
-	keys []reflect.Value
-}
-
-func (iter *safeMapIterator) HasNext() bool {
-	return iter.i != len(iter.keys)
-}
-
-func (iter *safeMapIterator) Next() (interface{}, interface{}) {
-	key := iter.keys[iter.i]
-	elem := iter.m.MapIndex(key)
-	iter.i += 1
-	keyPtr := reflect.New(key.Type())
-	keyPtr.Elem().Set(key)
-	elemPtr := reflect.New(elem.Type())
-	elemPtr.Elem().Set(elem)
-	return keyPtr.Interface(), elemPtr.Interface()
-}
-
-func (iter *safeMapIterator) UnsafeNext() (unsafe.Pointer, unsafe.Pointer) {
-	panic("does not support unsafe operation")
-}
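Unlike the unsafe map iterator (which walks the runtime's hiter directly), the safe iterator above snapshots all keys with MapKeys and indexes back in per element. The same pattern in plain reflect, as a minimal sketch:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]int{"a": 1, "b": 2}
	v := reflect.ValueOf(m)
	// Snapshot the keys up front, as safeMapIterator does, then look
	// each element up again when it is visited.
	for _, k := range v.MapKeys() {
		fmt.Println(k.Interface(), v.MapIndex(k).Interface())
	}
}
```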
diff --git a/vendor/github.com/modern-go/reflect2/safe_slice.go b/vendor/github.com/modern-go/reflect2/safe_slice.go
deleted file mode 100644
index bcce6fd..0000000
--- a/vendor/github.com/modern-go/reflect2/safe_slice.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package reflect2
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-type safeSliceType struct {
-	safeType
-}
-
-func (type2 *safeSliceType) SetIndex(obj interface{}, index int, value interface{}) {
-	val := reflect.ValueOf(obj).Elem()
-	elem := reflect.ValueOf(value).Elem()
-	val.Index(index).Set(elem)
-}
-
-func (type2 *safeSliceType) UnsafeSetIndex(obj unsafe.Pointer, index int, value unsafe.Pointer) {
-	panic("does not support unsafe operation")
-}
-
-func (type2 *safeSliceType) GetIndex(obj interface{}, index int) interface{} {
-	val := reflect.ValueOf(obj).Elem()
-	elem := val.Index(index)
-	ptr := reflect.New(elem.Type())
-	ptr.Elem().Set(elem)
-	return ptr.Interface()
-}
-
-func (type2 *safeSliceType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer {
-	panic("does not support unsafe operation")
-}
-
-func (type2 *safeSliceType) MakeSlice(length int, cap int) interface{} {
-	val := reflect.MakeSlice(type2.Type, length, cap)
-	ptr := reflect.New(val.Type())
-	ptr.Elem().Set(val)
-	return ptr.Interface()
-}
-
-func (type2 *safeSliceType) UnsafeMakeSlice(length int, cap int) unsafe.Pointer {
-	panic("does not support unsafe operation")
-}
-
-func (type2 *safeSliceType) Grow(obj interface{}, newLength int) {
-	oldCap := type2.Cap(obj)
-	oldSlice := reflect.ValueOf(obj).Elem()
-	delta := newLength - oldCap
-	deltaVals := make([]reflect.Value, delta)
-	newSlice := reflect.Append(oldSlice, deltaVals...)
-	oldSlice.Set(newSlice)
-}
-
-func (type2 *safeSliceType) UnsafeGrow(ptr unsafe.Pointer, newLength int) {
-	panic("does not support unsafe operation")
-}
-
-func (type2 *safeSliceType) Append(obj interface{}, elem interface{}) {
-	val := reflect.ValueOf(obj).Elem()
-	elemVal := reflect.ValueOf(elem).Elem()
-	newVal := reflect.Append(val, elemVal)
-	val.Set(newVal)
-}
-
-func (type2 *safeSliceType) UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer) {
-	panic("does not support unsafe operation")
-}
-
-func (type2 *safeSliceType) SetNil(obj interface{}) {
-	val := reflect.ValueOf(obj).Elem()
-	val.Set(reflect.Zero(val.Type()))
-}
-
-func (type2 *safeSliceType) UnsafeSetNil(ptr unsafe.Pointer) {
-	panic("does not support unsafe operation")
-}
-
-func (type2 *safeSliceType) LengthOf(obj interface{}) int {
-	return reflect.ValueOf(obj).Elem().Len()
-}
-
-func (type2 *safeSliceType) UnsafeLengthOf(ptr unsafe.Pointer) int {
-	panic("does not support unsafe operation")
-}
-
-func (type2 *safeSliceType) Cap(obj interface{}) int {
-	return reflect.ValueOf(obj).Elem().Cap()
-}
-
-func (type2 *safeSliceType) UnsafeCap(ptr unsafe.Pointer) int {
-	panic("does not support unsafe operation")
-}
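The safe Grow above extends a slice by appending zero values through reflect.Append. A simplified standalone sketch of that approach (hypothetical helper name):

```go
package main

import (
	"fmt"
	"reflect"
)

// grow pads the slice pointed to by slicePtr with zero values until it
// has newLength elements, using the same reflect.Append technique.
func grow(slicePtr interface{}, newLength int) {
	v := reflect.ValueOf(slicePtr).Elem()
	for v.Len() < newLength {
		v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
	}
}

func main() {
	s := []int{1, 2}
	grow(&s, 5)
	fmt.Println(s) // [1 2 0 0 0]
}
```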
diff --git a/vendor/github.com/modern-go/reflect2/safe_struct.go b/vendor/github.com/modern-go/reflect2/safe_struct.go
deleted file mode 100644
index e5fb9b3..0000000
--- a/vendor/github.com/modern-go/reflect2/safe_struct.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package reflect2
-
-type safeStructType struct {
-	safeType
-}
-
-func (type2 *safeStructType) FieldByName(name string) StructField {
-	field, found := type2.Type.FieldByName(name)
-	if !found {
-		panic("field " + name + " not found")
-	}
-	return &safeField{StructField: field}
-}
-
-func (type2 *safeStructType) Field(i int) StructField {
-	return &safeField{StructField: type2.Type.Field(i)}
-}
-
-func (type2 *safeStructType) FieldByIndex(index []int) StructField {
-	return &safeField{StructField: type2.Type.FieldByIndex(index)}
-}
-
-func (type2 *safeStructType) FieldByNameFunc(match func(string) bool) StructField {
-	field, found := type2.Type.FieldByNameFunc(match)
-	if !found {
-		panic("field match condition not found in " + type2.Type.String())
-	}
-	return &safeField{StructField: field}
-}
diff --git a/vendor/github.com/modern-go/reflect2/safe_type.go b/vendor/github.com/modern-go/reflect2/safe_type.go
deleted file mode 100644
index ee4e7bb..0000000
--- a/vendor/github.com/modern-go/reflect2/safe_type.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package reflect2
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-type safeType struct {
-	reflect.Type
-	cfg *frozenConfig
-}
-
-func (type2 *safeType) New() interface{} {
-	return reflect.New(type2.Type).Interface()
-}
-
-func (type2 *safeType) UnsafeNew() unsafe.Pointer {
-	panic("does not support unsafe operation")
-}
-
-func (type2 *safeType) Elem() Type {
-	return type2.cfg.Type2(type2.Type.Elem())
-}
-
-func (type2 *safeType) Type1() reflect.Type {
-	return type2.Type
-}
-
-func (type2 *safeType) PackEFace(ptr unsafe.Pointer) interface{} {
-	panic("does not support unsafe operation")
-}
-
-func (type2 *safeType) Implements(thatType Type) bool {
-	return type2.Type.Implements(thatType.Type1())
-}
-
-func (type2 *safeType) RType() uintptr {
-	panic("does not support unsafe operation")
-}
-
-func (type2 *safeType) Indirect(obj interface{}) interface{} {
-	return reflect.Indirect(reflect.ValueOf(obj)).Interface()
-}
-
-func (type2 *safeType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
-	panic("does not support unsafe operation")
-}
-
-func (type2 *safeType) LikePtr() bool {
-	panic("does not support unsafe operation")
-}
-
-func (type2 *safeType) IsNullable() bool {
-	return IsNullable(type2.Kind())
-}
-
-func (type2 *safeType) IsNil(obj interface{}) bool {
-	if obj == nil {
-		return true
-	}
-	return reflect.ValueOf(obj).Elem().IsNil()
-}
-
-func (type2 *safeType) UnsafeIsNil(ptr unsafe.Pointer) bool {
-	panic("does not support unsafe operation")
-}
-
-func (type2 *safeType) Set(obj interface{}, val interface{}) {
-	reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(val).Elem())
-}
-
-func (type2 *safeType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) {
-	panic("does not support unsafe operation")
-}
-
-func (type2 *safeType) AssignableTo(anotherType Type) bool {
-	return type2.Type1().AssignableTo(anotherType.Type1())
-}
diff --git a/vendor/github.com/modern-go/reflect2/test.sh b/vendor/github.com/modern-go/reflect2/test.sh
deleted file mode 100755
index 3d2b976..0000000
--- a/vendor/github.com/modern-go/reflect2/test.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-echo "" > coverage.txt
-
-for d in $(go list github.com/modern-go/reflect2-tests/... | grep -v vendor); do
-    go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d
-    if [ -f profile.out ]; then
-        cat profile.out >> coverage.txt
-        rm profile.out
-    fi
-done
diff --git a/vendor/github.com/modern-go/reflect2/type_map.go b/vendor/github.com/modern-go/reflect2/type_map.go
deleted file mode 100644
index 6d48911..0000000
--- a/vendor/github.com/modern-go/reflect2/type_map.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package reflect2
-
-import (
-	"reflect"
-	"runtime"
-	"strings"
-	"unsafe"
-)
-
-// typelinks1 for 1.5 ~ 1.6
-//go:linkname typelinks1 reflect.typelinks
-func typelinks1() [][]unsafe.Pointer
-
-// typelinks2 for 1.7 ~
-//go:linkname typelinks2 reflect.typelinks
-func typelinks2() (sections []unsafe.Pointer, offset [][]int32)
-
-var types = map[string]reflect.Type{}
-var packages = map[string]map[string]reflect.Type{}
-
-func init() {
-	ver := runtime.Version()
-	if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") {
-		loadGo15Types()
-	} else if ver == "go1.6" || strings.HasPrefix(ver, "go1.6.") {
-		loadGo15Types()
-	} else {
-		loadGo17Types()
-	}
-}
-
-func loadGo15Types() {
-	var obj interface{} = reflect.TypeOf(0)
-	typePtrss := typelinks1()
-	for _, typePtrs := range typePtrss {
-		for _, typePtr := range typePtrs {
-			(*emptyInterface)(unsafe.Pointer(&obj)).word = typePtr
-			typ := obj.(reflect.Type)
-			if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct {
-				loadedType := typ.Elem()
-				pkgTypes := packages[loadedType.PkgPath()]
-				if pkgTypes == nil {
-					pkgTypes = map[string]reflect.Type{}
-					packages[loadedType.PkgPath()] = pkgTypes
-				}
-				types[loadedType.String()] = loadedType
-				pkgTypes[loadedType.Name()] = loadedType
-			}
-			if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Ptr &&
-				typ.Elem().Elem().Kind() == reflect.Struct {
-				loadedType := typ.Elem().Elem()
-				pkgTypes := packages[loadedType.PkgPath()]
-				if pkgTypes == nil {
-					pkgTypes = map[string]reflect.Type{}
-					packages[loadedType.PkgPath()] = pkgTypes
-				}
-				types[loadedType.String()] = loadedType
-				pkgTypes[loadedType.Name()] = loadedType
-			}
-		}
-	}
-}
-
-func loadGo17Types() {
-	var obj interface{} = reflect.TypeOf(0)
-	sections, offset := typelinks2()
-	for i, offs := range offset {
-		rodata := sections[i]
-		for _, off := range offs {
-			(*emptyInterface)(unsafe.Pointer(&obj)).word = resolveTypeOff(unsafe.Pointer(rodata), off)
-			typ := obj.(reflect.Type)
-			if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct {
-				loadedType := typ.Elem()
-				pkgTypes := packages[loadedType.PkgPath()]
-				if pkgTypes == nil {
-					pkgTypes = map[string]reflect.Type{}
-					packages[loadedType.PkgPath()] = pkgTypes
-				}
-				types[loadedType.String()] = loadedType
-				pkgTypes[loadedType.Name()] = loadedType
-			}
-		}
-	}
-}
-
-type emptyInterface struct {
-	typ  unsafe.Pointer
-	word unsafe.Pointer
-}
-
-// TypeByName returns the type by its name, just like Class.forName in Java.
-func TypeByName(typeName string) Type {
-	return Type2(types[typeName])
-}
-
-// TypeByPackageName returns the type by its package path and name.
-func TypeByPackageName(pkgPath string, name string) Type {
-	pkgTypes := packages[pkgPath]
-	if pkgTypes == nil {
-		return nil
-	}
-	return Type2(pkgTypes[name])
-}
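type_map.go scans the linker's typelinks section at init time, so only types whose metadata the linker actually kept are discoverable. A hedged usage sketch, assuming the reflect2 package as vendored above; whether a given name resolves depends on what this particular binary links in, hence the nil check:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/modern-go/reflect2"
)

// Referencing the type keeps its metadata reachable; typelinks only
// lists types the linker considered live.
var _ url.URL

func main() {
	t := reflect2.TypeByName("url.URL")
	if t == nil {
		fmt.Println("url.URL not registered in this binary")
		return
	}
	fmt.Println(t.Type1().NumField() > 0) // true
}
```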
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_array.go b/vendor/github.com/modern-go/reflect2/unsafe_array.go
deleted file mode 100644
index 76cbdba..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_array.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package reflect2
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-type UnsafeArrayType struct {
-	unsafeType
-	elemRType  unsafe.Pointer
-	pElemRType unsafe.Pointer
-	elemSize   uintptr
-	likePtr    bool
-}
-
-func newUnsafeArrayType(cfg *frozenConfig, type1 reflect.Type) *UnsafeArrayType {
-	return &UnsafeArrayType{
-		unsafeType: *newUnsafeType(cfg, type1),
-		elemRType:  unpackEFace(type1.Elem()).data,
-		pElemRType: unpackEFace(reflect.PtrTo(type1.Elem())).data,
-		elemSize:   type1.Elem().Size(),
-		likePtr:    likePtrType(type1),
-	}
-}
-
-func (type2 *UnsafeArrayType) LikePtr() bool {
-	return type2.likePtr
-}
-
-func (type2 *UnsafeArrayType) Indirect(obj interface{}) interface{} {
-	objEFace := unpackEFace(obj)
-	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
-	return type2.UnsafeIndirect(objEFace.data)
-}
-
-func (type2 *UnsafeArrayType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
-	if type2.likePtr {
-		return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
-	}
-	return packEFace(type2.rtype, ptr)
-}
-
-func (type2 *UnsafeArrayType) SetIndex(obj interface{}, index int, elem interface{}) {
-	objEFace := unpackEFace(obj)
-	assertType("ArrayType.SetIndex argument 1", type2.ptrRType, objEFace.rtype)
-	elemEFace := unpackEFace(elem)
-	assertType("ArrayType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype)
-	type2.UnsafeSetIndex(objEFace.data, index, elemEFace.data)
-}
-
-func (type2 *UnsafeArrayType) UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer) {
-	elemPtr := arrayAt(obj, index, type2.elemSize, "i < s.Len")
-	typedmemmove(type2.elemRType, elemPtr, elem)
-}
-
-func (type2 *UnsafeArrayType) GetIndex(obj interface{}, index int) interface{} {
-	objEFace := unpackEFace(obj)
-	assertType("ArrayType.GetIndex argument 1", type2.ptrRType, objEFace.rtype)
-	elemPtr := type2.UnsafeGetIndex(objEFace.data, index)
-	return packEFace(type2.pElemRType, elemPtr)
-}
-
-func (type2 *UnsafeArrayType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer {
-	return arrayAt(obj, index, type2.elemSize, "i < s.Len")
-}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_eface.go b/vendor/github.com/modern-go/reflect2/unsafe_eface.go
deleted file mode 100644
index 805010f..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_eface.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package reflect2
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-type eface struct {
-	rtype unsafe.Pointer
-	data  unsafe.Pointer
-}
-
-func unpackEFace(obj interface{}) *eface {
-	return (*eface)(unsafe.Pointer(&obj))
-}
-
-func packEFace(rtype unsafe.Pointer, data unsafe.Pointer) interface{} {
-	var i interface{}
-	e := (*eface)(unsafe.Pointer(&i))
-	e.rtype = rtype
-	e.data = data
-	return i
-}
-
-type UnsafeEFaceType struct {
-	unsafeType
-}
-
-func newUnsafeEFaceType(cfg *frozenConfig, type1 reflect.Type) *UnsafeEFaceType {
-	return &UnsafeEFaceType{
-		unsafeType: *newUnsafeType(cfg, type1),
-	}
-}
-
-func (type2 *UnsafeEFaceType) IsNil(obj interface{}) bool {
-	if obj == nil {
-		return true
-	}
-	objEFace := unpackEFace(obj)
-	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
-	return type2.UnsafeIsNil(objEFace.data)
-}
-
-func (type2 *UnsafeEFaceType) UnsafeIsNil(ptr unsafe.Pointer) bool {
-	if ptr == nil {
-		return true
-	}
-	return unpackEFace(*(*interface{})(ptr)).data == nil
-}
-
-func (type2 *UnsafeEFaceType) Indirect(obj interface{}) interface{} {
-	objEFace := unpackEFace(obj)
-	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
-	return type2.UnsafeIndirect(objEFace.data)
-}
-
-func (type2 *UnsafeEFaceType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
-	return *(*interface{})(ptr)
-}
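The eface struct above mirrors the runtime's empty-interface representation: one type word, one data word. A minimal sketch of the unpack trick on its own; this layout is unexported and unstable, so it works only as long as the runtime keeps it:

```go
package main

import (
	"fmt"
	"unsafe"
)

// eface mirrors the runtime's empty-interface layout.
type eface struct {
	rtype unsafe.Pointer
	data  unsafe.Pointer
}

func main() {
	x := 42
	var i interface{} = &x
	// For an interface holding a *int, the data word is the *int itself.
	e := (*eface)(unsafe.Pointer(&i))
	fmt.Println(*(*int)(e.data)) // 42
}
```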
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_field.go b/vendor/github.com/modern-go/reflect2/unsafe_field.go
deleted file mode 100644
index 5eb5313..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_field.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package reflect2
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-type UnsafeStructField struct {
-	reflect.StructField
-	structType *UnsafeStructType
-	rtype      unsafe.Pointer
-	ptrRType   unsafe.Pointer
-}
-
-func newUnsafeStructField(structType *UnsafeStructType, structField reflect.StructField) *UnsafeStructField {
-	return &UnsafeStructField{
-		StructField: structField,
-		rtype:       unpackEFace(structField.Type).data,
-		ptrRType:    unpackEFace(reflect.PtrTo(structField.Type)).data,
-		structType:  structType,
-	}
-}
-
-func (field *UnsafeStructField) Offset() uintptr {
-	return field.StructField.Offset
-}
-
-func (field *UnsafeStructField) Name() string {
-	return field.StructField.Name
-}
-
-func (field *UnsafeStructField) PkgPath() string {
-	return field.StructField.PkgPath
-}
-
-func (field *UnsafeStructField) Type() Type {
-	return field.structType.cfg.Type2(field.StructField.Type)
-}
-
-func (field *UnsafeStructField) Tag() reflect.StructTag {
-	return field.StructField.Tag
-}
-
-func (field *UnsafeStructField) Index() []int {
-	return field.StructField.Index
-}
-
-func (field *UnsafeStructField) Anonymous() bool {
-	return field.StructField.Anonymous
-}
-
-func (field *UnsafeStructField) Set(obj interface{}, value interface{}) {
-	objEFace := unpackEFace(obj)
-	assertType("StructField.SetIndex argument 1", field.structType.ptrRType, objEFace.rtype)
-	valueEFace := unpackEFace(value)
-	assertType("StructField.SetIndex argument 2", field.ptrRType, valueEFace.rtype)
-	field.UnsafeSet(objEFace.data, valueEFace.data)
-}
-
-func (field *UnsafeStructField) UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer) {
-	fieldPtr := add(obj, field.StructField.Offset, "same as non-reflect &v.field")
-	typedmemmove(field.rtype, fieldPtr, value)
-}
-
-func (field *UnsafeStructField) Get(obj interface{}) interface{} {
-	objEFace := unpackEFace(obj)
-	assertType("StructField.GetIndex argument 1", field.structType.ptrRType, objEFace.rtype)
-	value := field.UnsafeGet(objEFace.data)
-	return packEFace(field.ptrRType, value)
-}
-
-func (field *UnsafeStructField) UnsafeGet(obj unsafe.Pointer) unsafe.Pointer {
-	return add(obj, field.StructField.Offset, "same as non-reflect &v.field")
-}
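UnsafeGet/UnsafeSet reduce field access to base-pointer-plus-offset arithmetic. The same idea as a minimal sketch using unsafe.Add (Go 1.17+) and unsafe.Offsetof in place of the deleted add helper:

```go
package main

import (
	"fmt"
	"unsafe"
)

type point struct {
	X, Y int
}

func main() {
	p := point{X: 1, Y: 2}
	// Same arithmetic as UnsafeStructField.UnsafeGet: base + field offset.
	yPtr := (*int)(unsafe.Add(unsafe.Pointer(&p), unsafe.Offsetof(p.Y)))
	*yPtr = 7
	fmt.Println(p) // {1 7}
}
```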
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_iface.go b/vendor/github.com/modern-go/reflect2/unsafe_iface.go
deleted file mode 100644
index b601955..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_iface.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package reflect2
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-type iface struct {
-	itab *itab
-	data unsafe.Pointer
-}
-
-type itab struct {
-	ignore unsafe.Pointer
-	rtype  unsafe.Pointer
-}
-
-func IFaceToEFace(ptr unsafe.Pointer) interface{} {
-	iface := (*iface)(ptr)
-	if iface.itab == nil {
-		return nil
-	}
-	return packEFace(iface.itab.rtype, iface.data)
-}
-
-type UnsafeIFaceType struct {
-	unsafeType
-}
-
-func newUnsafeIFaceType(cfg *frozenConfig, type1 reflect.Type) *UnsafeIFaceType {
-	return &UnsafeIFaceType{
-		unsafeType: *newUnsafeType(cfg, type1),
-	}
-}
-
-func (type2 *UnsafeIFaceType) Indirect(obj interface{}) interface{} {
-	objEFace := unpackEFace(obj)
-	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
-	return type2.UnsafeIndirect(objEFace.data)
-}
-
-func (type2 *UnsafeIFaceType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
-	return IFaceToEFace(ptr)
-}
-
-func (type2 *UnsafeIFaceType) IsNil(obj interface{}) bool {
-	if obj == nil {
-		return true
-	}
-	objEFace := unpackEFace(obj)
-	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
-	return type2.UnsafeIsNil(objEFace.data)
-}
-
-func (type2 *UnsafeIFaceType) UnsafeIsNil(ptr unsafe.Pointer) bool {
-	if ptr == nil {
-		return true
-	}
-	iface := (*iface)(ptr)
-	if iface.itab == nil {
-		return true
-	}
-	return false
-}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_link.go b/vendor/github.com/modern-go/reflect2/unsafe_link.go
deleted file mode 100644
index 57229c8..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_link.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package reflect2
-
-import "unsafe"
-
-//go:linkname unsafe_New reflect.unsafe_New
-func unsafe_New(rtype unsafe.Pointer) unsafe.Pointer
-
-//go:linkname typedmemmove reflect.typedmemmove
-func typedmemmove(rtype unsafe.Pointer, dst, src unsafe.Pointer)
-
-//go:linkname unsafe_NewArray reflect.unsafe_NewArray
-func unsafe_NewArray(rtype unsafe.Pointer, length int) unsafe.Pointer
-
-// typedslicecopy copies a slice of elemType values from src to dst,
-// returning the number of elements copied.
-//go:linkname typedslicecopy reflect.typedslicecopy
-//go:noescape
-func typedslicecopy(elemType unsafe.Pointer, dst, src sliceHeader) int
-
-//go:linkname mapassign reflect.mapassign
-//go:noescape
-func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key, val unsafe.Pointer)
-
-//go:linkname mapaccess reflect.mapaccess
-//go:noescape
-func mapaccess(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
-
-// m escapes into the return value, but the caller of mapiterinit
-// doesn't let the return value escape.
-//go:noescape
-//go:linkname mapiterinit reflect.mapiterinit
-func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) *hiter
-
-//go:noescape
-//go:linkname mapiternext reflect.mapiternext
-func mapiternext(it *hiter)
-
-//go:linkname ifaceE2I reflect.ifaceE2I
-func ifaceE2I(rtype unsafe.Pointer, src interface{}, dst unsafe.Pointer)
-
-// A hash iteration structure.
-// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
-// the layout of this structure.
-type hiter struct {
-	key   unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/internal/gc/range.go).
-	value unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
-	// rest fields are ignored
-}
-
-// add returns p+x.
-//
-// The whySafe string is ignored, so that the function still inlines
-// as efficiently as p+x, but all call sites should use the string to
-// record why the addition is safe, which is to say why the addition
-// does not cause x to advance to the very end of p's allocation
-// and therefore point incorrectly at the next block in memory.
-func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
-	return unsafe.Pointer(uintptr(p) + x)
-}
-
-// arrayAt returns the i-th element of p,
-// an array whose elements are eltSize bytes wide.
-// The array pointed at by p must have at least i+1 elements:
-// it is invalid (but impossible to check here) to pass i >= len,
-// because then the result will point outside the array.
-// whySafe must explain why i < len. (Passing "i < len" is fine;
-// the benefit is to surface this assumption at the call site.)
-func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer {
-	return add(p, uintptr(i)*eltSize, "i < len")
-}
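A standalone sketch of the arrayAt arithmetic above (p + i*eltSize), again via unsafe.Add rather than the uintptr addition in the deleted helper:

```go
package main

import (
	"fmt"
	"unsafe"
)

// arrayAt returns a pointer to the i-th element of the array at p,
// whose elements are eltSize bytes wide. i must be within bounds.
func arrayAt(p unsafe.Pointer, i int, eltSize uintptr) unsafe.Pointer {
	return unsafe.Add(p, uintptr(i)*eltSize)
}

func main() {
	s := []int64{10, 20, 30}
	third := (*int64)(arrayAt(unsafe.Pointer(&s[0]), 2, unsafe.Sizeof(s[0])))
	fmt.Println(*third) // 30
}
```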
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_map.go b/vendor/github.com/modern-go/reflect2/unsafe_map.go
deleted file mode 100644
index f2e76e6..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_map.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package reflect2
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-type UnsafeMapType struct {
-	unsafeType
-	pKeyRType  unsafe.Pointer
-	pElemRType unsafe.Pointer
-}
-
-func newUnsafeMapType(cfg *frozenConfig, type1 reflect.Type) MapType {
-	return &UnsafeMapType{
-		unsafeType: *newUnsafeType(cfg, type1),
-		pKeyRType:  unpackEFace(reflect.PtrTo(type1.Key())).data,
-		pElemRType: unpackEFace(reflect.PtrTo(type1.Elem())).data,
-	}
-}
-
-func (type2 *UnsafeMapType) IsNil(obj interface{}) bool {
-	if obj == nil {
-		return true
-	}
-	objEFace := unpackEFace(obj)
-	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
-	return type2.UnsafeIsNil(objEFace.data)
-}
-
-func (type2 *UnsafeMapType) UnsafeIsNil(ptr unsafe.Pointer) bool {
-	if ptr == nil {
-		return true
-	}
-	return *(*unsafe.Pointer)(ptr) == nil
-}
-
-func (type2 *UnsafeMapType) LikePtr() bool {
-	return true
-}
-
-func (type2 *UnsafeMapType) Indirect(obj interface{}) interface{} {
-	objEFace := unpackEFace(obj)
-	assertType("MapType.Indirect argument 1", type2.ptrRType, objEFace.rtype)
-	return type2.UnsafeIndirect(objEFace.data)
-}
-
-func (type2 *UnsafeMapType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
-	return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
-}
-
-func (type2 *UnsafeMapType) Key() Type {
-	return type2.cfg.Type2(type2.Type.Key())
-}
-
-func (type2 *UnsafeMapType) MakeMap(cap int) interface{} {
-	return packEFace(type2.ptrRType, type2.UnsafeMakeMap(cap))
-}
-
-func (type2 *UnsafeMapType) UnsafeMakeMap(cap int) unsafe.Pointer {
-	m := makeMapWithSize(type2.rtype, cap)
-	return unsafe.Pointer(&m)
-}
-
-func (type2 *UnsafeMapType) SetIndex(obj interface{}, key interface{}, elem interface{}) {
-	objEFace := unpackEFace(obj)
-	assertType("MapType.SetIndex argument 1", type2.ptrRType, objEFace.rtype)
-	keyEFace := unpackEFace(key)
-	assertType("MapType.SetIndex argument 2", type2.pKeyRType, keyEFace.rtype)
-	elemEFace := unpackEFace(elem)
-	assertType("MapType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype)
-	type2.UnsafeSetIndex(objEFace.data, keyEFace.data, elemEFace.data)
-}
-
-func (type2 *UnsafeMapType) UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer) {
-	mapassign(type2.rtype, *(*unsafe.Pointer)(obj), key, elem)
-}
-
-func (type2 *UnsafeMapType) TryGetIndex(obj interface{}, key interface{}) (interface{}, bool) {
-	objEFace := unpackEFace(obj)
-	assertType("MapType.TryGetIndex argument 1", type2.ptrRType, objEFace.rtype)
-	keyEFace := unpackEFace(key)
-	assertType("MapType.TryGetIndex argument 2", type2.pKeyRType, keyEFace.rtype)
-	elemPtr := type2.UnsafeGetIndex(objEFace.data, keyEFace.data)
-	if elemPtr == nil {
-		return nil, false
-	}
-	return packEFace(type2.pElemRType, elemPtr), true
-}
-
-func (type2 *UnsafeMapType) GetIndex(obj interface{}, key interface{}) interface{} {
-	objEFace := unpackEFace(obj)
-	assertType("MapType.GetIndex argument 1", type2.ptrRType, objEFace.rtype)
-	keyEFace := unpackEFace(key)
-	assertType("MapType.GetIndex argument 2", type2.pKeyRType, keyEFace.rtype)
-	elemPtr := type2.UnsafeGetIndex(objEFace.data, keyEFace.data)
-	return packEFace(type2.pElemRType, elemPtr)
-}
-
-func (type2 *UnsafeMapType) UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer {
-	return mapaccess(type2.rtype, *(*unsafe.Pointer)(obj), key)
-}
-
-func (type2 *UnsafeMapType) Iterate(obj interface{}) MapIterator {
-	objEFace := unpackEFace(obj)
-	assertType("MapType.Iterate argument 1", type2.ptrRType, objEFace.rtype)
-	return type2.UnsafeIterate(objEFace.data)
-}
-
-func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
-	return &UnsafeMapIterator{
-		hiter:      mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)),
-		pKeyRType:  type2.pKeyRType,
-		pElemRType: type2.pElemRType,
-	}
-}
-
-type UnsafeMapIterator struct {
-	*hiter
-	pKeyRType  unsafe.Pointer
-	pElemRType unsafe.Pointer
-}
-
-func (iter *UnsafeMapIterator) HasNext() bool {
-	return iter.key != nil
-}
-
-func (iter *UnsafeMapIterator) Next() (interface{}, interface{}) {
-	key, elem := iter.UnsafeNext()
-	return packEFace(iter.pKeyRType, key), packEFace(iter.pElemRType, elem)
-}
-
-func (iter *UnsafeMapIterator) UnsafeNext() (unsafe.Pointer, unsafe.Pointer) {
-	key := iter.key
-	elem := iter.value
-	mapiternext(iter.hiter)
-	return key, elem
-}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_ptr.go b/vendor/github.com/modern-go/reflect2/unsafe_ptr.go
deleted file mode 100644
index 8e5ec9c..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_ptr.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package reflect2
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-type UnsafePtrType struct {
-	unsafeType
-}
-
-func newUnsafePtrType(cfg *frozenConfig, type1 reflect.Type) *UnsafePtrType {
-	return &UnsafePtrType{
-		unsafeType: *newUnsafeType(cfg, type1),
-	}
-}
-
-func (type2 *UnsafePtrType) IsNil(obj interface{}) bool {
-	if obj == nil {
-		return true
-	}
-	objEFace := unpackEFace(obj)
-	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
-	return type2.UnsafeIsNil(objEFace.data)
-}
-
-func (type2 *UnsafePtrType) UnsafeIsNil(ptr unsafe.Pointer) bool {
-	if ptr == nil {
-		return true
-	}
-	return *(*unsafe.Pointer)(ptr) == nil
-}
-
-func (type2 *UnsafePtrType) LikePtr() bool {
-	return true
-}
-
-func (type2 *UnsafePtrType) Indirect(obj interface{}) interface{} {
-	objEFace := unpackEFace(obj)
-	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
-	return type2.UnsafeIndirect(objEFace.data)
-}
-
-func (type2 *UnsafePtrType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
-	return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
-}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_slice.go b/vendor/github.com/modern-go/reflect2/unsafe_slice.go
deleted file mode 100644
index 1c6d876..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_slice.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package reflect2
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-// sliceHeader is a safe version of SliceHeader used within this package.
-type sliceHeader struct {
-	Data unsafe.Pointer
-	Len  int
-	Cap  int
-}
-
-type UnsafeSliceType struct {
-	unsafeType
-	elemRType  unsafe.Pointer
-	pElemRType unsafe.Pointer
-	elemSize   uintptr
-}
-
-func newUnsafeSliceType(cfg *frozenConfig, type1 reflect.Type) SliceType {
-	elemType := type1.Elem()
-	return &UnsafeSliceType{
-		unsafeType: *newUnsafeType(cfg, type1),
-		pElemRType: unpackEFace(reflect.PtrTo(elemType)).data,
-		elemRType:  unpackEFace(elemType).data,
-		elemSize:   elemType.Size(),
-	}
-}
-
-func (type2 *UnsafeSliceType) Set(obj interface{}, val interface{}) {
-	objEFace := unpackEFace(obj)
-	assertType("Type.Set argument 1", type2.ptrRType, objEFace.rtype)
-	valEFace := unpackEFace(val)
-	assertType("Type.Set argument 2", type2.ptrRType, valEFace.rtype)
-	type2.UnsafeSet(objEFace.data, valEFace.data)
-}
-
-func (type2 *UnsafeSliceType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) {
-	*(*sliceHeader)(ptr) = *(*sliceHeader)(val)
-}
-
-func (type2 *UnsafeSliceType) IsNil(obj interface{}) bool {
-	if obj == nil {
-		return true
-	}
-	objEFace := unpackEFace(obj)
-	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
-	return type2.UnsafeIsNil(objEFace.data)
-}
-
-func (type2 *UnsafeSliceType) UnsafeIsNil(ptr unsafe.Pointer) bool {
-	if ptr == nil {
-		return true
-	}
-	return (*sliceHeader)(ptr).Data == nil
-}
-
-func (type2 *UnsafeSliceType) SetNil(obj interface{}) {
-	objEFace := unpackEFace(obj)
-	assertType("SliceType.SetNil argument 1", type2.ptrRType, objEFace.rtype)
-	type2.UnsafeSetNil(objEFace.data)
-}
-
-func (type2 *UnsafeSliceType) UnsafeSetNil(ptr unsafe.Pointer) {
-	header := (*sliceHeader)(ptr)
-	header.Len = 0
-	header.Cap = 0
-	header.Data = nil
-}
-
-func (type2 *UnsafeSliceType) MakeSlice(length int, cap int) interface{} {
-	return packEFace(type2.ptrRType, type2.UnsafeMakeSlice(length, cap))
-}
-
-func (type2 *UnsafeSliceType) UnsafeMakeSlice(length int, cap int) unsafe.Pointer {
-	header := &sliceHeader{unsafe_NewArray(type2.elemRType, cap), length, cap}
-	return unsafe.Pointer(header)
-}
-
-func (type2 *UnsafeSliceType) LengthOf(obj interface{}) int {
-	objEFace := unpackEFace(obj)
-	assertType("SliceType.Len argument 1", type2.ptrRType, objEFace.rtype)
-	return type2.UnsafeLengthOf(objEFace.data)
-}
-
-func (type2 *UnsafeSliceType) UnsafeLengthOf(obj unsafe.Pointer) int {
-	header := (*sliceHeader)(obj)
-	return header.Len
-}
-
-func (type2 *UnsafeSliceType) SetIndex(obj interface{}, index int, elem interface{}) {
-	objEFace := unpackEFace(obj)
-	assertType("SliceType.SetIndex argument 1", type2.ptrRType, objEFace.rtype)
-	elemEFace := unpackEFace(elem)
-	assertType("SliceType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype)
-	type2.UnsafeSetIndex(objEFace.data, index, elemEFace.data)
-}
-
-func (type2 *UnsafeSliceType) UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer) {
-	header := (*sliceHeader)(obj)
-	elemPtr := arrayAt(header.Data, index, type2.elemSize, "i < s.Len")
-	typedmemmove(type2.elemRType, elemPtr, elem)
-}
-
-func (type2 *UnsafeSliceType) GetIndex(obj interface{}, index int) interface{} {
-	objEFace := unpackEFace(obj)
-	assertType("SliceType.GetIndex argument 1", type2.ptrRType, objEFace.rtype)
-	elemPtr := type2.UnsafeGetIndex(objEFace.data, index)
-	return packEFace(type2.pElemRType, elemPtr)
-}
-
-func (type2 *UnsafeSliceType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer {
-	header := (*sliceHeader)(obj)
-	return arrayAt(header.Data, index, type2.elemSize, "i < s.Len")
-}
-
-func (type2 *UnsafeSliceType) Append(obj interface{}, elem interface{}) {
-	objEFace := unpackEFace(obj)
-	assertType("SliceType.Append argument 1", type2.ptrRType, objEFace.rtype)
-	elemEFace := unpackEFace(elem)
-	assertType("SliceType.Append argument 2", type2.pElemRType, elemEFace.rtype)
-	type2.UnsafeAppend(objEFace.data, elemEFace.data)
-}
-
-func (type2 *UnsafeSliceType) UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer) {
-	header := (*sliceHeader)(obj)
-	oldLen := header.Len
-	type2.UnsafeGrow(obj, oldLen+1)
-	type2.UnsafeSetIndex(obj, oldLen, elem)
-}
-
-func (type2 *UnsafeSliceType) Cap(obj interface{}) int {
-	objEFace := unpackEFace(obj)
-	assertType("SliceType.Cap argument 1", type2.ptrRType, objEFace.rtype)
-	return type2.UnsafeCap(objEFace.data)
-}
-
-func (type2 *UnsafeSliceType) UnsafeCap(ptr unsafe.Pointer) int {
-	return (*sliceHeader)(ptr).Cap
-}
-
-func (type2 *UnsafeSliceType) Grow(obj interface{}, newLength int) {
-	objEFace := unpackEFace(obj)
-	assertType("SliceType.Grow argument 1", type2.ptrRType, objEFace.rtype)
-	type2.UnsafeGrow(objEFace.data, newLength)
-}
-
-func (type2 *UnsafeSliceType) UnsafeGrow(obj unsafe.Pointer, newLength int) {
-	header := (*sliceHeader)(obj)
-	if newLength <= header.Cap {
-		header.Len = newLength
-		return
-	}
-	newCap := calcNewCap(header.Cap, newLength)
-	newHeader := (*sliceHeader)(type2.UnsafeMakeSlice(header.Len, newCap))
-	typedslicecopy(type2.elemRType, *newHeader, *header)
-	header.Data = newHeader.Data
-	header.Cap = newHeader.Cap
-	header.Len = newLength
-}
-
-func calcNewCap(cap int, expectedCap int) int {
-	if cap == 0 {
-		cap = expectedCap
-	} else {
-		for cap < expectedCap {
-			if cap < 1024 {
-				cap += cap
-			} else {
-				cap += cap / 4
-			}
-		}
-	}
-	return cap
-}
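calcNewCap implements the familiar growth policy: double the capacity until 1024, then grow by 25% until the request is met. A small self-contained demonstration of the resulting capacities:

```go
package main

import "fmt"

// newCap reproduces calcNewCap above: double below 1024, then +25%.
func newCap(cap, expected int) int {
	if cap == 0 {
		return expected
	}
	for cap < expected {
		if cap < 1024 {
			cap += cap
		} else {
			cap += cap / 4
		}
	}
	return cap
}

func main() {
	fmt.Println(newCap(0, 5))       // 5
	fmt.Println(newCap(4, 9))       // 16 (4 -> 8 -> 16)
	fmt.Println(newCap(1024, 1100)) // 1280 (1024 + 256)
}
```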
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_struct.go b/vendor/github.com/modern-go/reflect2/unsafe_struct.go
deleted file mode 100644
index 804d916..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_struct.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package reflect2
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-type UnsafeStructType struct {
-	unsafeType
-	likePtr bool
-}
-
-func newUnsafeStructType(cfg *frozenConfig, type1 reflect.Type) *UnsafeStructType {
-	return &UnsafeStructType{
-		unsafeType: *newUnsafeType(cfg, type1),
-		likePtr:    likePtrType(type1),
-	}
-}
-
-func (type2 *UnsafeStructType) LikePtr() bool {
-	return type2.likePtr
-}
-
-func (type2 *UnsafeStructType) Indirect(obj interface{}) interface{} {
-	objEFace := unpackEFace(obj)
-	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
-	return type2.UnsafeIndirect(objEFace.data)
-}
-
-func (type2 *UnsafeStructType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
-	if type2.likePtr {
-		return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
-	}
-	return packEFace(type2.rtype, ptr)
-}
-
-func (type2 *UnsafeStructType) FieldByName(name string) StructField {
-	structField, found := type2.Type.FieldByName(name)
-	if !found {
-		return nil
-	}
-	return newUnsafeStructField(type2, structField)
-}
-
-func (type2 *UnsafeStructType) Field(i int) StructField {
-	return newUnsafeStructField(type2, type2.Type.Field(i))
-}
-
-func (type2 *UnsafeStructType) FieldByIndex(index []int) StructField {
-	return newUnsafeStructField(type2, type2.Type.FieldByIndex(index))
-}
-
-func (type2 *UnsafeStructType) FieldByNameFunc(match func(string) bool) StructField {
-	structField, found := type2.Type.FieldByNameFunc(match)
-	if !found {
-		panic("field match condition not found in " + type2.Type.String())
-	}
-	return newUnsafeStructField(type2, structField)
-}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_type.go b/vendor/github.com/modern-go/reflect2/unsafe_type.go
deleted file mode 100644
index 1394171..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_type.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package reflect2
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-type unsafeType struct {
-	safeType
-	rtype    unsafe.Pointer
-	ptrRType unsafe.Pointer
-}
-
-func newUnsafeType(cfg *frozenConfig, type1 reflect.Type) *unsafeType {
-	return &unsafeType{
-		safeType: safeType{
-			Type: type1,
-			cfg:  cfg,
-		},
-		rtype:    unpackEFace(type1).data,
-		ptrRType: unpackEFace(reflect.PtrTo(type1)).data,
-	}
-}
-
-func (type2 *unsafeType) Set(obj interface{}, val interface{}) {
-	objEFace := unpackEFace(obj)
-	assertType("Type.Set argument 1", type2.ptrRType, objEFace.rtype)
-	valEFace := unpackEFace(val)
-	assertType("Type.Set argument 2", type2.ptrRType, valEFace.rtype)
-	type2.UnsafeSet(objEFace.data, valEFace.data)
-}
-
-func (type2 *unsafeType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) {
-	typedmemmove(type2.rtype, ptr, val)
-}
-
-func (type2 *unsafeType) IsNil(obj interface{}) bool {
-	objEFace := unpackEFace(obj)
-	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
-	return type2.UnsafeIsNil(objEFace.data)
-}
-
-func (type2 *unsafeType) UnsafeIsNil(ptr unsafe.Pointer) bool {
-	return ptr == nil
-}
-
-func (type2 *unsafeType) UnsafeNew() unsafe.Pointer {
-	return unsafe_New(type2.rtype)
-}
-
-func (type2 *unsafeType) New() interface{} {
-	return packEFace(type2.ptrRType, type2.UnsafeNew())
-}
-
-func (type2 *unsafeType) PackEFace(ptr unsafe.Pointer) interface{} {
-	return packEFace(type2.ptrRType, ptr)
-}
-
-func (type2 *unsafeType) RType() uintptr {
-	return uintptr(type2.rtype)
-}
-
-func (type2 *unsafeType) Indirect(obj interface{}) interface{} {
-	objEFace := unpackEFace(obj)
-	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
-	return type2.UnsafeIndirect(objEFace.data)
-}
-
-func (type2 *unsafeType) UnsafeIndirect(obj unsafe.Pointer) interface{} {
-	return packEFace(type2.rtype, obj)
-}
-
-func (type2 *unsafeType) LikePtr() bool {
-	return false
-}
-
-func assertType(where string, expectRType unsafe.Pointer, actualRType unsafe.Pointer) {
-	if expectRType != actualRType {
-		expectType := reflect.TypeOf(0)
-		(*iface)(unsafe.Pointer(&expectType)).data = expectRType
-		actualType := reflect.TypeOf(0)
-		(*iface)(unsafe.Pointer(&actualType)).data = actualRType
-		panic(where + ": expect " + expectType.String() + ", actual " + actualType.String())
-	}
-}
diff --git a/vendor/github.com/petar/GoLLRB/AUTHORS b/vendor/github.com/petar/GoLLRB/AUTHORS
deleted file mode 100644
index 78d1de4..0000000
--- a/vendor/github.com/petar/GoLLRB/AUTHORS
+++ /dev/null
@@ -1,4 +0,0 @@
-Petar Maymounkov <petar@5ttt.org>
-Vadim Vygonets <vadik@vygo.net>
-Ian Smith <iansmith@acm.org>
-Martin Bruse
diff --git a/vendor/github.com/petar/GoLLRB/LICENSE b/vendor/github.com/petar/GoLLRB/LICENSE
deleted file mode 100644
index b75312c..0000000
--- a/vendor/github.com/petar/GoLLRB/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2010, Petar Maymounkov
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-(*) Redistributions of source code must retain the above copyright notice, this list
-of conditions and the following disclaimer.
-
-(*) Redistributions in binary form must reproduce the above copyright notice, this
-list of conditions and the following disclaimer in the documentation and/or
-other materials provided with the distribution.
-
-(*) Neither the name of Petar Maymounkov nor the names of its contributors may be
-used to endorse or promote products derived from this software without specific
-prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/petar/GoLLRB/llrb/avgvar.go b/vendor/github.com/petar/GoLLRB/llrb/avgvar.go
deleted file mode 100644
index 2d7e2a3..0000000
--- a/vendor/github.com/petar/GoLLRB/llrb/avgvar.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2010 Petar Maymounkov. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package llrb
-
-import "math"
-
-// avgVar maintains the average and variance of a stream of numbers
-// in a space-efficient manner.
-type avgVar struct {
-	count      int64
-	sum, sumsq float64
-}
-
-func (av *avgVar) Init() {
-	av.count = 0
-	av.sum = 0.0
-	av.sumsq = 0.0
-}
-
-func (av *avgVar) Add(sample float64) {
-	av.count++
-	av.sum += sample
-	av.sumsq += sample * sample
-}
-
-func (av *avgVar) GetCount() int64 { return av.count }
-
-func (av *avgVar) GetAvg() float64 { return av.sum / float64(av.count) }
-
-func (av *avgVar) GetTotal() float64 { return av.sum }
-
-func (av *avgVar) GetVar() float64 {
-	a := av.GetAvg()
-	return av.sumsq/float64(av.count) - a*a
-}
-
-func (av *avgVar) GetStdDev() float64 { return math.Sqrt(av.GetVar()) }
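avgVar keeps only count, sum, and sum of squares, recovering the variance from the identity Var = E[x²] − (E[x])². A worked sketch of the same one-pass bookkeeping (note this formula can lose precision for large, closely clustered samples):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// Same bookkeeping as avgVar: count, sum, and sum of squares give
	// the variance without storing the samples.
	samples := []float64{2, 4, 4, 4, 5, 5, 7, 9}
	var n, sum, sumsq float64
	for _, x := range samples {
		n++
		sum += x
		sumsq += x * x
	}
	avg := sum / n
	variance := sumsq/n - avg*avg
	fmt.Println(avg, variance, math.Sqrt(variance)) // 5 4 2
}
```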
diff --git a/vendor/github.com/petar/GoLLRB/llrb/iterator.go b/vendor/github.com/petar/GoLLRB/llrb/iterator.go
deleted file mode 100644
index ee7b27f..0000000
--- a/vendor/github.com/petar/GoLLRB/llrb/iterator.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package llrb
-
-type ItemIterator func(i Item) bool
-
-//func (t *Tree) Ascend(iterator ItemIterator) {
-//	t.AscendGreaterOrEqual(Inf(-1), iterator)
-//}
-
-func (t *LLRB) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
-	t.ascendRange(t.root, greaterOrEqual, lessThan, iterator)
-}
-
-func (t *LLRB) ascendRange(h *Node, inf, sup Item, iterator ItemIterator) bool {
-	if h == nil {
-		return true
-	}
-	if !less(h.Item, sup) {
-		return t.ascendRange(h.Left, inf, sup, iterator)
-	}
-	if less(h.Item, inf) {
-		return t.ascendRange(h.Right, inf, sup, iterator)
-	}
-
-	if !t.ascendRange(h.Left, inf, sup, iterator) {
-		return false
-	}
-	if !iterator(h.Item) {
-		return false
-	}
-	return t.ascendRange(h.Right, inf, sup, iterator)
-}
-
-// AscendGreaterOrEqual will call iterator once for each element greater than
-// or equal to pivot in ascending order. It will stop whenever the iterator returns false.
-func (t *LLRB) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
-	t.ascendGreaterOrEqual(t.root, pivot, iterator)
-}
-
-func (t *LLRB) ascendGreaterOrEqual(h *Node, pivot Item, iterator ItemIterator) bool {
-	if h == nil {
-		return true
-	}
-	if !less(h.Item, pivot) {
-		if !t.ascendGreaterOrEqual(h.Left, pivot, iterator) {
-			return false
-		}
-		if !iterator(h.Item) {
-			return false
-		}
-	}
-	return t.ascendGreaterOrEqual(h.Right, pivot, iterator)
-}
-
-func (t *LLRB) AscendLessThan(pivot Item, iterator ItemIterator) {
-	t.ascendLessThan(t.root, pivot, iterator)
-}
-
-func (t *LLRB) ascendLessThan(h *Node, pivot Item, iterator ItemIterator) bool {
-	if h == nil {
-		return true
-	}
-	if !t.ascendLessThan(h.Left, pivot, iterator) {
-		return false
-	}
-	if less(h.Item, pivot) {
-		if !iterator(h.Item) {
-			return false
-		}
-		return t.ascendLessThan(h.Right, pivot, iterator)
-	}
-	return true
-}
-
-// DescendLessOrEqual will call iterator once for each element less than the
-// pivot in descending order. It will stop whenever the iterator returns false.
-func (t *LLRB) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
-	t.descendLessOrEqual(t.root, pivot, iterator)
-}
-
-func (t *LLRB) descendLessOrEqual(h *Node, pivot Item, iterator ItemIterator) bool {
-	if h == nil {
-		return true
-	}
-	if less(h.Item, pivot) || !less(pivot, h.Item) {
-		if !t.descendLessOrEqual(h.Right, pivot, iterator) {
-			return false
-		}
-		if !iterator(h.Item) {
-			return false
-		}
-	}
-	return t.descendLessOrEqual(h.Left, pivot, iterator)
-}
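A hedged usage sketch for the traversal API above, assuming the llrb package as vendored in this diff (New, InsertNoReplace, and AscendGreaterOrEqual all appear in llrb.go below); the item type is defined locally to keep the example self-contained:

```go
package main

import (
	"fmt"

	"github.com/petar/GoLLRB/llrb"
)

// intItem implements llrb.Item for plain ints.
type intItem int

func (x intItem) Less(than llrb.Item) bool { return x < than.(intItem) }

func main() {
	t := llrb.New()
	for _, v := range []intItem{5, 1, 9, 3} {
		t.InsertNoReplace(v)
	}
	// Visit every element >= 3 in ascending order: 3 5 9.
	t.AscendGreaterOrEqual(intItem(3), func(i llrb.Item) bool {
		fmt.Print(i, " ")
		return true
	})
	fmt.Println()
}
```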
diff --git a/vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go b/vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go
deleted file mode 100644
index 47126a3..0000000
--- a/vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2010 Petar Maymounkov. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package llrb
-
-// GetHeight() returns an item in the tree with key @key, and its height in the tree
-func (t *LLRB) GetHeight(key Item) (result Item, depth int) {
-	return t.getHeight(t.root, key)
-}
-
-func (t *LLRB) getHeight(h *Node, item Item) (Item, int) {
-	if h == nil {
-		return nil, 0
-	}
-	if less(item, h.Item) {
-		result, depth := t.getHeight(h.Left, item)
-		return result, depth + 1
-	}
-	if less(h.Item, item) {
-		result, depth := t.getHeight(h.Right, item)
-		return result, depth + 1
-	}
-	return h.Item, 0
-}
-
-// HeightStats() returns the average and standard deviation of the height
-// of elements in the tree
-func (t *LLRB) HeightStats() (avg, stddev float64) {
-	av := &avgVar{}
-	heightStats(t.root, 0, av)
-	return av.GetAvg(), av.GetStdDev()
-}
-
-func heightStats(h *Node, d int, av *avgVar) {
-	if h == nil {
-		return
-	}
-	av.Add(float64(d))
-	if h.Left != nil {
-		heightStats(h.Left, d+1, av)
-	}
-	if h.Right != nil {
-		heightStats(h.Right, d+1, av)
-	}
-}
diff --git a/vendor/github.com/petar/GoLLRB/llrb/llrb.go b/vendor/github.com/petar/GoLLRB/llrb/llrb.go
deleted file mode 100644
index 81373fb..0000000
--- a/vendor/github.com/petar/GoLLRB/llrb/llrb.go
+++ /dev/null
@@ -1,456 +0,0 @@
-// Copyright 2010 Petar Maymounkov. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// A Left-Leaning Red-Black (LLRB) implementation of 2-3 balanced binary search trees,
-// based on the following work:
-//
-//   http://www.cs.princeton.edu/~rs/talks/LLRB/08Penn.pdf
-//   http://www.cs.princeton.edu/~rs/talks/LLRB/LLRB.pdf
-//   http://www.cs.princeton.edu/~rs/talks/LLRB/Java/RedBlackBST.java
-//
-//  2-3 trees (and the run-time equivalent 2-3-4 trees) are the de facto standard BST
-//  algorithms found in implementations of Python, Java, and other libraries. The LLRB
-//  implementation of 2-3 trees is a recent improvement on the traditional implementation,
-//  observed and documented by Robert Sedgewick.
-//
-package llrb
-
-// LLRB is a Left-Leaning Red-Black (LLRB) implementation of 2-3 trees.
-type LLRB struct {
-	count int
-	root  *Node
-}
-
-type Node struct {
-	Item
-	Left, Right *Node // Pointers to left and right child nodes
-	Black       bool  // If set, the color of the link (incoming from the parent) is black
-	// In the LLRB, new nodes are always red, hence the zero-value for node
-}
-
-type Item interface {
-	Less(than Item) bool
-}
-
-//
-func less(x, y Item) bool {
-	if x == pinf {
-		return false
-	}
-	if x == ninf {
-		return true
-	}
-	return x.Less(y)
-}
-
-// Inf returns an Item that is "bigger than" any other item, if sign is positive.
-// Otherwise it returns an Item that is "smaller than" any other item.
-func Inf(sign int) Item {
-	if sign == 0 {
-		panic("sign")
-	}
-	if sign > 0 {
-		return pinf
-	}
-	return ninf
-}
-
-var (
-	ninf = nInf{}
-	pinf = pInf{}
-)
-
-type nInf struct{}
-
-func (nInf) Less(Item) bool {
-	return true
-}
-
-type pInf struct{}
-
-func (pInf) Less(Item) bool {
-	return false
-}
-
-// New() allocates a new tree
-func New() *LLRB {
-	return &LLRB{}
-}
-
-// SetRoot sets the root node of the tree.
-// It is intended to be used by functions that deserialize the tree.
-func (t *LLRB) SetRoot(r *Node) {
-	t.root = r
-}
-
-// Root returns the root node of the tree.
-// It is intended to be used by functions that serialize the tree.
-func (t *LLRB) Root() *Node {
-	return t.root
-}
-
-// Len returns the number of nodes in the tree.
-func (t *LLRB) Len() int { return t.count }
-
-// Has returns true if the tree contains an element whose order is the same as that of key.
-func (t *LLRB) Has(key Item) bool {
-	return t.Get(key) != nil
-}
-
-// Get retrieves an element from the tree whose order is the same as that of key.
-func (t *LLRB) Get(key Item) Item {
-	h := t.root
-	for h != nil {
-		switch {
-		case less(key, h.Item):
-			h = h.Left
-		case less(h.Item, key):
-			h = h.Right
-		default:
-			return h.Item
-		}
-	}
-	return nil
-}
-
-// Min returns the minimum element in the tree.
-func (t *LLRB) Min() Item {
-	h := t.root
-	if h == nil {
-		return nil
-	}
-	for h.Left != nil {
-		h = h.Left
-	}
-	return h.Item
-}
-
-// Max returns the maximum element in the tree.
-func (t *LLRB) Max() Item {
-	h := t.root
-	if h == nil {
-		return nil
-	}
-	for h.Right != nil {
-		h = h.Right
-	}
-	return h.Item
-}
-
-func (t *LLRB) ReplaceOrInsertBulk(items ...Item) {
-	for _, i := range items {
-		t.ReplaceOrInsert(i)
-	}
-}
-
-func (t *LLRB) InsertNoReplaceBulk(items ...Item) {
-	for _, i := range items {
-		t.InsertNoReplace(i)
-	}
-}
-
-// ReplaceOrInsert inserts item into the tree. If an existing
-// element has the same order, it is removed from the tree and returned.
-func (t *LLRB) ReplaceOrInsert(item Item) Item {
-	if item == nil {
-		panic("inserting nil item")
-	}
-	var replaced Item
-	t.root, replaced = t.replaceOrInsert(t.root, item)
-	t.root.Black = true
-	if replaced == nil {
-		t.count++
-	}
-	return replaced
-}
-
-func (t *LLRB) replaceOrInsert(h *Node, item Item) (*Node, Item) {
-	if h == nil {
-		return newNode(item), nil
-	}
-
-	h = walkDownRot23(h)
-
-	var replaced Item
-	if less(item, h.Item) { // BUG
-		h.Left, replaced = t.replaceOrInsert(h.Left, item)
-	} else if less(h.Item, item) {
-		h.Right, replaced = t.replaceOrInsert(h.Right, item)
-	} else {
-		replaced, h.Item = h.Item, item
-	}
-
-	h = walkUpRot23(h)
-
-	return h, replaced
-}
-
-// InsertNoReplace inserts item into the tree. If an existing
-// element has the same order, both elements remain in the tree.
-func (t *LLRB) InsertNoReplace(item Item) {
-	if item == nil {
-		panic("inserting nil item")
-	}
-	t.root = t.insertNoReplace(t.root, item)
-	t.root.Black = true
-	t.count++
-}
-
-func (t *LLRB) insertNoReplace(h *Node, item Item) *Node {
-	if h == nil {
-		return newNode(item)
-	}
-
-	h = walkDownRot23(h)
-
-	if less(item, h.Item) {
-		h.Left = t.insertNoReplace(h.Left, item)
-	} else {
-		h.Right = t.insertNoReplace(h.Right, item)
-	}
-
-	return walkUpRot23(h)
-}
-
-// Rotation driver routines for 2-3 algorithm
-
-func walkDownRot23(h *Node) *Node { return h }
-
-func walkUpRot23(h *Node) *Node {
-	if isRed(h.Right) && !isRed(h.Left) {
-		h = rotateLeft(h)
-	}
-
-	if isRed(h.Left) && isRed(h.Left.Left) {
-		h = rotateRight(h)
-	}
-
-	if isRed(h.Left) && isRed(h.Right) {
-		flip(h)
-	}
-
-	return h
-}
-
-// Rotation driver routines for 2-3-4 algorithm
-
-func walkDownRot234(h *Node) *Node {
-	if isRed(h.Left) && isRed(h.Right) {
-		flip(h)
-	}
-
-	return h
-}
-
-func walkUpRot234(h *Node) *Node {
-	if isRed(h.Right) && !isRed(h.Left) {
-		h = rotateLeft(h)
-	}
-
-	if isRed(h.Left) && isRed(h.Left.Left) {
-		h = rotateRight(h)
-	}
-
-	return h
-}
-
-// DeleteMin deletes the minimum element in the tree and returns the
-// deleted item or nil otherwise.
-func (t *LLRB) DeleteMin() Item {
-	var deleted Item
-	t.root, deleted = deleteMin(t.root)
-	if t.root != nil {
-		t.root.Black = true
-	}
-	if deleted != nil {
-		t.count--
-	}
-	return deleted
-}
-
-// deleteMin code for LLRB 2-3 trees
-func deleteMin(h *Node) (*Node, Item) {
-	if h == nil {
-		return nil, nil
-	}
-	if h.Left == nil {
-		return nil, h.Item
-	}
-
-	if !isRed(h.Left) && !isRed(h.Left.Left) {
-		h = moveRedLeft(h)
-	}
-
-	var deleted Item
-	h.Left, deleted = deleteMin(h.Left)
-
-	return fixUp(h), deleted
-}
-
-// DeleteMax deletes the maximum element in the tree and returns
-// the deleted item or nil otherwise.
-func (t *LLRB) DeleteMax() Item {
-	var deleted Item
-	t.root, deleted = deleteMax(t.root)
-	if t.root != nil {
-		t.root.Black = true
-	}
-	if deleted != nil {
-		t.count--
-	}
-	return deleted
-}
-
-func deleteMax(h *Node) (*Node, Item) {
-	if h == nil {
-		return nil, nil
-	}
-	if isRed(h.Left) {
-		h = rotateRight(h)
-	}
-	if h.Right == nil {
-		return nil, h.Item
-	}
-	if !isRed(h.Right) && !isRed(h.Right.Left) {
-		h = moveRedRight(h)
-	}
-	var deleted Item
-	h.Right, deleted = deleteMax(h.Right)
-
-	return fixUp(h), deleted
-}
-
-// Delete deletes an item from the tree whose key equals key.
-// The deleted item is returned; otherwise nil is returned.
-func (t *LLRB) Delete(key Item) Item {
-	var deleted Item
-	t.root, deleted = t.delete(t.root, key)
-	if t.root != nil {
-		t.root.Black = true
-	}
-	if deleted != nil {
-		t.count--
-	}
-	return deleted
-}
-
-func (t *LLRB) delete(h *Node, item Item) (*Node, Item) {
-	var deleted Item
-	if h == nil {
-		return nil, nil
-	}
-	if less(item, h.Item) {
-		if h.Left == nil { // item not present. Nothing to delete
-			return h, nil
-		}
-		if !isRed(h.Left) && !isRed(h.Left.Left) {
-			h = moveRedLeft(h)
-		}
-		h.Left, deleted = t.delete(h.Left, item)
-	} else {
-		if isRed(h.Left) {
-			h = rotateRight(h)
-		}
-		// If @item equals @h.Item and no right children at @h
-		if !less(h.Item, item) && h.Right == nil {
-			return nil, h.Item
-		}
-		// PETAR: Added 'h.Right != nil' below
-		if h.Right != nil && !isRed(h.Right) && !isRed(h.Right.Left) {
-			h = moveRedRight(h)
-		}
-		// If @item equals @h.Item, and (from above) 'h.Right != nil'
-		if !less(h.Item, item) {
-			var subDeleted Item
-			h.Right, subDeleted = deleteMin(h.Right)
-			if subDeleted == nil {
-				panic("logic")
-			}
-			deleted, h.Item = h.Item, subDeleted
-		} else { // Else, @item is bigger than @h.Item
-			h.Right, deleted = t.delete(h.Right, item)
-		}
-	}
-
-	return fixUp(h), deleted
-}
-
-// Internal node manipulation routines
-
-func newNode(item Item) *Node { return &Node{Item: item} }
-
-func isRed(h *Node) bool {
-	if h == nil {
-		return false
-	}
-	return !h.Black
-}
-
-func rotateLeft(h *Node) *Node {
-	x := h.Right
-	if x.Black {
-		panic("rotating a black link")
-	}
-	h.Right = x.Left
-	x.Left = h
-	x.Black = h.Black
-	h.Black = false
-	return x
-}
-
-func rotateRight(h *Node) *Node {
-	x := h.Left
-	if x.Black {
-		panic("rotating a black link")
-	}
-	h.Left = x.Right
-	x.Right = h
-	x.Black = h.Black
-	h.Black = false
-	return x
-}
-
-// REQUIRE: Left and Right children must be present
-func flip(h *Node) {
-	h.Black = !h.Black
-	h.Left.Black = !h.Left.Black
-	h.Right.Black = !h.Right.Black
-}
-
-// REQUIRE: Left and Right children must be present
-func moveRedLeft(h *Node) *Node {
-	flip(h)
-	if isRed(h.Right.Left) {
-		h.Right = rotateRight(h.Right)
-		h = rotateLeft(h)
-		flip(h)
-	}
-	return h
-}
-
-// REQUIRE: Left and Right children must be present
-func moveRedRight(h *Node) *Node {
-	flip(h)
-	if isRed(h.Left.Left) {
-		h = rotateRight(h)
-		flip(h)
-	}
-	return h
-}
-
-func fixUp(h *Node) *Node {
-	if isRed(h.Right) {
-		h = rotateLeft(h)
-	}
-
-	if isRed(h.Left) && isRed(h.Left.Left) {
-		h = rotateRight(h)
-	}
-
-	if isRed(h.Left) && isRed(h.Right) {
-		flip(h)
-	}
-
-	return h
-}
diff --git a/vendor/github.com/petar/GoLLRB/llrb/util.go b/vendor/github.com/petar/GoLLRB/llrb/util.go
deleted file mode 100644
index 63dbdb2..0000000
--- a/vendor/github.com/petar/GoLLRB/llrb/util.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2010 Petar Maymounkov. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package llrb
-
-type Int int
-
-func (x Int) Less(than Item) bool {
-	return x < than.(Int)
-}
-
-type String string
-
-func (x String) Less(than Item) bool {
-	return x < than.(String)
-}
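For reference, a minimal, self-contained sketch of how the tree removed above was typically driven together with these helper types (API exactly as in the deleted sources; the values are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/petar/GoLLRB/llrb"
)

func main() {
	t := llrb.New()
	for _, v := range []llrb.Int{5, 1, 3} {
		t.InsertNoReplace(v)
	}
	fmt.Println(t.Len())               // 3
	fmt.Println(t.Get(llrb.Int(3)))    // 3
	fmt.Println(t.Min(), t.Max())      // 1 5
	fmt.Println(t.Delete(llrb.Int(1))) // 1 (the deleted item, or nil)
}
```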
diff --git a/vendor/github.com/peterbourgon/diskv/LICENSE b/vendor/github.com/peterbourgon/diskv/LICENSE
deleted file mode 100644
index 41ce7f1..0000000
--- a/vendor/github.com/peterbourgon/diskv/LICENSE
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2011-2012 Peter Bourgon
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/peterbourgon/diskv/README.md b/vendor/github.com/peterbourgon/diskv/README.md
deleted file mode 100644
index 3474739..0000000
--- a/vendor/github.com/peterbourgon/diskv/README.md
+++ /dev/null
@@ -1,141 +0,0 @@
-# What is diskv?
-
-Diskv (disk-vee) is a simple, persistent key-value store written in the Go
-language. It starts with an incredibly simple API for storing arbitrary data on
-a filesystem by key, and builds several layers of performance-enhancing
-abstraction on top.  The end result is a conceptually simple, but highly
-performant, disk-backed storage system.
-
-[![Build Status][1]][2]
-
-[1]: https://drone.io/github.com/peterbourgon/diskv/status.png
-[2]: https://drone.io/github.com/peterbourgon/diskv/latest
-
-
-# Installing
-
-Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5].
-Then,
-
-```bash
-$ go get github.com/peterbourgon/diskv
-```
-
-[3]: http://golang.org
-[4]: http://golang.org/doc/install/source
-[5]: http://golang.org/doc/install
-
-
-# Usage
-
-```go
-package main
-
-import (
-	"fmt"
-	"github.com/peterbourgon/diskv"
-)
-
-func main() {
-	// Simplest transform function: put all the data files into the base dir.
-	flatTransform := func(s string) []string { return []string{} }
-
-	// Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache.
-	d := diskv.New(diskv.Options{
-		BasePath:     "my-data-dir",
-		Transform:    flatTransform,
-		CacheSizeMax: 1024 * 1024,
-	})
-
-	// Write three bytes to the key "alpha".
-	key := "alpha"
-	d.Write(key, []byte{'1', '2', '3'})
-
-	// Read the value back out of the store.
-	value, _ := d.Read(key)
-	fmt.Printf("%v\n", value)
-
-	// Erase the key+value from the store (and the disk).
-	d.Erase(key)
-}
-```
-
-More complex examples can be found in the "examples" subdirectory.
-
-
-# Theory
-
-## Basic idea
-
-At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`).
-The data is written to a single file on disk, with the same name as the key.
-The key determines where that file will be stored, via a user-provided
-`TransformFunc`, which takes a key and returns a slice (`[]string`)
-corresponding to a path list where the key file will be stored. The simplest
-TransformFunc,
-
-```go
-func SimpleTransform (key string) []string {
-    return []string{}
-}
-```
-
-will place all keys in the same, base directory. The design is inspired by
-[Redis diskstore][6]; a TransformFunc which emulates the default diskstore
-behavior is available in the content-addressable-storage example.
-
-[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1
-
-**Note** that your TransformFunc should ensure that one valid key doesn't
-transform to a subset of another valid key. That is, it shouldn't be possible
-to construct valid keys that resolve to directory names. As a concrete example,
-if your TransformFunc splits on every 3 characters, then
-
-```go
-d.Write("abcabc", val) // OK: written to <base>/abc/abc/abcabc
-d.Write("abc", val)    // Error: attempted write to <base>/abc/abc, but it's a directory
-```
-
-This will be addressed in an upcoming version of diskv.
-
-Probably the most important design principle behind diskv is that your data is
-always flatly available on the disk. diskv will never do anything that would
-prevent you from accessing, copying, backing up, or otherwise interacting with
-your data via common UNIX commandline tools.
-
-## Adding a cache
-
-An in-memory caching layer is provided by combining the BasicStore
-functionality with a simple map structure, and keeping it up-to-date as
-appropriate. Since the map structure in Go is not threadsafe, it's combined
-with an RWMutex to provide safe concurrent access.
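That is the standard Go idiom; as an illustrative sketch (not the package's actual internals, assuming `import "sync"`):

```go
// rwCache is a minimal illustration of an RWMutex-guarded map.
type rwCache struct {
	mu sync.RWMutex
	m  map[string][]byte
}

func (c *rwCache) get(key string) ([]byte, bool) {
	c.mu.RLock() // multiple readers may hold this concurrently
	defer c.mu.RUnlock()
	v, ok := c.m[key]
	return v, ok
}

func (c *rwCache) set(key string, val []byte) {
	c.mu.Lock() // writers take the lock exclusively
	defer c.mu.Unlock()
	c.m[key] = val
}
```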
-
-## Adding order
-
-diskv is a key-value store and therefore inherently unordered. An ordering
-system can be injected into the store by passing something which satisfies the
-diskv.Index interface. (A default implementation, using Google's
-[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a
-user-provided Less function) index of the keys, which can be queried.
-
-[7]: https://github.com/google/btree
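For example, wiring the default BTree-backed index into a store (field and method names as in the index.go removed further below; keys and values are illustrative) might look like:

```go
d := diskv.New(diskv.Options{
	BasePath:  "my-data-dir",
	Index:     &diskv.BTreeIndex{},
	IndexLess: func(a, b string) bool { return a < b },
})
d.Write("beta", []byte("2"))
d.Write("alpha", []byte("1"))

// First ten keys in Less order, starting from the beginning.
fmt.Println(d.Index.Keys("", 10)) // [alpha beta]
```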
-
-## Adding compression
-
-Something which implements the diskv.Compression interface may be passed
-during store creation, so that all Writes and Reads are filtered through
-a compression/decompression pipeline. Several default implementations,
-using stdlib compression algorithms, are provided. Note that data is cached
-compressed; the cost of decompression is borne with each Read.
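Concretely, a sketch using one of the constructors from the compression.go removed below (key and payload are illustrative):

```go
d := diskv.New(diskv.Options{
	BasePath:    "my-data-dir",
	Compression: diskv.NewGzipCompression(),
})

// The value is gzipped on the way in and gunzipped on the way out.
_ = d.Write("alpha", []byte("some compressible payload"))
val, _ := d.Read("alpha")
fmt.Println(string(val))
```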
-
-## Streaming
-
-diskv also now provides ReadStream and WriteStream methods, to allow very large
-data to be handled efficiently.
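A sketch of the streaming API (signatures as in the deleted diskv.go; the file name is an assumption for illustration):

```go
f, err := os.Open("big-input.bin") // illustrative file name
if err != nil {
	log.Fatal(err)
}
defer f.Close()

// sync=true forces an fsync before WriteStream returns.
if err := d.WriteStream("big", f, true); err != nil {
	log.Fatal(err)
}

rc, err := d.ReadStream("big", true) // direct=true bypasses the cache
if err != nil {
	log.Fatal(err)
}
defer rc.Close()
_, _ = io.Copy(os.Stdout, rc)
```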
-
-
-# Future plans
-
- * Needs plenty of robust testing: huge datasets, etc...
- * More thorough benchmarking
- * Your suggestions for use-cases I haven't thought of
diff --git a/vendor/github.com/peterbourgon/diskv/compression.go b/vendor/github.com/peterbourgon/diskv/compression.go
deleted file mode 100644
index 5192b02..0000000
--- a/vendor/github.com/peterbourgon/diskv/compression.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package diskv
-
-import (
-	"compress/flate"
-	"compress/gzip"
-	"compress/zlib"
-	"io"
-)
-
-// Compression is an interface that Diskv uses to implement compression of
-// data. Writer takes a destination io.Writer and returns a WriteCloser that
-// compresses all data written through it. Reader takes a source io.Reader and
-// returns a ReadCloser that decompresses all data read through it. You may
-// define these methods on your own type, or use one of the NewCompression
-// helpers.
-type Compression interface {
-	Writer(dst io.Writer) (io.WriteCloser, error)
-	Reader(src io.Reader) (io.ReadCloser, error)
-}
-
-// NewGzipCompression returns a Gzip-based Compression.
-func NewGzipCompression() Compression {
-	return NewGzipCompressionLevel(flate.DefaultCompression)
-}
-
-// NewGzipCompressionLevel returns a Gzip-based Compression with the given level.
-func NewGzipCompressionLevel(level int) Compression {
-	return &genericCompression{
-		wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) },
-		rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) },
-	}
-}
-
-// NewZlibCompression returns a Zlib-based Compression.
-func NewZlibCompression() Compression {
-	return NewZlibCompressionLevel(flate.DefaultCompression)
-}
-
-// NewZlibCompressionLevel returns a Zlib-based Compression with the given level.
-func NewZlibCompressionLevel(level int) Compression {
-	return NewZlibCompressionLevelDict(level, nil)
-}
-
-// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given
-// level, based on the given dictionary.
-func NewZlibCompressionLevelDict(level int, dict []byte) Compression {
-	return &genericCompression{
-		func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) },
-		func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) },
-	}
-}
-
-type genericCompression struct {
-	wf func(w io.Writer) (io.WriteCloser, error)
-	rf func(r io.Reader) (io.ReadCloser, error)
-}
-
-func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
-	return g.wf(dst)
-}
-
-func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) {
-	return g.rf(src)
-}
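Because the interface above is only two methods, callers could also plug in their own codec. A hypothetical DEFLATE-based implementation (not part of the package) would be:

```go
import (
	"compress/flate"
	"io"
)

// flateCompression is a hypothetical Compression using raw DEFLATE,
// without the gzip/zlib framing the built-in constructors add.
type flateCompression struct{}

func (flateCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
	return flate.NewWriter(dst, flate.BestSpeed)
}

func (flateCompression) Reader(src io.Reader) (io.ReadCloser, error) {
	return flate.NewReader(src), nil
}
```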
diff --git a/vendor/github.com/peterbourgon/diskv/diskv.go b/vendor/github.com/peterbourgon/diskv/diskv.go
deleted file mode 100644
index 524dc0a..0000000
--- a/vendor/github.com/peterbourgon/diskv/diskv.go
+++ /dev/null
@@ -1,624 +0,0 @@
-// Diskv (disk-vee) is a simple, persistent, key-value store.
-// It stores all data flatly on the filesystem.
-
-package diskv
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"strings"
-	"sync"
-	"syscall"
-)
-
-const (
-	defaultBasePath             = "diskv"
-	defaultFilePerm os.FileMode = 0666
-	defaultPathPerm os.FileMode = 0777
-)
-
-var (
-	defaultTransform   = func(s string) []string { return []string{} }
-	errCanceled        = errors.New("canceled")
-	errEmptyKey        = errors.New("empty key")
-	errBadKey          = errors.New("bad key")
-	errImportDirectory = errors.New("can't import a directory")
-)
-
-// TransformFunction transforms a key into a slice of strings, with each
-// element in the slice representing a directory in the file path where the
-// key's entry will eventually be stored.
-//
-// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"],
-// the final location of the data file will be <basedir>/ab/cde/f/abcdef
-type TransformFunction func(s string) []string
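As an illustration only (not part of the removed package), a transform matching the "abcdef" example in the comment above could be written as:

```go
// Maps "abcdef" to ["ab", "cde", "f"]; shorter keys fall back to the
// base directory. Purely illustrative.
var exampleTransform TransformFunction = func(s string) []string {
	if len(s) < 6 {
		return []string{}
	}
	return []string{s[:2], s[2:5], s[5:6]}
}
```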
-
-// Options define a set of properties that dictate Diskv behavior.
-// All values are optional.
-type Options struct {
-	BasePath     string
-	Transform    TransformFunction
-	CacheSizeMax uint64 // bytes
-	PathPerm     os.FileMode
-	FilePerm     os.FileMode
-	// If TempDir is set, it will enable filesystem atomic writes by
-	// writing temporary files to that location before being moved
-	// to BasePath.
-	// Note that TempDir MUST be on the same device/partition as
-	// BasePath.
-	TempDir string
-
-	Index     Index
-	IndexLess LessFunction
-
-	Compression Compression
-}
-
-// Diskv implements the Diskv interface. You shouldn't construct Diskv
-// structures directly; instead, use the New constructor.
-type Diskv struct {
-	Options
-	mu        sync.RWMutex
-	cache     map[string][]byte
-	cacheSize uint64
-}
-
-// New returns an initialized Diskv structure, ready to use.
-// If the path identified by baseDir already contains data,
-// it will be accessible, but not yet cached.
-func New(o Options) *Diskv {
-	if o.BasePath == "" {
-		o.BasePath = defaultBasePath
-	}
-	if o.Transform == nil {
-		o.Transform = defaultTransform
-	}
-	if o.PathPerm == 0 {
-		o.PathPerm = defaultPathPerm
-	}
-	if o.FilePerm == 0 {
-		o.FilePerm = defaultFilePerm
-	}
-
-	d := &Diskv{
-		Options:   o,
-		cache:     map[string][]byte{},
-		cacheSize: 0,
-	}
-
-	if d.Index != nil && d.IndexLess != nil {
-		d.Index.Initialize(d.IndexLess, d.Keys(nil))
-	}
-
-	return d
-}
-
-// Write synchronously writes the key-value pair to disk, making it immediately
-// available for reads. Write relies on the filesystem to perform an eventual
-// sync to physical media. If you need stronger guarantees, see WriteStream.
-func (d *Diskv) Write(key string, val []byte) error {
-	return d.WriteStream(key, bytes.NewBuffer(val), false)
-}
-
-// WriteStream writes the data represented by the io.Reader to the disk, under
-// the provided key. If sync is true, WriteStream performs an explicit sync on
-// the file as soon as it's written.
-//
-// bytes.Buffer provides io.Reader semantics for basic data types.
-func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error {
-	if len(key) <= 0 {
-		return errEmptyKey
-	}
-
-	d.mu.Lock()
-	defer d.mu.Unlock()
-
-	return d.writeStreamWithLock(key, r, sync)
-}
-
-// createKeyFileWithLock either creates the key file directly, or
-// creates a temporary file in TempDir if it is set.
-func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) {
-	if d.TempDir != "" {
-		if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil {
-			return nil, fmt.Errorf("temp mkdir: %s", err)
-		}
-		f, err := ioutil.TempFile(d.TempDir, "")
-		if err != nil {
-			return nil, fmt.Errorf("temp file: %s", err)
-		}
-
-		if err := f.Chmod(d.FilePerm); err != nil {
-			f.Close()           // error deliberately ignored
-			os.Remove(f.Name()) // error deliberately ignored
-			return nil, fmt.Errorf("chmod: %s", err)
-		}
-		return f, nil
-	}
-
-	mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
-	f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm)
-	if err != nil {
-		return nil, fmt.Errorf("open file: %s", err)
-	}
-	return f, nil
-}
-
-// writeStreamWithLock does no input validation checking.
-func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error {
-	if err := d.ensurePathWithLock(key); err != nil {
-		return fmt.Errorf("ensure path: %s", err)
-	}
-
-	f, err := d.createKeyFileWithLock(key)
-	if err != nil {
-		return fmt.Errorf("create key file: %s", err)
-	}
-
-	wc := io.WriteCloser(&nopWriteCloser{f})
-	if d.Compression != nil {
-		wc, err = d.Compression.Writer(f)
-		if err != nil {
-			f.Close()           // error deliberately ignored
-			os.Remove(f.Name()) // error deliberately ignored
-			return fmt.Errorf("compression writer: %s", err)
-		}
-	}
-
-	if _, err := io.Copy(wc, r); err != nil {
-		f.Close()           // error deliberately ignored
-		os.Remove(f.Name()) // error deliberately ignored
-		return fmt.Errorf("i/o copy: %s", err)
-	}
-
-	if err := wc.Close(); err != nil {
-		f.Close()           // error deliberately ignored
-		os.Remove(f.Name()) // error deliberately ignored
-		return fmt.Errorf("compression close: %s", err)
-	}
-
-	if sync {
-		if err := f.Sync(); err != nil {
-			f.Close()           // error deliberately ignored
-			os.Remove(f.Name()) // error deliberately ignored
-			return fmt.Errorf("file sync: %s", err)
-		}
-	}
-
-	if err := f.Close(); err != nil {
-		return fmt.Errorf("file close: %s", err)
-	}
-
-	if f.Name() != d.completeFilename(key) {
-		if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil {
-			os.Remove(f.Name()) // error deliberately ignored
-			return fmt.Errorf("rename: %s", err)
-		}
-	}
-
-	if d.Index != nil {
-		d.Index.Insert(key)
-	}
-
-	d.bustCacheWithLock(key) // cache only on read
-
-	return nil
-}
-
-// Import imports the source file into diskv under the destination key. If the
-// destination key already exists, it's overwritten. If move is true, the
-// source file is removed after a successful import.
-func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
-	if dstKey == "" {
-		return errEmptyKey
-	}
-
-	if fi, err := os.Stat(srcFilename); err != nil {
-		return err
-	} else if fi.IsDir() {
-		return errImportDirectory
-	}
-
-	d.mu.Lock()
-	defer d.mu.Unlock()
-
-	if err := d.ensurePathWithLock(dstKey); err != nil {
-		return fmt.Errorf("ensure path: %s", err)
-	}
-
-	if move {
-		if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil {
-			d.bustCacheWithLock(dstKey)
-			return nil
-		} else if err != syscall.EXDEV {
-			// Any failure other than EXDEV (src and dst on different
-			// devices) is fatal; only EXDEV falls through to the copy below.
-			return err
-		}
-	}
-
-	f, err := os.Open(srcFilename)
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-	err = d.writeStreamWithLock(dstKey, f, false)
-	if err == nil && move {
-		err = os.Remove(srcFilename)
-	}
-	return err
-}
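Usage is a one-liner; the path and key below are illustrative:

```go
// Move /tmp/bulk.dat into the store under key "bulk". The rename fast
// path applies when the file already lives on BasePath's device.
if err := d.Import("/tmp/bulk.dat", "bulk", true); err != nil {
	log.Fatal(err)
}
```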
-
-// Read reads the key and returns the value.
-// If the key is available in the cache, Read won't touch the disk.
-// If the key is not in the cache, Read will have the side-effect of
-// lazily caching the value.
-func (d *Diskv) Read(key string) ([]byte, error) {
-	rc, err := d.ReadStream(key, false)
-	if err != nil {
-		return []byte{}, err
-	}
-	defer rc.Close()
-	return ioutil.ReadAll(rc)
-}
-
-// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
-// If the value is cached from a previous read, and direct is false,
-// ReadStream will use the cached value. Otherwise, it will return a handle to
-// the file on disk, and cache the data on read.
-//
-// If direct is true, ReadStream will lazily delete any cached value for the
-// key, and return a direct handle to the file on disk.
-//
-// If compression is enabled, ReadStream taps into the io.Reader stream prior
-// to decompression, and caches the compressed data.
-func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) {
-	d.mu.RLock()
-	defer d.mu.RUnlock()
-
-	if val, ok := d.cache[key]; ok {
-		if !direct {
-			buf := bytes.NewBuffer(val)
-			if d.Compression != nil {
-				return d.Compression.Reader(buf)
-			}
-			return ioutil.NopCloser(buf), nil
-		}
-
-		go func() {
-			d.mu.Lock()
-			defer d.mu.Unlock()
-			d.uncacheWithLock(key, uint64(len(val)))
-		}()
-	}
-
-	return d.readWithRLock(key)
-}
-
-// readWithRLock ignores the cache, and returns an io.ReadCloser representing the
-// decompressed data for the given key, streamed from the disk. Clients should
-// acquire a read lock on the Diskv and check the cache themselves before
-// calling readWithRLock.
-func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) {
-	filename := d.completeFilename(key)
-
-	fi, err := os.Stat(filename)
-	if err != nil {
-		return nil, err
-	}
-	if fi.IsDir() {
-		return nil, os.ErrNotExist
-	}
-
-	f, err := os.Open(filename)
-	if err != nil {
-		return nil, err
-	}
-
-	var r io.Reader
-	if d.CacheSizeMax > 0 {
-		r = newSiphon(f, d, key)
-	} else {
-		r = &closingReader{f}
-	}
-
-	var rc = io.ReadCloser(ioutil.NopCloser(r))
-	if d.Compression != nil {
-		rc, err = d.Compression.Reader(r)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return rc, nil
-}
-
-// closingReader provides a Reader that automatically closes the
-// embedded ReadCloser when it reaches EOF
-type closingReader struct {
-	rc io.ReadCloser
-}
-
-func (cr closingReader) Read(p []byte) (int, error) {
-	n, err := cr.rc.Read(p)
-	if err == io.EOF {
-		if closeErr := cr.rc.Close(); closeErr != nil {
-			return n, closeErr // close must succeed for Read to succeed
-		}
-	}
-	return n, err
-}
-
-// siphon is like a TeeReader: it copies all data read through it to an
-// internal buffer, and moves that buffer to the cache at EOF.
-type siphon struct {
-	f   *os.File
-	d   *Diskv
-	key string
-	buf *bytes.Buffer
-}
-
-// newSiphon constructs a siphoning reader that represents the passed file.
-// When a successful series of reads ends in an EOF, the siphon will write
-// the buffered data to Diskv's cache under the given key.
-func newSiphon(f *os.File, d *Diskv, key string) io.Reader {
-	return &siphon{
-		f:   f,
-		d:   d,
-		key: key,
-		buf: &bytes.Buffer{},
-	}
-}
-
-// Read implements the io.Reader interface for siphon.
-func (s *siphon) Read(p []byte) (int, error) {
-	n, err := s.f.Read(p)
-
-	if err == nil {
-		return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed
-	}
-
-	if err == io.EOF {
-		s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail
-		if closeErr := s.f.Close(); closeErr != nil {
-			return n, closeErr // close must succeed for Read to succeed
-		}
-		return n, err
-	}
-
-	return n, err
-}
-
-// Erase synchronously erases the given key from the disk and the cache.
-func (d *Diskv) Erase(key string) error {
-	d.mu.Lock()
-	defer d.mu.Unlock()
-
-	d.bustCacheWithLock(key)
-
-	// erase from index
-	if d.Index != nil {
-		d.Index.Delete(key)
-	}
-
-	// erase from disk
-	filename := d.completeFilename(key)
-	if s, err := os.Stat(filename); err == nil {
-		if s.IsDir() {
-			return errBadKey
-		}
-		if err = os.Remove(filename); err != nil {
-			return err
-		}
-	} else {
-		// Return err as-is so caller can do os.IsNotExist(err).
-		return err
-	}
-
-	// clean up and return
-	d.pruneDirsWithLock(key)
-	return nil
-}
-
-// EraseAll will delete all of the data from the store, both in the cache and on
-// the disk. Note that EraseAll doesn't distinguish diskv-related data from non-
-// diskv-related data. Care should be taken to always specify a diskv base
-// directory that is exclusively for diskv data.
-func (d *Diskv) EraseAll() error {
-	d.mu.Lock()
-	defer d.mu.Unlock()
-	d.cache = make(map[string][]byte)
-	d.cacheSize = 0
-	if d.TempDir != "" {
-		os.RemoveAll(d.TempDir) // errors ignored
-	}
-	return os.RemoveAll(d.BasePath)
-}
-
-// Has returns true if the given key exists.
-func (d *Diskv) Has(key string) bool {
-	d.mu.Lock()
-	defer d.mu.Unlock()
-
-	if _, ok := d.cache[key]; ok {
-		return true
-	}
-
-	filename := d.completeFilename(key)
-	s, err := os.Stat(filename)
-	if err != nil {
-		return false
-	}
-	if s.IsDir() {
-		return false
-	}
-
-	return true
-}
-
-// Keys returns a channel that will yield every key accessible by the store,
-// in undefined order. If a cancel channel is provided, closing it will
-// terminate and close the keys channel.
-func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string {
-	return d.KeysPrefix("", cancel)
-}
-
-// KeysPrefix returns a channel that will yield every key accessible by the
-// store with the given prefix, in undefined order. If a cancel channel is
-// provided, closing it will terminate and close the keys channel. If the
-// provided prefix is the empty string, all keys will be yielded.
-func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string {
-	var prepath string
-	if prefix == "" {
-		prepath = d.BasePath
-	} else {
-		prepath = d.pathFor(prefix)
-	}
-	c := make(chan string)
-	go func() {
-		filepath.Walk(prepath, walker(c, prefix, cancel))
-		close(c)
-	}()
-	return c
-}
-
-// walker returns a function which satisfies the filepath.WalkFunc interface.
-// It sends every non-directory file entry down the channel c.
-func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc {
-	return func(path string, info os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-
-		if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) {
-			return nil // "pass"
-		}
-
-		select {
-		case c <- info.Name():
-		case <-cancel:
-			return errCanceled
-		}
-
-		return nil
-	}
-}
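Draining the channel, with the option to abort the walk early, looks like this sketch (the prefix is illustrative):

```go
cancel := make(chan struct{})
defer close(cancel) // unblocks the walker if we stop consuming early

for key := range d.KeysPrefix("user-", cancel) {
	fmt.Println(key)
}
```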
-
-// pathFor returns the absolute path for location on the filesystem where the
-// data for the given key will be stored.
-func (d *Diskv) pathFor(key string) string {
-	return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...))
-}
-
-// ensurePathWithLock is a helper function that generates all necessary
-// directories on the filesystem for the given key.
-func (d *Diskv) ensurePathWithLock(key string) error {
-	return os.MkdirAll(d.pathFor(key), d.PathPerm)
-}
-
-// completeFilename returns the absolute path to the file for the given key.
-func (d *Diskv) completeFilename(key string) string {
-	return filepath.Join(d.pathFor(key), key)
-}
-
-// cacheWithLock attempts to cache the given key-value pair in the store's
-// cache. It can fail if the value is larger than the cache's maximum size.
-func (d *Diskv) cacheWithLock(key string, val []byte) error {
-	valueSize := uint64(len(val))
-	if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
-		return fmt.Errorf("%s; not caching", err)
-	}
-
-	// be very strict about memory guarantees
-	if (d.cacheSize + valueSize) > d.CacheSizeMax {
-		panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax))
-	}
-
-	d.cache[key] = val
-	d.cacheSize += valueSize
-	return nil
-}
-
-// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock.
-func (d *Diskv) cacheWithoutLock(key string, val []byte) error {
-	d.mu.Lock()
-	defer d.mu.Unlock()
-	return d.cacheWithLock(key, val)
-}
-
-func (d *Diskv) bustCacheWithLock(key string) {
-	if val, ok := d.cache[key]; ok {
-		d.uncacheWithLock(key, uint64(len(val)))
-	}
-}
-
-func (d *Diskv) uncacheWithLock(key string, sz uint64) {
-	d.cacheSize -= sz
-	delete(d.cache, key)
-}
-
-// pruneDirsWithLock deletes empty directories in the path walk leading to the
-// key k. Typically this function is called after an Erase is made.
-func (d *Diskv) pruneDirsWithLock(key string) error {
-	pathlist := d.Transform(key)
-	for i := range pathlist {
-		dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...))
-
-		// thanks to Steven Blenkinsop for this snippet
-		switch fi, err := os.Stat(dir); true {
-		case err != nil:
-			return err
-		case !fi.IsDir():
-			panic(fmt.Sprintf("corrupt dirstate at %s", dir))
-		}
-
-		nlinks, err := filepath.Glob(filepath.Join(dir, "*"))
-		if err != nil {
-			return err
-		} else if len(nlinks) > 0 {
-			return nil // directory not empty -- do not prune
-		}
-		if err = os.Remove(dir); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order
-// until the cache has at least valueSize bytes available.
-func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error {
-	if valueSize > d.CacheSizeMax {
-		return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax)
-	}
-
-	safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax }
-
-	for key, val := range d.cache {
-		if safe() {
-			break
-		}
-
-		d.uncacheWithLock(key, uint64(len(val)))
-	}
-
-	if !safe() {
-		panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax))
-	}
-
-	return nil
-}
-
-// nopWriteCloser wraps an io.Writer and provides a no-op Close method to
-// satisfy the io.WriteCloser interface.
-type nopWriteCloser struct {
-	io.Writer
-}
-
-func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) }
-func (wc *nopWriteCloser) Close() error                { return nil }
diff --git a/vendor/github.com/peterbourgon/diskv/index.go b/vendor/github.com/peterbourgon/diskv/index.go
deleted file mode 100644
index 96fee51..0000000
--- a/vendor/github.com/peterbourgon/diskv/index.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package diskv
-
-import (
-	"sync"
-
-	"github.com/google/btree"
-)
-
-// Index is a generic interface for things that can
-// provide an ordered list of keys.
-type Index interface {
-	Initialize(less LessFunction, keys <-chan string)
-	Insert(key string)
-	Delete(key string)
-	Keys(from string, n int) []string
-}
-
-// LessFunction is used to initialize an Index of keys in a specific order.
-type LessFunction func(string, string) bool
-
-// btreeString is a custom data type that satisfies the BTree Less interface,
-// making the strings it wraps sortable by the BTree package.
-type btreeString struct {
-	s string
-	l LessFunction
-}
-
-// Less satisfies the BTree.Less interface using the btreeString's LessFunction.
-func (s btreeString) Less(i btree.Item) bool {
-	return s.l(s.s, i.(btreeString).s)
-}
-
-// BTreeIndex is an implementation of the Index interface using google/btree.
-type BTreeIndex struct {
-	sync.RWMutex
-	LessFunction
-	*btree.BTree
-}
-
-// Initialize populates the BTree tree with data from the keys channel,
-// according to the passed less function. It's destructive to the BTreeIndex.
-func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) {
-	i.Lock()
-	defer i.Unlock()
-	i.LessFunction = less
-	i.BTree = rebuild(less, keys)
-}
-
-// Insert inserts the given key (only) into the BTree tree.
-func (i *BTreeIndex) Insert(key string) {
-	i.Lock()
-	defer i.Unlock()
-	if i.BTree == nil || i.LessFunction == nil {
-		panic("uninitialized index")
-	}
-	i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction})
-}
-
-// Delete removes the given key (only) from the BTree tree.
-func (i *BTreeIndex) Delete(key string) {
-	i.Lock()
-	defer i.Unlock()
-	if i.BTree == nil || i.LessFunction == nil {
-		panic("uninitialized index")
-	}
-	i.BTree.Delete(btreeString{s: key, l: i.LessFunction})
-}
-
-// Keys yields a maximum of n keys in order. If the passed 'from' key is empty,
-// Keys will return the first n keys. If the passed 'from' key is non-empty, the
-// first key in the returned slice will be the key that immediately follows the
-// passed key, in key order.
-func (i *BTreeIndex) Keys(from string, n int) []string {
-	i.RLock()
-	defer i.RUnlock()
-
-	if i.BTree == nil || i.LessFunction == nil {
-		panic("uninitialized index")
-	}
-
-	if i.BTree.Len() <= 0 {
-		return []string{}
-	}
-
-	btreeFrom := btreeString{s: from, l: i.LessFunction}
-	skipFirst := true
-	if len(from) <= 0 || !i.BTree.Has(btreeFrom) {
-		// no such key, so fabricate an always-smallest item
-		btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }}
-		skipFirst = false
-	}
-
-	keys := []string{}
-	iterator := func(i btree.Item) bool {
-		keys = append(keys, i.(btreeString).s)
-		return len(keys) < n
-	}
-	i.BTree.AscendGreaterOrEqual(btreeFrom, iterator)
-
-	if skipFirst && len(keys) > 0 {
-		keys = keys[1:]
-	}
-
-	return keys
-}
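A sketch of that pagination contract (keys and page size are illustrative):

```go
idx := &diskv.BTreeIndex{}

seed := make(chan string)
go func() {
	for _, k := range []string{"b", "a", "c"} {
		seed <- k
	}
	close(seed)
}()
idx.Initialize(func(a, b string) bool { return a < b }, seed)

fmt.Println(idx.Keys("", 2))  // [a b] -- the first page
fmt.Println(idx.Keys("b", 2)) // [c]   -- the page after key "b"
```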
-
-// rebuild does the work of regenerating the index
-// with the given keys.
-func rebuild(less LessFunction, keys <-chan string) *btree.BTree {
-	tree := btree.New(2)
-	for key := range keys {
-		tree.ReplaceOrInsert(btreeString{s: key, l: less})
-	}
-	return tree
-}
diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore
index e48bab3..5e98735 100644
--- a/vendor/github.com/pierrec/lz4/.gitignore
+++ b/vendor/github.com/pierrec/lz4/.gitignore
@@ -30,4 +30,5 @@
 
 # End of https://www.gitignore.io/api/macos
 
-lz4c/lz4c
+cmd/*/*exe
+.idea
\ No newline at end of file
diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml
index 658910d..fd6c6db 100644
--- a/vendor/github.com/pierrec/lz4/.travis.yml
+++ b/vendor/github.com/pierrec/lz4/.travis.yml
@@ -2,12 +2,12 @@
 
 env:
   - GO111MODULE=off
-  - GO111MODULE=on
 
 go:
   - 1.9.x
   - 1.10.x
   - 1.11.x
+  - 1.12.x
   - master
 
 matrix:
diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md
index 50a10ee..be1f52a 100644
--- a/vendor/github.com/pierrec/lz4/README.md
+++ b/vendor/github.com/pierrec/lz4/README.md
@@ -1,24 +1,106 @@
-[![godoc](https://godoc.org/github.com/pierrec/lz4?status.png)](https://godoc.org/github.com/pierrec/lz4)
+# lz4 : LZ4 compression in pure Go
 
-# lz4
-LZ4 compression and decompression in pure Go.
+[![GoDoc](https://godoc.org/github.com/pierrec/lz4?status.svg)](https://godoc.org/github.com/pierrec/lz4)
+[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4)
+[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4)
+[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags)
 
-## Usage
+## Overview
 
-```go
-import "github.com/pierrec/lz4"
+This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low-level compress and uncompress functions for LZ4 data blocks.
+The implementation is based on the [reference C implementation](https://github.com/lz4/lz4).
+
+## Install
+
+Assuming you have the go toolchain installed:
+
+```
+go get github.com/pierrec/lz4
 ```
 
-## Description
-Package lz4 implements reading and writing lz4 compressed data (a frame),
-as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html.
+A command-line tool to compress and decompress LZ4 files is also provided.
 
-This package is **compatible with the LZ4 frame format**. Although the block level
-compression and decompression functions are exposed and are fully compatible with the
-lz4 block format definition, they are low level and should not be used directly.
+```
+go install github.com/pierrec/lz4/cmd/lz4c
+```
 
-For a complete description of an lz4 compressed block, see:
-http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
+Usage
 
-See https://github.com/Cyan4973/lz4 for the reference C implementation.
+```
+Usage of lz4c:
+  -version
+        print the program version
 
+Subcommands:
+Compress the given files or from stdin to stdout.
+compress [arguments] [<file name> ...]
+  -bc
+        enable block checksum
+  -l int
+        compression level (0=fastest)
+  -sc
+        disable stream checksum
+  -size string
+        block max size [64K,256K,1M,4M] (default "4M")
+
+Uncompress the given files or from stdin to stdout.
+uncompress [arguments] [<file name> ...]
+
+```
+
+
+## Example
+
+```
+// Compress and uncompress an input string.
+s := "hello world"
+r := strings.NewReader(s)
+
+// The pipe will uncompress the data from the writer.
+pr, pw := io.Pipe()
+zw := lz4.NewWriter(pw)
+zr := lz4.NewReader(pr)
+
+go func() {
+	// Compress the input string.
+	_, _ = io.Copy(zw, r)
+	_ = zw.Close() // Make sure the writer is closed
+	_ = pw.Close() // Terminate the pipe
+}()
+
+_, _ = io.Copy(os.Stdout, zr)
+
+// Output:
+// hello world
+```
+
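For the low-level block API mentioned in the overview, a sketch (hash table sized per `htSize` in this change; input is illustrative):

```go
data := []byte(strings.Repeat("hello world ", 100))

var ht [1 << 16]int // htSize entries; smaller tables now return an error
buf := make([]byte, lz4.CompressBlockBound(len(data)))
n, err := lz4.CompressBlock(data, buf, ht[:])
if err != nil {
	log.Fatal(err)
}
if n == 0 {
	log.Fatal("incompressible input")
}

out := make([]byte, len(data))
if _, err := lz4.UncompressBlock(buf[:n], out); err != nil {
	log.Fatal(err)
}
```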
+## Contributing
+
+Contributions are very welcome for bug fixes, performance improvements, and more!
+
+- Open an issue with a proper description
+- Send a pull request with appropriate test case(s)
+
+## Contributors
+
+Thanks to all contributors so far:
+
+- [@klauspost](https://github.com/klauspost)
+- [@heidawei](https://github.com/heidawei)
+- [@x4m](https://github.com/x4m)
+- [@Zariel](https://github.com/Zariel)
+- [@edwingeng](https://github.com/edwingeng)
+- [@danielmoy-google](https://github.com/danielmoy-google)
+- [@honda-tatsuya](https://github.com/honda-tatsuya)
+- [@h8liu](https://github.com/h8liu)
+- [@sbinet](https://github.com/sbinet)
+- [@fingon](https://github.com/fingon)
+- [@emfree](https://github.com/emfree)
+- [@lhemala](https://github.com/lhemala)
+- [@connor4312](https://github.com/connor4312)
+- [@oov](https://github.com/oov)
+- [@arya](https://github.com/arya)
+- [@ikkeps](https://github.com/ikkeps)
+
+Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder.
+Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code.
diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go
index d96e0e7..5755cda 100644
--- a/vendor/github.com/pierrec/lz4/block.go
+++ b/vendor/github.com/pierrec/lz4/block.go
@@ -2,21 +2,14 @@
 
 import (
 	"encoding/binary"
-	"errors"
+	"fmt"
+	"math/bits"
 )
 
-var (
-	// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
-	// block is corrupted or the destination buffer is not large enough for the uncompressed data.
-	ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short")
-	// ErrInvalid is returned when reading an invalid LZ4 archive.
-	ErrInvalid = errors.New("lz4: bad magic number")
-)
-
-// blockHash hashes 4 bytes into a value < winSize.
-func blockHash(x uint32) uint32 {
-	const hasher uint32 = 2654435761 // Knuth multiplicative hash.
-	return x * hasher >> hashShift
+// blockHash hashes the lower 6 bytes into a value < htSize.
+func blockHash(x uint64) uint32 {
+	const prime6bytes = 227718039650203
+	return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
 }
 
 // CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible.
@@ -30,17 +23,14 @@
 // The destination buffer must be sized appropriately.
 //
 // An error is returned if the source data is invalid or the destination buffer is too small.
-func UncompressBlock(src, dst []byte) (di int, err error) {
-	sn := len(src)
-	if sn == 0 {
+func UncompressBlock(src, dst []byte) (int, error) {
+	if len(src) == 0 {
 		return 0, nil
 	}
-
-	di = decodeBlock(dst, src)
-	if di < 0 {
-		return 0, ErrInvalidSourceShortBuffer
+	if di := decodeBlock(dst, src); di >= 0 {
+		return di, nil
 	}
-	return di, nil
+	return 0, ErrInvalidSourceShortBuffer
 }
 
 // CompressBlock compresses the source buffer into the destination one.
@@ -51,58 +41,98 @@
 //
 // An error is returned if the destination buffer is too small.
 func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
-	defer func() {
-		if recover() != nil {
-			err = ErrInvalidSourceShortBuffer
-		}
-	}()
+	defer recoverBlock(&err)
 
+	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+	// This significantly speeds up incompressible data and usually has a very small impact on compression.
+	// bytes to skip = 1 + (bytes since last match >> adaptSkipLog),
+	// e.g. 1KiB past the last match the scan advances 1 + (1024>>7) = 9 bytes at a time.
+	const adaptSkipLog = 7
 	sn, dn := len(src)-mfLimit, len(dst)
 	if sn <= 0 || dn == 0 {
 		return 0, nil
 	}
-	var si int
+	if len(hashTable) < htSize {
+		return 0, fmt.Errorf("hash table too small, should be at least %d in size", htSize)
+	}
+	// Prove to the compiler the table has at least htSize elements.
+	// The compiler can see that "uint32() >> hashShift" cannot be out of bounds.
+	hashTable = hashTable[:htSize]
+
+	// si: Current position of the search.
+	// anchor: Position of the current literals.
+	var si, anchor int
 
 	// Fast scan strategy: the hash table only stores the last 4 bytes sequences.
-	// const accInit = 1 << skipStrength
-
-	anchor := si // Position of the current literals.
-	// acc := accInit // Variable step: improves performance on non-compressible data.
-
 	for si < sn {
-		// Hash the next 4 bytes (sequence)...
-		match := binary.LittleEndian.Uint32(src[si:])
+		// Hash the next 6 bytes (sequence)...
+		match := binary.LittleEndian.Uint64(src[si:])
 		h := blockHash(match)
+		h2 := blockHash(match >> 8)
 
+		// We check a match at s, s+1 and s+2 and pick the first one we get.
+		// Checking all 3 only requires us to load the source once.
 		ref := hashTable[h]
+		ref2 := hashTable[h2]
 		hashTable[h] = si
-		if ref >= sn { // Invalid reference (dirty hashtable).
-			si++
-			continue
-		}
+		hashTable[h2] = si + 1
 		offset := si - ref
+
+		// If offset <= 0 we got an old entry in the hash table.
 		if offset <= 0 || offset >= winSize || // Out of window.
-			match != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches.
-			// si += acc >> skipStrength
-			// acc++
-			si++
-			continue
+			uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches.
+			// No match. Start calculating another hash.
+			// The processor can usually do this out-of-order.
+			h = blockHash(match >> 16)
+			ref = hashTable[h]
+
+			// Check the second match at si+1
+			si += 1
+			offset = si - ref2
+
+			if offset <= 0 || offset >= winSize ||
+				uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
+				// No match. Check the third match at si+2
+				si += 1
+				offset = si - ref
+				hashTable[h] = si
+
+				if offset <= 0 || offset >= winSize ||
+					uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) {
+					// Skip one extra byte (at si+3) before we check 3 matches again.
+					si += 2 + (si-anchor)>>adaptSkipLog
+					continue
+				}
+			}
 		}
 
 		// Match found.
-		// acc = accInit
 		lLen := si - anchor // Literal length.
+		// We already matched 4 bytes.
+		mLen := 4
 
-		// Encode match length part 1.
-		si += minMatch
-		mLen := si // Match length has minMatch already.
-		// Find the longest match, first looking by batches of 8 bytes.
-		for si < sn && binary.LittleEndian.Uint64(src[si:]) == binary.LittleEndian.Uint64(src[si-offset:]) {
-			si += 8
+		// Extend backwards if we can, reducing literals.
+		tOff := si - offset - 1
+		for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] {
+			si--
+			tOff--
+			lLen--
+			mLen++
 		}
-		// Then byte by byte.
-		for si < sn && src[si] == src[si-offset] {
-			si++
+
+		// Add the match length, so we continue search at the end.
+		// Use mLen to store the offset base.
+		si, mLen = si+mLen, si+minMatch
+
+		// Find the longest match by looking by batches of 8 bytes.
+		for si < sn {
+			x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
+			if x == 0 {
+				si += 8
+			} else {
+				// Stop at the first non-zero byte.
+				si += bits.TrailingZeros64(x) >> 3
+				break
+			}
 		}
 
 		mLen = si - mLen
@@ -145,6 +175,13 @@
 			dst[di] = byte(mLen)
 			di++
 		}
+		// Check if we can load next values.
+		if si >= sn {
+			break
+		}
+		// Hash match end-2
+		h = blockHash(binary.LittleEndian.Uint64(src[si-2:]))
+		hashTable[h] = si - 2
 	}
 
 	if anchor == 0 {
@@ -176,6 +213,12 @@
 	return di, nil
 }
 
+// blockHashHC hashes 4 bytes into a value < winSize.
+func blockHashHC(x uint32) uint32 {
+	const hasher uint32 = 2654435761 // Knuth multiplicative hash.
+	return x * hasher >> (32 - winSizeLog)
+}
+
 // CompressBlockHC compresses the source buffer src into the destination dst
 // with max search depth (use 0 or negative value for no max).
 //
@@ -185,11 +228,12 @@
 //
 // An error is returned if the destination buffer is too small.
 func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
-	defer func() {
-		if recover() != nil {
-			err = ErrInvalidSourceShortBuffer
-		}
-	}()
+	defer recoverBlock(&err)
+
+	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+	// This significantly speeds up incompressible data and usually has a very small impact on compression.
+	// bytes to skip =  1 + (bytes since last match >> adaptSkipLog)
+	const adaptSkipLog = 7
 
 	sn, dn := len(src)-mfLimit, len(dst)
 	if sn <= 0 || dn == 0 {
@@ -198,7 +242,7 @@
 	var si int
 
 	// hashTable: stores the last position found for a given hash
-	// chaingTable: stores previous positions for a given hash
+	// chainTable: stores previous positions for a given hash
 	var hashTable, chainTable [winSize]int
 
 	if depth <= 0 {
@@ -209,7 +253,7 @@
 	for si < sn {
 		// Hash the next 4 bytes (sequence).
 		match := binary.LittleEndian.Uint32(src[si:])
-		h := blockHash(match)
+		h := blockHashHC(match)
 
 		// Follow the chain until out of window and give the longest match.
 		mLen := 0
@@ -222,11 +266,15 @@
 			}
 			ml := 0
 			// Compare the current position with a previous with the same hash.
-			for ml < sn-si && binary.LittleEndian.Uint64(src[next+ml:]) == binary.LittleEndian.Uint64(src[si+ml:]) {
-				ml += 8
-			}
-			for ml < sn-si && src[next+ml] == src[si+ml] {
-				ml++
+			for ml < sn-si {
+				x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:])
+				if x == 0 {
+					ml += 8
+				} else {
+					// Stop at the first non-zero byte.
+					ml += bits.TrailingZeros64(x) >> 3
+					break
+				}
 			}
 			if ml < minMatch || ml <= mLen {
 				// Match too small (< minMatch) or smaller than the current match.
@@ -243,7 +291,7 @@
 
 		// No match found.
 		if mLen == 0 {
-			si++
+			si += 1 + (si-anchor)>>adaptSkipLog
 			continue
 		}
 
@@ -257,7 +305,7 @@
 		for si, ml := winStart, si+mLen; si < ml; {
 			match >>= 8
 			match |= uint32(src[si+3]) << 24
-			h := blockHash(match)
+			h := blockHashHC(match)
 			chainTable[si&winMask] = hashTable[h]
 			hashTable[h] = si
 			si++
diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go
index b83a19a..919888e 100644
--- a/vendor/github.com/pierrec/lz4/decode_other.go
+++ b/vendor/github.com/pierrec/lz4/decode_other.go
@@ -3,11 +3,10 @@
 package lz4
 
 func decodeBlock(dst, src []byte) (ret int) {
+	const hasError = -2
 	defer func() {
-		// It is now faster to let the runtime panic and recover on out of bound slice access
-		// than checking indices as we go along.
 		if recover() != nil {
-			ret = -2
+			ret = hasError
 		}
 	}()
 
@@ -20,7 +19,7 @@
 		// Literals.
 		if lLen := b >> 4; lLen > 0 {
 			switch {
-			case lLen < 0xF && di+18 < len(dst) && si+16 < len(src):
+			case lLen < 0xF && si+16 < len(src):
 				// Shortcut 1
 				// if we have enough room in src and dst, and the literals length
 				// is small enough (0..14) then copy all 16 bytes, even if not all
@@ -35,7 +34,13 @@
 					mLen += 4
 					if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset {
 						i := di - offset
-						copy(dst[di:], dst[i:i+18])
+						end := i + 18
+						if end > len(dst) {
+							// The remaining buffer may not hold 18 bytes.
+							// See https://github.com/pierrec/lz4/issues/51.
+							end = len(dst)
+						}
+						copy(dst[di:], dst[i:end])
 						si += 2
 						di += mLen
 						continue
@@ -61,7 +66,7 @@
 
 		offset := int(src[si]) | int(src[si+1])<<8
 		if offset == 0 {
-			return -2
+			return hasError
 		}
 		si += 2
 
@@ -90,6 +95,4 @@
 		}
 		di += copy(dst[di:di+mLen], expanded[:mLen])
 	}
-
-	return di
 }
diff --git a/vendor/github.com/pierrec/lz4/errors.go b/vendor/github.com/pierrec/lz4/errors.go
new file mode 100644
index 0000000..1c45d18
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/errors.go
@@ -0,0 +1,30 @@
+package lz4
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	rdebug "runtime/debug"
+)
+
+var (
+	// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
+	// block is corrupted or the destination buffer is not large enough for the uncompressed data.
+	ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short")
+	// ErrInvalid is returned when reading an invalid LZ4 archive.
+	ErrInvalid = errors.New("lz4: bad magic number")
+	// ErrBlockDependency is returned when attempting to decompress an archive created with block dependency.
+	ErrBlockDependency = errors.New("lz4: block dependency not supported")
+	// ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position.
+	ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent")
+)
+
+func recoverBlock(e *error) {
+	if r := recover(); r != nil && *e == nil {
+		if debugFlag {
+			fmt.Fprintln(os.Stderr, r)
+			rdebug.PrintStack()
+		}
+		*e = ErrInvalidSourceShortBuffer
+	}
+}
diff --git a/vendor/github.com/pierrec/lz4/go.mod b/vendor/github.com/pierrec/lz4/go.mod
deleted file mode 100644
index f9f570a..0000000
--- a/vendor/github.com/pierrec/lz4/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/pierrec/lz4
-
-require github.com/pkg/profile v1.2.1
diff --git a/vendor/github.com/pierrec/lz4/go.sum b/vendor/github.com/pierrec/lz4/go.sum
deleted file mode 100644
index 6ca7598..0000000
--- a/vendor/github.com/pierrec/lz4/go.sum
+++ /dev/null
@@ -1,2 +0,0 @@
-github.com/pkg/profile v1.2.1 h1:F++O52m40owAmADcojzM+9gyjmMOY/T4oYJkgFDH8RE=
-github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
diff --git a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
index 850a6fd..7a76a6b 100644
--- a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
+++ b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
@@ -7,14 +7,15 @@
 )
 
 const (
-	prime32_1 uint32 = 2654435761
-	prime32_2 uint32 = 2246822519
-	prime32_3 uint32 = 3266489917
-	prime32_4 uint32 = 668265263
-	prime32_5 uint32 = 374761393
+	prime1 uint32 = 2654435761
+	prime2 uint32 = 2246822519
+	prime3 uint32 = 3266489917
+	prime4 uint32 = 668265263
+	prime5 uint32 = 374761393
 
-	prime32_1plus2 uint32 = 606290984
-	prime32_minus1 uint32 = 1640531535
+	primeMask   = 0xFFFFFFFF
+	prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984
+	prime1minus = uint32((-int64(prime1)) & primeMask)                  // 1640531535
 )
 
 // XXHZero represents an xxhash32 object with seed 0.
@@ -37,10 +38,10 @@
 
 // Reset resets the Hash to its initial state.
 func (xxh *XXHZero) Reset() {
-	xxh.v1 = prime32_1plus2
-	xxh.v2 = prime32_2
+	xxh.v1 = prime1plus2
+	xxh.v2 = prime2
 	xxh.v3 = 0
-	xxh.v4 = prime32_minus1
+	xxh.v4 = prime1minus
 	xxh.totalLen = 0
 	xxh.bufused = 0
 }
@@ -83,20 +84,20 @@
 
 		// fast rotl(13)
 		buf := xxh.buf[:16] // BCE hint.
-		v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime32_2) * prime32_1
-		v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime32_2) * prime32_1
-		v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime32_2) * prime32_1
-		v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime32_2) * prime32_1
+		v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1
+		v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1
+		v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1
+		v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1
 		p = r
 		xxh.bufused = 0
 	}
 
 	for n := n - 16; p <= n; p += 16 {
 		sub := input[p:][:16] //BCE hint for compiler
-		v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime32_2) * prime32_1
-		v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime32_2) * prime32_1
-		v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime32_2) * prime32_1
-		v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime32_2) * prime32_1
+		v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
+		v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
+		v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
+		v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
 	}
 	xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4
 
@@ -112,25 +113,25 @@
 	if h32 >= 16 {
 		h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4)
 	} else {
-		h32 += prime32_5
+		h32 += prime5
 	}
 
 	p := 0
 	n := xxh.bufused
 	buf := xxh.buf
 	for n := n - 4; p <= n; p += 4 {
-		h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime32_3
-		h32 = rol17(h32) * prime32_4
+		h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3
+		h32 = rol17(h32) * prime4
 	}
 	for ; p < n; p++ {
-		h32 += uint32(buf[p]) * prime32_5
-		h32 = rol11(h32) * prime32_1
+		h32 += uint32(buf[p]) * prime5
+		h32 = rol11(h32) * prime1
 	}
 
 	h32 ^= h32 >> 15
-	h32 *= prime32_2
+	h32 *= prime2
 	h32 ^= h32 >> 13
-	h32 *= prime32_3
+	h32 *= prime3
 	h32 ^= h32 >> 16
 
 	return h32
@@ -142,19 +143,19 @@
 	h32 := uint32(n)
 
 	if n < 16 {
-		h32 += prime32_5
+		h32 += prime5
 	} else {
-		v1 := prime32_1plus2
-		v2 := prime32_2
+		v1 := prime1plus2
+		v2 := prime2
 		v3 := uint32(0)
-		v4 := prime32_minus1
+		v4 := prime1minus
 		p := 0
 		for n := n - 16; p <= n; p += 16 {
 			sub := input[p:][:16] //BCE hint for compiler
-			v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime32_2) * prime32_1
-			v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime32_2) * prime32_1
-			v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime32_2) * prime32_1
-			v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime32_2) * prime32_1
+			v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
+			v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
+			v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
+			v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
 		}
 		input = input[p:]
 		n -= p
@@ -163,19 +164,19 @@
 
 	p := 0
 	for n := n - 4; p <= n; p += 4 {
-		h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime32_3
-		h32 = rol17(h32) * prime32_4
+		h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3
+		h32 = rol17(h32) * prime4
 	}
 	for p < n {
-		h32 += uint32(input[p]) * prime32_5
-		h32 = rol11(h32) * prime32_1
+		h32 += uint32(input[p]) * prime5
+		h32 = rol11(h32) * prime1
 		p++
 	}
 
 	h32 ^= h32 >> 15
-	h32 *= prime32_2
+	h32 *= prime2
 	h32 ^= h32 >> 13
-	h32 *= prime32_3
+	h32 *= prime3
 	h32 ^= h32 >> 16
 
 	return h32
@@ -183,12 +184,12 @@
 
 // Uint32Zero hashes x with seed 0.
 func Uint32Zero(x uint32) uint32 {
-	h := prime32_5 + 4 + x*prime32_3
-	h = rol17(h) * prime32_4
+	h := prime5 + 4 + x*prime3
+	h = rol17(h) * prime4
 	h ^= h >> 15
-	h *= prime32_2
+	h *= prime2
 	h ^= h >> 13
-	h *= prime32_3
+	h *= prime3
 	h ^= h >> 16
 	return h
 }
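
The prime constant rename (prime32_N to primeN, plus the derived prime1plus2 and prime1minus seeds) is purely cosmetic; the published xxHash32 constants and the avalanche finalization are unchanged. As a sanity check, here is a minimal standalone Go sketch of the renamed Uint32Zero path; the constants below are the well-known xxHash32 primes, and rol17 is an ordinary 17-bit left rotation:

```go
package main

import (
	"fmt"
	"math/bits"
)

// The published xxHash32 prime constants. Only their identifiers
// changed in this diff; the values did not.
const (
	prime1 uint32 = 2654435761
	prime2 uint32 = 2246822519
	prime3 uint32 = 3266489917
	prime4 uint32 = 668265263
	prime5 uint32 = 374761393
)

// uint32Zero mirrors the vendored Uint32Zero: hash one
// little-endian uint32 with seed 0, then run the avalanche.
func uint32Zero(x uint32) uint32 {
	h := prime5 + 4 + x*prime3
	h = bits.RotateLeft32(h, 17) * prime4
	h ^= h >> 15
	h *= prime2
	h ^= h >> 13
	h *= prime3
	h ^= h >> 16
	return h
}

func main() {
	fmt.Printf("xxh32(42) = %08x\n", uint32Zero(42))
}
```
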
diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go
index 3580275..cdbf961 100644
--- a/vendor/github.com/pierrec/lz4/lz4.go
+++ b/vendor/github.com/pierrec/lz4/lz4.go
@@ -30,27 +30,25 @@
 	// hashLog determines the size of the hash table used to quickly find a previous match position.
 	// Its value influences the compression speed and memory usage, the lower the faster,
 	// but at the expense of the compression ratio.
-	// 16 seems to be the best compromise.
-	hashLog       = 16
-	hashTableSize = 1 << hashLog
-	hashShift     = uint((minMatch * 8) - hashLog)
+	// 16 seems to be the best compromise for fast compression.
+	hashLog = 16
+	htSize  = 1 << hashLog
 
-	mfLimit      = 8 + minMatch // The last match cannot start within the last 12 bytes.
-	skipStrength = 6            // variable step for fast scan
+	mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes.
 )
 
 // map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb.
-var (
-	bsMapID    = map[byte]int{4: 64 << 10, 5: 256 << 10, 6: 1 << 20, 7: 4 << 20}
-	bsMapValue = make(map[int]byte, len(bsMapID))
+const (
+	blockSize64K  = 64 << 10
+	blockSize256K = 256 << 10
+	blockSize1M   = 1 << 20
+	blockSize4M   = 4 << 20
 )
 
-// Reversed.
-func init() {
-	for i, v := range bsMapID {
-		bsMapValue[v] = i
-	}
-}
+var (
+	bsMapID    = map[byte]int{4: blockSize64K, 5: blockSize256K, 6: blockSize1M, 7: blockSize4M}
+	bsMapValue = map[int]byte{blockSize64K: 4, blockSize256K: 5, blockSize1M: 6, blockSize4M: 7}
+)
 
 // Header describes the various flags that can be set on a Writer or obtained from a Reader.
 // The default values match those of the LZ4 frame format definition
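
Moving the block sizes into named constants also lets both lookup directions be written as literals, removing the init()-time reversal. A tiny standalone sketch (these identifiers are package-private in the vendored code, so they are redeclared here for illustration):

```go
package main

import "fmt"

// Block sizes named as constants, as in the diff above.
const (
	blockSize64K  = 64 << 10
	blockSize256K = 256 << 10
	blockSize1M   = 1 << 20
	blockSize4M   = 4 << 20
)

// Both directions spelled out literally: no init() work, and any
// mismatch between the two maps is visible in one place.
var (
	bsMapID    = map[byte]int{4: blockSize64K, 5: blockSize256K, 6: blockSize1M, 7: blockSize4M}
	bsMapValue = map[int]byte{blockSize64K: 4, blockSize256K: 5, blockSize1M: 6, blockSize4M: 7}
)

func main() {
	fmt.Println(bsMapID[7])          // 4194304
	fmt.Println(bsMapValue[256<<10]) // 5
}
```
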
diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go
index f08db47..126b792 100644
--- a/vendor/github.com/pierrec/lz4/reader.go
+++ b/vendor/github.com/pierrec/lz4/reader.go
@@ -14,6 +14,9 @@
 // The Header may change between Read() calls in case of concatenated frames.
 type Reader struct {
 	Header
+	// Handler called when a block has been successfully read.
+	// It provides the number of bytes read.
+	OnBlockDone func(size int)
 
 	buf      [8]byte       // Scrap buffer.
 	pos      int64         // Current position in src.
@@ -22,6 +25,8 @@
 	data     []byte        // Uncompressed data.
 	idx      int           // Index of unread bytes into data.
 	checksum xxh32.XXHZero // Frame hash.
+	skip     int64         // Bytes to skip before next read.
+	dpos     int64         // Position in dest (uncompressed output).
 }
 
 // NewReader returns a new LZ4 frame decoder.
@@ -76,7 +81,7 @@
 		return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version)
 	}
 	if b>>5&1 == 0 {
-		return fmt.Errorf("lz4: block dependency not supported")
+		return ErrBlockDependency
 	}
 	z.BlockChecksum = b>>4&1 > 0
 	frameSize := b>>3&1 > 0
@@ -101,7 +106,7 @@
 	z.data = z.zdata[:cap(z.zdata)][bSize:]
 	z.idx = len(z.data)
 
-	z.checksum.Write(buf[0:2])
+	_, _ = z.checksum.Write(buf[0:2])
 
 	if frameSize {
 		buf := buf[:8]
@@ -110,7 +115,7 @@
 		}
 		z.Size = binary.LittleEndian.Uint64(buf)
 		z.pos += 8
-		z.checksum.Write(buf)
+		_, _ = z.checksum.Write(buf)
 	}
 
 	// Header checksum.
@@ -158,6 +163,9 @@
 		if debugFlag {
 			debug("reading block from writer")
 		}
+		// Reset uncompressed buffer
+		z.data = z.zdata[:cap(z.zdata)][len(z.zdata):]
+
 		// Block length: 0 = end of frame, highest bit set: uncompressed.
 		bLen, err := z.readUint32()
 		if err != nil {
@@ -208,6 +216,9 @@
 				return 0, err
 			}
 			z.pos += int64(bLen)
+			if z.OnBlockDone != nil {
+				z.OnBlockDone(int(bLen))
+			}
 
 			if z.BlockChecksum {
 				checksum, err := z.readUint32()
@@ -252,10 +263,13 @@
 				return 0, err
 			}
 			z.data = z.data[:n]
+			if z.OnBlockDone != nil {
+				z.OnBlockDone(n)
+			}
 		}
 
 		if !z.NoChecksum {
-			z.checksum.Write(z.data)
+			_, _ = z.checksum.Write(z.data)
 			if debugFlag {
 				debug("current frame checksum %x", z.checksum.Sum32())
 			}
@@ -263,8 +277,20 @@
 		z.idx = 0
 	}
 
+	if z.skip > int64(len(z.data[z.idx:])) {
+		z.skip -= int64(len(z.data[z.idx:]))
+		z.dpos += int64(len(z.data[z.idx:]))
+		z.idx = len(z.data)
+		return 0, nil
+	}
+
+	z.idx += int(z.skip)
+	z.dpos += z.skip
+	z.skip = 0
+
 	n := copy(buf, z.data[z.idx:])
 	z.idx += n
+	z.dpos += int64(n)
 	if debugFlag {
 		debug("copied %d bytes to input", n)
 	}
@@ -272,6 +298,20 @@
 	return n, nil
 }
 
+// Seek implements io.Seeker, but supports seeking forward from the current
+// position only. Any other seek returns an error. This allows skipping output
+// bytes that aren't needed, which in some scenarios is faster than reading
+// and discarding them.
+// Note that this may cause subsequent calls to Read() to return 0 bytes if all
+// of the data they would have returned has been skipped.
+func (z *Reader) Seek(offset int64, whence int) (int64, error) {
+	if offset < 0 || whence != io.SeekCurrent {
+		return z.dpos + z.skip, ErrUnsupportedSeek
+	}
+	z.skip += offset
+	return z.dpos + z.skip, nil
+}
+
 // Reset discards the Reader's state and makes it equivalent to the
 // result of its original state from NewReader, but reading from r instead.
 // This permits reusing a Reader rather than allocating a new one.
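
A hedged usage sketch of the two Reader additions, assuming the vendored import path github.com/pierrec/lz4: OnBlockDone fires once per decoded block, and Seek records a forward skip that is applied on the next Read, so unwanted output is never copied out. Only io.SeekCurrent with a non-negative offset is accepted; everything else returns ErrUnsupportedSeek.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"log"

	"github.com/pierrec/lz4"
)

func main() {
	// Build a frame to read back.
	var frame bytes.Buffer
	zw := lz4.NewWriter(&frame)
	if _, err := zw.Write(bytes.Repeat([]byte("0123456789"), 1000)); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	zr := lz4.NewReader(&frame)
	zr.OnBlockDone = func(n int) { fmt.Printf("decoded block: %d bytes\n", n) }

	// Skip the first 9000 uncompressed bytes without copying them out.
	if _, err := zr.Seek(9000, io.SeekCurrent); err != nil {
		log.Fatal(err)
	}
	rest, err := ioutil.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes after the skip\n", len(rest)) // 1000
}
```
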
diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go
index 0120438..2cc8d95 100644
--- a/vendor/github.com/pierrec/lz4/writer.go
+++ b/vendor/github.com/pierrec/lz4/writer.go
@@ -11,6 +11,9 @@
 // Writer implements the LZ4 frame encoder.
 type Writer struct {
 	Header
+	// Handler called when a block has been successfully written out.
+	// It provides the number of bytes written.
+	OnBlockDone func(size int)
 
 	buf       [19]byte      // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
 	dst       io.Writer     // Destination.
@@ -43,11 +46,13 @@
 	}
 	// Allocate the compressed/uncompressed buffers.
 	// The compressed buffer cannot exceed the uncompressed one.
-	if n := 2 * bSize; cap(z.zdata) < n {
-		z.zdata = make([]byte, n, n)
+	if cap(z.zdata) < bSize {
+		// Only allocate if there is not enough capacity.
+		// Allocate both buffers at once.
+		z.zdata = make([]byte, 2*bSize)
 	}
-	z.zdata = z.zdata[:bSize]
-	z.data = z.zdata[:cap(z.zdata)][bSize:]
+	z.data = z.zdata[:bSize]                 // Uncompressed buffer is the first half.
+	z.zdata = z.zdata[:cap(z.zdata)][bSize:] // Compressed buffer is the second half.
 	z.idx = 0
 
 	// Size is optional.
@@ -182,24 +187,26 @@
 	if err := z.writeUint32(bLen); err != nil {
 		return err
 	}
-	if _, err := z.dst.Write(zdata); err != nil {
+	written, err := z.dst.Write(zdata)
+	if err != nil {
 		return err
 	}
+	if h := z.OnBlockDone; h != nil {
+		h(written)
+	}
 
-	if z.BlockChecksum {
-		checksum := xxh32.ChecksumZero(zdata)
+	if !z.BlockChecksum {
 		if debugFlag {
-			debug("block checksum %x", checksum)
+			debug("current frame checksum %x", z.checksum.Sum32())
 		}
-		if err := z.writeUint32(checksum); err != nil {
-			return err
-		}
+		return nil
 	}
+	checksum := xxh32.ChecksumZero(zdata)
 	if debugFlag {
-		debug("current frame checksum %x", z.checksum.Sum32())
+		debug("block checksum %x", checksum)
+		defer func() { debug("current frame checksum %x", z.checksum.Sum32()) }()
 	}
-
-	return nil
+	return z.writeUint32(checksum)
 }
 
 // Flush flushes any pending compressed data to the underlying writer.
@@ -213,7 +220,11 @@
 		return nil
 	}
 
-	return z.compressBlock(z.data[:z.idx])
+	if err := z.compressBlock(z.data[:z.idx]); err != nil {
+		return err
+	}
+	z.idx = 0
+	return nil
 }
 
 // Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer.
@@ -223,7 +234,6 @@
 			return err
 		}
 	}
-
 	if err := z.Flush(); err != nil {
 		return err
 	}
@@ -234,16 +244,14 @@
 	if err := z.writeUint32(0); err != nil {
 		return err
 	}
-	if !z.NoChecksum {
-		checksum := z.checksum.Sum32()
-		if debugFlag {
-			debug("stream checksum %x", checksum)
-		}
-		if err := z.writeUint32(checksum); err != nil {
-			return err
-		}
+	if z.NoChecksum {
+		return nil
 	}
-	return nil
+	checksum := z.checksum.Sum32()
+	if debugFlag {
+		debug("stream checksum %x", checksum)
+	}
+	return z.writeUint32(checksum)
 }
 
 // Reset clears the state of the Writer z such that it is equivalent to its
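
A matching sketch for the Writer side, under the same import assumption: OnBlockDone reports the compressed size of each block written out, and because Flush now resets the buffer index, a repeated Flush with nothing newly buffered is a no-op instead of re-compressing the previous block.

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/pierrec/lz4"
)

func main() {
	var out bytes.Buffer
	zw := lz4.NewWriter(&out)
	zw.OnBlockDone = func(n int) { fmt.Printf("wrote compressed block: %d bytes\n", n) }

	if _, err := zw.Write([]byte("hello, lz4")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Flush(); err != nil { // compresses the buffered bytes
		log.Fatal(err)
	}
	if err := zw.Flush(); err != nil { // no-op: the buffer index was reset
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("frame size:", out.Len())
}
```
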
diff --git a/vendor/github.com/rcrowley/go-metrics/.travis.yml b/vendor/github.com/rcrowley/go-metrics/.travis.yml
index 117763e..aead076 100644
--- a/vendor/github.com/rcrowley/go-metrics/.travis.yml
+++ b/vendor/github.com/rcrowley/go-metrics/.travis.yml
@@ -1,13 +1,16 @@
 language: go
 
 go:
-    - 1.3
-    - 1.4
-    - 1.5
-    - 1.6
-    - 1.7
-    - 1.8
-    - 1.9
+    - "1.3"
+    - "1.4"
+    - "1.5"
+    - "1.6"
+    - "1.7"
+    - "1.8"
+    - "1.9"
+    - "1.10"
+    - "1.11"
+    - "1.12"
 
 script:
     - ./validate.sh
diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md
index b7356b5..27ddfee 100644
--- a/vendor/github.com/rcrowley/go-metrics/README.md
+++ b/vendor/github.com/rcrowley/go-metrics/README.md
@@ -157,6 +157,7 @@
 
 Clients are available for the following destinations:
 
+* AppOptics - https://github.com/ysamlan/go-metrics-appoptics
 * Librato - https://github.com/mihasya/go-metrics-librato
 * Graphite - https://github.com/cyberdelia/go-metrics-graphite
 * InfluxDB - https://github.com/vrischmann/go-metrics-influxdb
@@ -166,3 +167,5 @@
 * SignalFX - https://github.com/pascallouisperez/go-metrics-signalfx
 * Honeycomb - https://github.com/getspine/go-metrics-honeycomb
 * Wavefront - https://github.com/wavefrontHQ/go-metrics-wavefront
+* Open-Falcon - https://github.com/g4zhuj/go-metrics-falcon
+* AWS CloudWatch - https://github.com/savaki/cloudmetrics
diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go
index 043ccef..179e5aa 100644
--- a/vendor/github.com/rcrowley/go-metrics/debug.go
+++ b/vendor/github.com/rcrowley/go-metrics/debug.go
@@ -2,6 +2,7 @@
 
 import (
 	"runtime/debug"
+	"sync"
 	"time"
 )
 
@@ -16,7 +17,8 @@
 		}
 		ReadGCStats Timer
 	}
-	gcStats debug.GCStats
+	gcStats                  debug.GCStats
+	registerDebugMetricsOnce = sync.Once{}
 )
 
 // Capture new values for the Go garbage collector statistics exported in
@@ -54,19 +56,21 @@
 // debug.GCStats.  The metrics are named by their fully-qualified Go symbols,
 // i.e. debug.GCStats.PauseTotal.
 func RegisterDebugGCStats(r Registry) {
-	debugMetrics.GCStats.LastGC = NewGauge()
-	debugMetrics.GCStats.NumGC = NewGauge()
-	debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015))
-	//debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015))
-	debugMetrics.GCStats.PauseTotal = NewGauge()
-	debugMetrics.ReadGCStats = NewTimer()
+	registerDebugMetricsOnce.Do(func() {
+		debugMetrics.GCStats.LastGC = NewGauge()
+		debugMetrics.GCStats.NumGC = NewGauge()
+		debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015))
+		//debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015))
+		debugMetrics.GCStats.PauseTotal = NewGauge()
+		debugMetrics.ReadGCStats = NewTimer()
 
-	r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC)
-	r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC)
-	r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause)
-	//r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles)
-	r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal)
-	r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats)
+		r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC)
+		r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC)
+		r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause)
+		//r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles)
+		r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal)
+		r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats)
+	})
 }
 
 // Allocate an initial slice for gcStats.Pause to avoid allocations during
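
With the sync.Once guard, RegisterDebugGCStats (and RegisterRuntimeMemStats in runtime.go below) becomes idempotent: repeated calls no longer re-allocate the gauges and histograms out from under running collectors. The flip side is that a second call does nothing at all, even for a different Registry. A short sketch of the resulting contract:

```go
package main

import (
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.DefaultRegistry

	// First call allocates and registers the debug.GCStats metrics.
	metrics.RegisterDebugGCStats(r)
	// Second call is swallowed by the sync.Once, including when it
	// is handed a different registry; that is the guard's trade-off.
	metrics.RegisterDebugGCStats(metrics.NewRegistry())

	go metrics.CaptureDebugGCStats(r, 5*time.Second)
	time.Sleep(time.Second)
}
```
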
diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go
index f8074c0..2614a0a 100644
--- a/vendor/github.com/rcrowley/go-metrics/log.go
+++ b/vendor/github.com/rcrowley/go-metrics/log.go
@@ -8,17 +8,37 @@
 	Printf(format string, v ...interface{})
 }
 
+// Log outputs each metric in the given registry periodically using the given logger.
 func Log(r Registry, freq time.Duration, l Logger) {
 	LogScaled(r, freq, time.Nanosecond, l)
 }
 
-// Output each metric in the given registry periodically using the given
+// LogOnCue outputs each metric in the given registry on demand through the channel
+// using the given logger.
+func LogOnCue(r Registry, ch chan interface{}, l Logger) {
+	LogScaledOnCue(r, ch, time.Nanosecond, l)
+}
+
+// LogScaled outputs each metric in the given registry periodically using the given
 // logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos.
 func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
+	ch := make(chan interface{})
+	go func(channel chan interface{}) {
+		for _ = range time.Tick(freq) {
+			channel <- struct{}{}
+		}
+	}(ch)
+	LogScaledOnCue(r, ch, scale, l)
+}
+
+// LogScaledOnCue outputs each metric in the given registry on demand through the channel
+// using the given logger. Print timings in `scale` units (eg time.Millisecond) rather
+// than nanos.
+func LogScaledOnCue(r Registry, ch chan interface{}, scale time.Duration, l Logger) {
 	du := float64(scale)
 	duSuffix := scale.String()[1:]
 
-	for _ = range time.Tick(freq) {
+	for _ = range ch {
 		r.Each(func(name string, i interface{}) {
 			switch metric := i.(type) {
 			case Counter:
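
LogScaledOnCue generalizes the old ticker loop: the caller owns the cue channel and decides exactly when a snapshot is logged, while LogScaled keeps its periodic behaviour by feeding that channel from time.Tick. A usage sketch (the counter setup is illustrative):

```go
package main

import (
	"log"
	"os"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	c := metrics.GetOrRegisterCounter("requests", nil) // nil means DefaultRegistry
	c.Inc(47)

	logger := log.New(os.Stderr, "metrics: ", log.Lmicroseconds)

	// The cue channel decouples "when to log" from the logging
	// goroutine: send on it whenever a snapshot is wanted.
	cue := make(chan interface{})
	go metrics.LogScaledOnCue(metrics.DefaultRegistry, cue, time.Millisecond, logger)

	cue <- struct{}{} // log one snapshot, on demand
	time.Sleep(100 * time.Millisecond)
}
```
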
diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go
index b3bab64..a8e6722 100644
--- a/vendor/github.com/rcrowley/go-metrics/registry.go
+++ b/vendor/github.com/rcrowley/go-metrics/registry.go
@@ -64,8 +64,10 @@
 
 // Call the given function for each registered metric.
 func (r *StandardRegistry) Each(f func(string, interface{})) {
-	for name, i := range r.registered() {
-		f(name, i)
+	metrics := r.registered()
+	for i := range metrics {
+		kv := &metrics[i]
+		f(kv.name, kv.value)
 	}
 }
 
@@ -211,12 +213,20 @@
 	return nil
 }
 
-func (r *StandardRegistry) registered() map[string]interface{} {
-	r.mutex.Lock()
-	defer r.mutex.Unlock()
-	metrics := make(map[string]interface{}, len(r.metrics))
+type metricKV struct {
+	name  string
+	value interface{}
+}
+
+func (r *StandardRegistry) registered() []metricKV {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	metrics := make([]metricKV, 0, len(r.metrics))
 	for name, i := range r.metrics {
-		metrics[name] = i
+		metrics = append(metrics, metricKV{
+			name:  name,
+			value: i,
+		})
 	}
 	return metrics
 }
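
The Each rewrite swaps the per-call map copy for a slice snapshot taken under what is now, judging by the RLock calls, a read lock on the registry, so concurrent Each callers stop serializing on the mutex and the per-call allocation is one slice instead of a map. As before, the callback runs with no lock held. A small sketch:

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	r.Register("a", metrics.NewCounter())
	r.Register("b", metrics.NewGauge())

	// Each iterates a snapshot, not the live map, so the callback
	// executes without holding the registry lock.
	r.Each(func(name string, m interface{}) {
		fmt.Printf("%s: %T\n", name, m)
	})
}
```
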
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go
index 11c6b78..4047ab3 100644
--- a/vendor/github.com/rcrowley/go-metrics/runtime.go
+++ b/vendor/github.com/rcrowley/go-metrics/runtime.go
@@ -3,6 +3,7 @@
 import (
 	"runtime"
 	"runtime/pprof"
+	"sync"
 	"time"
 )
 
@@ -49,7 +50,8 @@
 	numGC       uint32
 	numCgoCalls int64
 
-	threadCreateProfile = pprof.Lookup("threadcreate")
+	threadCreateProfile        = pprof.Lookup("threadcreate")
+	registerRuntimeMetricsOnce = sync.Once{}
 )
 
 // Capture new values for the Go runtime statistics exported in
@@ -146,67 +148,69 @@
 // specifically runtime.MemStats.  The runtimeMetrics are named by their
 // fully-qualified Go symbols, i.e. runtime.MemStats.Alloc.
 func RegisterRuntimeMemStats(r Registry) {
-	runtimeMetrics.MemStats.Alloc = NewGauge()
-	runtimeMetrics.MemStats.BuckHashSys = NewGauge()
-	runtimeMetrics.MemStats.DebugGC = NewGauge()
-	runtimeMetrics.MemStats.EnableGC = NewGauge()
-	runtimeMetrics.MemStats.Frees = NewGauge()
-	runtimeMetrics.MemStats.HeapAlloc = NewGauge()
-	runtimeMetrics.MemStats.HeapIdle = NewGauge()
-	runtimeMetrics.MemStats.HeapInuse = NewGauge()
-	runtimeMetrics.MemStats.HeapObjects = NewGauge()
-	runtimeMetrics.MemStats.HeapReleased = NewGauge()
-	runtimeMetrics.MemStats.HeapSys = NewGauge()
-	runtimeMetrics.MemStats.LastGC = NewGauge()
-	runtimeMetrics.MemStats.Lookups = NewGauge()
-	runtimeMetrics.MemStats.Mallocs = NewGauge()
-	runtimeMetrics.MemStats.MCacheInuse = NewGauge()
-	runtimeMetrics.MemStats.MCacheSys = NewGauge()
-	runtimeMetrics.MemStats.MSpanInuse = NewGauge()
-	runtimeMetrics.MemStats.MSpanSys = NewGauge()
-	runtimeMetrics.MemStats.NextGC = NewGauge()
-	runtimeMetrics.MemStats.NumGC = NewGauge()
-	runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64()
-	runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
-	runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
-	runtimeMetrics.MemStats.StackInuse = NewGauge()
-	runtimeMetrics.MemStats.StackSys = NewGauge()
-	runtimeMetrics.MemStats.Sys = NewGauge()
-	runtimeMetrics.MemStats.TotalAlloc = NewGauge()
-	runtimeMetrics.NumCgoCall = NewGauge()
-	runtimeMetrics.NumGoroutine = NewGauge()
-	runtimeMetrics.NumThread = NewGauge()
-	runtimeMetrics.ReadMemStats = NewTimer()
+	registerRuntimeMetricsOnce.Do(func() {
+		runtimeMetrics.MemStats.Alloc = NewGauge()
+		runtimeMetrics.MemStats.BuckHashSys = NewGauge()
+		runtimeMetrics.MemStats.DebugGC = NewGauge()
+		runtimeMetrics.MemStats.EnableGC = NewGauge()
+		runtimeMetrics.MemStats.Frees = NewGauge()
+		runtimeMetrics.MemStats.HeapAlloc = NewGauge()
+		runtimeMetrics.MemStats.HeapIdle = NewGauge()
+		runtimeMetrics.MemStats.HeapInuse = NewGauge()
+		runtimeMetrics.MemStats.HeapObjects = NewGauge()
+		runtimeMetrics.MemStats.HeapReleased = NewGauge()
+		runtimeMetrics.MemStats.HeapSys = NewGauge()
+		runtimeMetrics.MemStats.LastGC = NewGauge()
+		runtimeMetrics.MemStats.Lookups = NewGauge()
+		runtimeMetrics.MemStats.Mallocs = NewGauge()
+		runtimeMetrics.MemStats.MCacheInuse = NewGauge()
+		runtimeMetrics.MemStats.MCacheSys = NewGauge()
+		runtimeMetrics.MemStats.MSpanInuse = NewGauge()
+		runtimeMetrics.MemStats.MSpanSys = NewGauge()
+		runtimeMetrics.MemStats.NextGC = NewGauge()
+		runtimeMetrics.MemStats.NumGC = NewGauge()
+		runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64()
+		runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
+		runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
+		runtimeMetrics.MemStats.StackInuse = NewGauge()
+		runtimeMetrics.MemStats.StackSys = NewGauge()
+		runtimeMetrics.MemStats.Sys = NewGauge()
+		runtimeMetrics.MemStats.TotalAlloc = NewGauge()
+		runtimeMetrics.NumCgoCall = NewGauge()
+		runtimeMetrics.NumGoroutine = NewGauge()
+		runtimeMetrics.NumThread = NewGauge()
+		runtimeMetrics.ReadMemStats = NewTimer()
 
-	r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
-	r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys)
-	r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC)
-	r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC)
-	r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees)
-	r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc)
-	r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle)
-	r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse)
-	r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects)
-	r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased)
-	r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys)
-	r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC)
-	r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups)
-	r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs)
-	r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse)
-	r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys)
-	r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse)
-	r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
-	r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
-	r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
-	r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction)
-	r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
-	r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
-	r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
-	r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys)
-	r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys)
-	r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
-	r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
-	r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
-	r.Register("runtime.NumThread", runtimeMetrics.NumThread)
-	r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
+		r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
+		r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys)
+		r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC)
+		r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC)
+		r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees)
+		r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc)
+		r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle)
+		r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse)
+		r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects)
+		r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased)
+		r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys)
+		r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC)
+		r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups)
+		r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs)
+		r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse)
+		r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys)
+		r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse)
+		r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
+		r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
+		r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
+		r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction)
+		r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
+		r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
+		r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
+		r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys)
+		r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys)
+		r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
+		r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
+		r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
+		r.Register("runtime.NumThread", runtimeMetrics.NumThread)
+		r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
+	})
 }
diff --git a/vendor/github.com/spf13/pflag/.gitignore b/vendor/github.com/spf13/pflag/.gitignore
deleted file mode 100644
index c3da290..0000000
--- a/vendor/github.com/spf13/pflag/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-.idea/*
-
diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml
deleted file mode 100644
index f8a63b3..0000000
--- a/vendor/github.com/spf13/pflag/.travis.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-sudo: false
-
-language: go
-
-go:
-  - 1.7.3
-  - 1.8.1
-  - tip
-
-matrix:
-  allow_failures:
-    - go: tip
-
-install:
-  - go get github.com/golang/lint/golint
-  - export PATH=$GOPATH/bin:$PATH
-  - go install ./...
-
-script:
-  - verify/all.sh -v
-  - go test ./...
diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md
deleted file mode 100644
index b052414..0000000
--- a/vendor/github.com/spf13/pflag/README.md
+++ /dev/null
@@ -1,296 +0,0 @@
-[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag)
-[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag)
-[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag)
-
-## Description
-
-pflag is a drop-in replacement for Go's flag package, implementing
-POSIX/GNU-style --flags.
-
-pflag is compatible with the [GNU extensions to the POSIX recommendations
-for command-line options][1]. For a more precise description, see the
-"Command-line flag syntax" section below.
-
-[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
-
-pflag is available under the same style of BSD license as the Go language,
-which can be found in the LICENSE file.
-
-## Installation
-
-pflag is available using the standard `go get` command.
-
-Install by running:
-
-    go get github.com/spf13/pflag
-
-Run tests by running:
-
-    go test github.com/spf13/pflag
-
-## Usage
-
-pflag is a drop-in replacement of Go's native flag package. If you import
-pflag under the name "flag" then all code should continue to function
-with no changes.
-
-``` go
-import flag "github.com/spf13/pflag"
-```
-
-There is one exception to this: if you directly instantiate the Flag struct
-there is one more field "Shorthand" that you will need to set.
-Most code never instantiates this struct directly, and instead uses
-functions such as String(), BoolVar(), and Var(), and is therefore
-unaffected.
-
-Define flags using flag.String(), Bool(), Int(), etc.
-
-This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
-
-``` go
-var ip *int = flag.Int("flagname", 1234, "help message for flagname")
-```
-
-If you like, you can bind the flag to a variable using the Var() functions.
-
-``` go
-var flagvar int
-func init() {
-    flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
-}
-```
-
-Or you can create custom flags that satisfy the Value interface (with
-pointer receivers) and couple them to flag parsing by
-
-``` go
-flag.Var(&flagVal, "name", "help message for flagname")
-```
-
-For such flags, the default value is just the initial value of the variable.
-
-After all flags are defined, call
-
-``` go
-flag.Parse()
-```
-
-to parse the command line into the defined flags.
-
-Flags may then be used directly. If you're using the flags themselves,
-they are all pointers; if you bind to variables, they're values.
-
-``` go
-fmt.Println("ip has value ", *ip)
-fmt.Println("flagvar has value ", flagvar)
-```
-
-There are helpers function to get values later if you have the FlagSet but
-it was difficult to keep up with all of the flag pointers in your code.
-If you have a pflag.FlagSet with a flag called 'flagname' of type int you
-can use GetInt() to get the int value. But notice that 'flagname' must exist
-and it must be an int. GetString("flagname") will fail.
-
-``` go
-i, err := flagset.GetInt("flagname")
-```
-
-After parsing, the arguments after the flag are available as the
-slice flag.Args() or individually as flag.Arg(i).
-The arguments are indexed from 0 through flag.NArg()-1.
-
-The pflag package also defines some new functions that are not in flag,
-that give one-letter shorthands for flags. You can use these by appending
-'P' to the name of any function that defines a flag.
-
-``` go
-var ip = flag.IntP("flagname", "f", 1234, "help message")
-var flagvar bool
-func init() {
-	flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
-}
-flag.VarP(&flagVal, "varname", "v", "help message")
-```
-
-Shorthand letters can be used with single dashes on the command line.
-Boolean shorthand flags can be combined with other shorthand flags.
-
-The default set of command-line flags is controlled by
-top-level functions.  The FlagSet type allows one to define
-independent sets of flags, such as to implement subcommands
-in a command-line interface. The methods of FlagSet are
-analogous to the top-level functions for the command-line
-flag set.
-
-## Setting no option default values for flags
-
-After you create a flag it is possible to set the pflag.NoOptDefVal for
-the given flag. Doing this changes the meaning of the flag slightly. If
-a flag has a NoOptDefVal and the flag is set on the command line without
-an option the flag will be set to the NoOptDefVal. For example given:
-
-``` go
-var ip = flag.IntP("flagname", "f", 1234, "help message")
-flag.Lookup("flagname").NoOptDefVal = "4321"
-```
-
-Would result in something like
-
-| Parsed Arguments | Resulting Value |
-| -------------    | -------------   |
-| --flagname=1357  | ip=1357         |
-| --flagname       | ip=4321         |
-| [nothing]        | ip=1234         |
-
-## Command line flag syntax
-
-```
---flag    // boolean flags, or flags with no option default values
---flag x  // only on flags without a default value
---flag=x
-```
-
-Unlike the flag package, a single dash before an option means something
-different than a double dash. Single dashes signify a series of shorthand
-letters for flags. All but the last shorthand letter must be boolean flags
-or a flag with a default value
-
-```
-// boolean or flags where the 'no option default value' is set
--f
--f=true
--abc
-but
--b true is INVALID
-
-// non-boolean and flags without a 'no option default value'
--n 1234
--n=1234
--n1234
-
-// mixed
--abcs "hello"
--absd="hello"
--abcs1234
-```
-
-Flag parsing stops after the terminator "--". Unlike the flag package,
-flags can be interspersed with arguments anywhere on the command line
-before this terminator.
-
-Integer flags accept 1234, 0664, 0x1234 and may be negative.
-Boolean flags (in their long form) accept 1, 0, t, f, true, false,
-TRUE, FALSE, True, False.
-Duration flags accept any input valid for time.ParseDuration.
-
-## Mutating or "Normalizing" Flag names
-
-It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.
-
-**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
-
-``` go
-func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
-	from := []string{"-", "_"}
-	to := "."
-	for _, sep := range from {
-		name = strings.Replace(name, sep, to, -1)
-	}
-	return pflag.NormalizedName(name)
-}
-
-myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
-```
-
-**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name
-
-``` go
-func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
-	switch name {
-	case "old-flag-name":
-		name = "new-flag-name"
-		break
-	}
-	return pflag.NormalizedName(name)
-}
-
-myFlagSet.SetNormalizeFunc(aliasNormalizeFunc)
-```
-
-## Deprecating a flag or its shorthand
-It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used.
-
-**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead.
-```go
-// deprecate a flag by specifying its name and a usage message
-flags.MarkDeprecated("badflag", "please use --good-flag instead")
-```
-This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used.
-
-**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n".
-```go
-// deprecate a flag shorthand by specifying its flag name and a usage message
-flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only")
-```
-This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used.
-
-Note that usage message is essential here, and it should not be empty.
-
-## Hidden flags
-It is possible to mark a flag as hidden, meaning it will still function as normal, however will not show up in usage/help text.
-
-**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available.
-```go
-// hide a flag by specifying its name
-flags.MarkHidden("secretFlag")
-```
-
-## Disable sorting of flags
-`pflag` allows you to disable sorting of flags for help and usage message.
-
-**Example**:
-```go
-flags.BoolP("verbose", "v", false, "verbose output")
-flags.String("coolflag", "yeaah", "it's really cool flag")
-flags.Int("usefulflag", 777, "sometimes it's very useful")
-flags.SortFlags = false
-flags.PrintDefaults()
-```
-**Output**:
-```
-  -v, --verbose           verbose output
-      --coolflag string   it's really cool flag (default "yeaah")
-      --usefulflag int    sometimes it's very useful (default 777)
-```
-
-
-## Supporting Go flags when using pflag
-In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary
-to support flags defined by third-party dependencies (e.g. `golang/glog`).
-
-**Example**: You want to add the Go flags to the `CommandLine` flagset
-```go
-import (
-	goflag "flag"
-	flag "github.com/spf13/pflag"
-)
-
-var ip *int = flag.Int("flagname", 1234, "help message for flagname")
-
-func main() {
-	flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
-	flag.Parse()
-}
-```
-
-## More info
-
-You can see the full reference documentation of the pflag package
-[at godoc.org][3], or through go's standard documentation system by
-running `godoc -http=:6060` and browsing to
-[http://localhost:6060/pkg/github.com/spf13/pflag][2] after
-installation.
-
-[2]: http://localhost:6060/pkg/github.com/spf13/pflag
-[3]: http://godoc.org/github.com/spf13/pflag
diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go
deleted file mode 100644
index c4c5c0b..0000000
--- a/vendor/github.com/spf13/pflag/bool.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package pflag
-
-import "strconv"
-
-// optional interface to indicate boolean flags that can be
-// supplied without "=value" text
-type boolFlag interface {
-	Value
-	IsBoolFlag() bool
-}
-
-// -- bool Value
-type boolValue bool
-
-func newBoolValue(val bool, p *bool) *boolValue {
-	*p = val
-	return (*boolValue)(p)
-}
-
-func (b *boolValue) Set(s string) error {
-	v, err := strconv.ParseBool(s)
-	*b = boolValue(v)
-	return err
-}
-
-func (b *boolValue) Type() string {
-	return "bool"
-}
-
-func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }
-
-func (b *boolValue) IsBoolFlag() bool { return true }
-
-func boolConv(sval string) (interface{}, error) {
-	return strconv.ParseBool(sval)
-}
-
-// GetBool return the bool value of a flag with the given name
-func (f *FlagSet) GetBool(name string) (bool, error) {
-	val, err := f.getFlagType(name, "bool", boolConv)
-	if err != nil {
-		return false, err
-	}
-	return val.(bool), nil
-}
-
-// BoolVar defines a bool flag with specified name, default value, and usage string.
-// The argument p points to a bool variable in which to store the value of the flag.
-func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
-	f.BoolVarP(p, name, "", value, usage)
-}
-
-// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
-	flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage)
-	flag.NoOptDefVal = "true"
-}
-
-// BoolVar defines a bool flag with specified name, default value, and usage string.
-// The argument p points to a bool variable in which to store the value of the flag.
-func BoolVar(p *bool, name string, value bool, usage string) {
-	BoolVarP(p, name, "", value, usage)
-}
-
-// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
-func BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
-	flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage)
-	flag.NoOptDefVal = "true"
-}
-
-// Bool defines a bool flag with specified name, default value, and usage string.
-// The return value is the address of a bool variable that stores the value of the flag.
-func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
-	return f.BoolP(name, "", value, usage)
-}
-
-// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool {
-	p := new(bool)
-	f.BoolVarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// Bool defines a bool flag with specified name, default value, and usage string.
-// The return value is the address of a bool variable that stores the value of the flag.
-func Bool(name string, value bool, usage string) *bool {
-	return BoolP(name, "", value, usage)
-}
-
-// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
-func BoolP(name, shorthand string, value bool, usage string) *bool {
-	b := CommandLine.BoolP(name, shorthand, value, usage)
-	return b
-}
diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go
deleted file mode 100644
index 5af02f1..0000000
--- a/vendor/github.com/spf13/pflag/bool_slice.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package pflag
-
-import (
-	"io"
-	"strconv"
-	"strings"
-)
-
-// -- boolSlice Value
-type boolSliceValue struct {
-	value   *[]bool
-	changed bool
-}
-
-func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue {
-	bsv := new(boolSliceValue)
-	bsv.value = p
-	*bsv.value = val
-	return bsv
-}
-
-// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag.
-// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended.
-func (s *boolSliceValue) Set(val string) error {
-
-	// remove all quote characters
-	rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
-
-	// read flag arguments with CSV parser
-	boolStrSlice, err := readAsCSV(rmQuote.Replace(val))
-	if err != nil && err != io.EOF {
-		return err
-	}
-
-	// parse boolean values into slice
-	out := make([]bool, 0, len(boolStrSlice))
-	for _, boolStr := range boolStrSlice {
-		b, err := strconv.ParseBool(strings.TrimSpace(boolStr))
-		if err != nil {
-			return err
-		}
-		out = append(out, b)
-	}
-
-	if !s.changed {
-		*s.value = out
-	} else {
-		*s.value = append(*s.value, out...)
-	}
-
-	s.changed = true
-
-	return nil
-}
-
-// Type returns a string that uniquely represents this flag's type.
-func (s *boolSliceValue) Type() string {
-	return "boolSlice"
-}
-
-// String defines a "native" format for this boolean slice flag value.
-func (s *boolSliceValue) String() string {
-
-	boolStrSlice := make([]string, len(*s.value))
-	for i, b := range *s.value {
-		boolStrSlice[i] = strconv.FormatBool(b)
-	}
-
-	out, _ := writeAsCSV(boolStrSlice)
-
-	return "[" + out + "]"
-}
-
-func boolSliceConv(val string) (interface{}, error) {
-	val = strings.Trim(val, "[]")
-	// Empty string would cause a slice with one (empty) entry
-	if len(val) == 0 {
-		return []bool{}, nil
-	}
-	ss := strings.Split(val, ",")
-	out := make([]bool, len(ss))
-	for i, t := range ss {
-		var err error
-		out[i], err = strconv.ParseBool(t)
-		if err != nil {
-			return nil, err
-		}
-	}
-	return out, nil
-}
-
-// GetBoolSlice returns the []bool value of a flag with the given name.
-func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) {
-	val, err := f.getFlagType(name, "boolSlice", boolSliceConv)
-	if err != nil {
-		return []bool{}, err
-	}
-	return val.([]bool), nil
-}
-
-// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string.
-// The argument p points to a []bool variable in which to store the value of the flag.
-func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
-	f.VarP(newBoolSliceValue(value, p), name, "", usage)
-}
-
-// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
-	f.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
-}
-
-// BoolSliceVar defines a []bool flag with specified name, default value, and usage string.
-// The argument p points to a []bool variable in which to store the value of the flag.
-func BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
-	CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage)
-}
-
-// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
-func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
-	CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
-}
-
-// BoolSlice defines a []bool flag with specified name, default value, and usage string.
-// The return value is the address of a []bool variable that stores the value of the flag.
-func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool {
-	p := []bool{}
-	f.BoolSliceVarP(&p, name, "", value, usage)
-	return &p
-}
-
-// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
-	p := []bool{}
-	f.BoolSliceVarP(&p, name, shorthand, value, usage)
-	return &p
-}
-
-// BoolSlice defines a []bool flag with specified name, default value, and usage string.
-// The return value is the address of a []bool variable that stores the value of the flag.
-func BoolSlice(name string, value []bool, usage string) *[]bool {
-	return CommandLine.BoolSliceP(name, "", value, usage)
-}
-
-// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
-func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
-	return CommandLine.BoolSliceP(name, shorthand, value, usage)
-}
diff --git a/vendor/github.com/spf13/pflag/bytes.go b/vendor/github.com/spf13/pflag/bytes.go
deleted file mode 100644
index 67d5304..0000000
--- a/vendor/github.com/spf13/pflag/bytes.go
+++ /dev/null
@@ -1,209 +0,0 @@
-package pflag
-
-import (
-	"encoding/base64"
-	"encoding/hex"
-	"fmt"
-	"strings"
-)
-
-// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded
-type bytesHexValue []byte
-
-// String implements pflag.Value.String.
-func (bytesHex bytesHexValue) String() string {
-	return fmt.Sprintf("%X", []byte(bytesHex))
-}
-
-// Set implements pflag.Value.Set.
-func (bytesHex *bytesHexValue) Set(value string) error {
-	bin, err := hex.DecodeString(strings.TrimSpace(value))
-
-	if err != nil {
-		return err
-	}
-
-	*bytesHex = bin
-
-	return nil
-}
-
-// Type implements pflag.Value.Type.
-func (*bytesHexValue) Type() string {
-	return "bytesHex"
-}
-
-func newBytesHexValue(val []byte, p *[]byte) *bytesHexValue {
-	*p = val
-	return (*bytesHexValue)(p)
-}
-
-func bytesHexConv(sval string) (interface{}, error) {
-
-	bin, err := hex.DecodeString(sval)
-
-	if err == nil {
-		return bin, nil
-	}
-
-	return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
-}
-
-// GetBytesHex return the []byte value of a flag with the given name
-func (f *FlagSet) GetBytesHex(name string) ([]byte, error) {
-	val, err := f.getFlagType(name, "bytesHex", bytesHexConv)
-
-	if err != nil {
-		return []byte{}, err
-	}
-
-	return val.([]byte), nil
-}
-
-// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
-// The argument p points to an []byte variable in which to store the value of the flag.
-func (f *FlagSet) BytesHexVar(p *[]byte, name string, value []byte, usage string) {
-	f.VarP(newBytesHexValue(value, p), name, "", usage)
-}
-
-// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
-	f.VarP(newBytesHexValue(value, p), name, shorthand, usage)
-}
-
-// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
-// The argument p points to an []byte variable in which to store the value of the flag.
-func BytesHexVar(p *[]byte, name string, value []byte, usage string) {
-	CommandLine.VarP(newBytesHexValue(value, p), name, "", usage)
-}
-
-// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
-func BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
-	CommandLine.VarP(newBytesHexValue(value, p), name, shorthand, usage)
-}
-
-// BytesHex defines an []byte flag with specified name, default value, and usage string.
-// The return value is the address of an []byte variable that stores the value of the flag.
-func (f *FlagSet) BytesHex(name string, value []byte, usage string) *[]byte {
-	p := new([]byte)
-	f.BytesHexVarP(p, name, "", value, usage)
-	return p
-}
-
-// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
-	p := new([]byte)
-	f.BytesHexVarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// BytesHex defines an []byte flag with specified name, default value, and usage string.
-// The return value is the address of an []byte variable that stores the value of the flag.
-func BytesHex(name string, value []byte, usage string) *[]byte {
-	return CommandLine.BytesHexP(name, "", value, usage)
-}
-
-// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
-func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
-	return CommandLine.BytesHexP(name, shorthand, value, usage)
-}
-
-// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded
-type bytesBase64Value []byte
-
-// String implements pflag.Value.String.
-func (bytesBase64 bytesBase64Value) String() string {
-	return base64.StdEncoding.EncodeToString([]byte(bytesBase64))
-}
-
-// Set implements pflag.Value.Set.
-func (bytesBase64 *bytesBase64Value) Set(value string) error {
-	bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value))
-
-	if err != nil {
-		return err
-	}
-
-	*bytesBase64 = bin
-
-	return nil
-}
-
-// Type implements pflag.Value.Type.
-func (*bytesBase64Value) Type() string {
-	return "bytesBase64"
-}
-
-func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value {
-	*p = val
-	return (*bytesBase64Value)(p)
-}
-
-func bytesBase64ValueConv(sval string) (interface{}, error) {
-
-	bin, err := base64.StdEncoding.DecodeString(sval)
-	if err == nil {
-		return bin, nil
-	}
-
-	return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
-}
-
-// GetBytesBase64 return the []byte value of a flag with the given name
-func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) {
-	val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv)
-
-	if err != nil {
-		return []byte{}, err
-	}
-
-	return val.([]byte), nil
-}
-
-// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
-// The argument p points to an []byte variable in which to store the value of the flag.
-func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
-	f.VarP(newBytesBase64Value(value, p), name, "", usage)
-}
-
-// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
-	f.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
-}
-
-// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
-// The argument p points to an []byte variable in which to store the value of the flag.
-func BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
-	CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage)
-}
-
-// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
-func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
-	CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
-}
-
-// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
-// The return value is the address of an []byte variable that stores the value of the flag.
-func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte {
-	p := new([]byte)
-	f.BytesBase64VarP(p, name, "", value, usage)
-	return p
-}
-
-// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
-	p := new([]byte)
-	f.BytesBase64VarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
-// The return value is the address of an []byte variable that stores the value of the flag.
-func BytesBase64(name string, value []byte, usage string) *[]byte {
-	return CommandLine.BytesBase64P(name, "", value, usage)
-}
-
-// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
-func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
-	return CommandLine.BytesBase64P(name, shorthand, value, usage)
-}
diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go
deleted file mode 100644
index aa126e4..0000000
--- a/vendor/github.com/spf13/pflag/count.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package pflag
-
-import "strconv"
-
-// -- count Value
-type countValue int
-
-func newCountValue(val int, p *int) *countValue {
-	*p = val
-	return (*countValue)(p)
-}
-
-func (i *countValue) Set(s string) error {
-	// "+1" means that no specific value was passed, so increment
-	if s == "+1" {
-		*i = countValue(*i + 1)
-		return nil
-	}
-	v, err := strconv.ParseInt(s, 0, 0)
-	*i = countValue(v)
-	return err
-}
-
-func (i *countValue) Type() string {
-	return "count"
-}
-
-func (i *countValue) String() string { return strconv.Itoa(int(*i)) }
-
-func countConv(sval string) (interface{}, error) {
-	i, err := strconv.Atoi(sval)
-	if err != nil {
-		return nil, err
-	}
-	return i, nil
-}
-
-// GetCount return the int value of a flag with the given name
-func (f *FlagSet) GetCount(name string) (int, error) {
-	val, err := f.getFlagType(name, "count", countConv)
-	if err != nil {
-		return 0, err
-	}
-	return val.(int), nil
-}
-
-// CountVar defines a count flag with specified name, default value, and usage string.
-// The argument p points to an int variable in which to store the value of the flag.
-// A count flag will add 1 to its value evey time it is found on the command line
-func (f *FlagSet) CountVar(p *int, name string, usage string) {
-	f.CountVarP(p, name, "", usage)
-}
-
-// CountVarP is like CountVar only take a shorthand for the flag name.
-func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
-	flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
-	flag.NoOptDefVal = "+1"
-}
-
-// CountVar like CountVar only the flag is placed on the CommandLine instead of a given flag set
-func CountVar(p *int, name string, usage string) {
-	CommandLine.CountVar(p, name, usage)
-}
-
-// CountVarP is like CountVar only take a shorthand for the flag name.
-func CountVarP(p *int, name, shorthand string, usage string) {
-	CommandLine.CountVarP(p, name, shorthand, usage)
-}
-
-// Count defines a count flag with specified name, default value, and usage string.
-// The return value is the address of an int variable that stores the value of the flag.
-// A count flag will add 1 to its value evey time it is found on the command line
-func (f *FlagSet) Count(name string, usage string) *int {
-	p := new(int)
-	f.CountVarP(p, name, "", usage)
-	return p
-}
-
-// CountP is like Count only takes a shorthand for the flag name.
-func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
-	p := new(int)
-	f.CountVarP(p, name, shorthand, usage)
-	return p
-}
-
-// Count defines a count flag with specified name, default value, and usage string.
-// The return value is the address of an int variable that stores the value of the flag.
-// A count flag will add 1 to its value evey time it is found on the command line
-func Count(name string, usage string) *int {
-	return CommandLine.CountP(name, "", usage)
-}
-
-// CountP is like Count only takes a shorthand for the flag name.
-func CountP(name, shorthand string, usage string) *int {
-	return CommandLine.CountP(name, shorthand, usage)
-}
diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go
deleted file mode 100644
index e9debef..0000000
--- a/vendor/github.com/spf13/pflag/duration.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package pflag
-
-import (
-	"time"
-)
-
-// -- time.Duration Value
-type durationValue time.Duration
-
-func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
-	*p = val
-	return (*durationValue)(p)
-}
-
-func (d *durationValue) Set(s string) error {
-	v, err := time.ParseDuration(s)
-	*d = durationValue(v)
-	return err
-}
-
-func (d *durationValue) Type() string {
-	return "duration"
-}
-
-func (d *durationValue) String() string { return (*time.Duration)(d).String() }
-
-func durationConv(sval string) (interface{}, error) {
-	return time.ParseDuration(sval)
-}
-
-// GetDuration return the duration value of a flag with the given name
-func (f *FlagSet) GetDuration(name string) (time.Duration, error) {
-	val, err := f.getFlagType(name, "duration", durationConv)
-	if err != nil {
-		return 0, err
-	}
-	return val.(time.Duration), nil
-}
-
-// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
-// The argument p points to a time.Duration variable in which to store the value of the flag.
-func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
-	f.VarP(newDurationValue(value, p), name, "", usage)
-}
-
-// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
-	f.VarP(newDurationValue(value, p), name, shorthand, usage)
-}
-
-// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
-// The argument p points to a time.Duration variable in which to store the value of the flag.
-func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
-	CommandLine.VarP(newDurationValue(value, p), name, "", usage)
-}
-
-// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
-func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
-	CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage)
-}
-
-// Duration defines a time.Duration flag with specified name, default value, and usage string.
-// The return value is the address of a time.Duration variable that stores the value of the flag.
-func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
-	p := new(time.Duration)
-	f.DurationVarP(p, name, "", value, usage)
-	return p
-}
-
-// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
-	p := new(time.Duration)
-	f.DurationVarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// Duration defines a time.Duration flag with specified name, default value, and usage string.
-// The return value is the address of a time.Duration variable that stores the value of the flag.
-func Duration(name string, value time.Duration, usage string) *time.Duration {
-	return CommandLine.DurationP(name, "", value, usage)
-}
-
-// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
-func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
-	return CommandLine.DurationP(name, shorthand, value, usage)
-}
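
The Duration helpers deleted above delegate to time.ParseDuration, so any input that function accepts is valid on the command line. A minimal sketch (flag name and default are illustrative):

	package main

	import (
		"fmt"
		"time"

		flag "github.com/spf13/pflag"
	)

	func main() {
		// Accepts e.g. --timeout=250ms or --timeout 1h30m.
		timeout := flag.Duration("timeout", 5*time.Second, "request timeout")
		flag.Parse()
		fmt.Println("timeout:", *timeout)
	}
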
diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go
deleted file mode 100644
index 52c6b6d..0000000
--- a/vendor/github.com/spf13/pflag/duration_slice.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package pflag
-
-import (
-	"fmt"
-	"strings"
-	"time"
-)
-
-// -- durationSlice Value
-type durationSliceValue struct {
-	value   *[]time.Duration
-	changed bool
-}
-
-func newDurationSliceValue(val []time.Duration, p *[]time.Duration) *durationSliceValue {
-	dsv := new(durationSliceValue)
-	dsv.value = p
-	*dsv.value = val
-	return dsv
-}
-
-func (s *durationSliceValue) Set(val string) error {
-	ss := strings.Split(val, ",")
-	out := make([]time.Duration, len(ss))
-	for i, d := range ss {
-		var err error
-		out[i], err = time.ParseDuration(d)
-		if err != nil {
-			return err
-		}
-	}
-	if !s.changed {
-		*s.value = out
-	} else {
-		*s.value = append(*s.value, out...)
-	}
-	s.changed = true
-	return nil
-}
-
-func (s *durationSliceValue) Type() string {
-	return "durationSlice"
-}
-
-func (s *durationSliceValue) String() string {
-	out := make([]string, len(*s.value))
-	for i, d := range *s.value {
-		out[i] = fmt.Sprintf("%s", d)
-	}
-	return "[" + strings.Join(out, ",") + "]"
-}
-
-func durationSliceConv(val string) (interface{}, error) {
-	val = strings.Trim(val, "[]")
-	// Empty string would cause a slice with one (empty) entry
-	if len(val) == 0 {
-		return []time.Duration{}, nil
-	}
-	ss := strings.Split(val, ",")
-	out := make([]time.Duration, len(ss))
-	for i, d := range ss {
-		var err error
-		out[i], err = time.ParseDuration(d)
-		if err != nil {
-			return nil, err
-		}
-	}
-	return out, nil
-}
-
-// GetDurationSlice returns the []time.Duration value of a flag with the given name
-func (f *FlagSet) GetDurationSlice(name string) ([]time.Duration, error) {
-	val, err := f.getFlagType(name, "durationSlice", durationSliceConv)
-	if err != nil {
-		return []time.Duration{}, err
-	}
-	return val.([]time.Duration), nil
-}
-
-// DurationSliceVar defines a durationSlice flag with specified name, default value, and usage string.
-// The argument p points to a []time.Duration variable in which to store the value of the flag.
-func (f *FlagSet) DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
-	f.VarP(newDurationSliceValue(value, p), name, "", usage)
-}
-
-// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
-	f.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
-}
-
-// DurationSliceVar defines a []time.Duration flag with specified name, default value, and usage string.
-// The argument p points to a []time.Duration variable in which to store the value of the flag.
-func DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
-	CommandLine.VarP(newDurationSliceValue(value, p), name, "", usage)
-}
-
-// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
-func DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
-	CommandLine.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
-}
-
-// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
-// The return value is the address of a []time.Duration variable that stores the value of the flag.
-func (f *FlagSet) DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
-	p := []time.Duration{}
-	f.DurationSliceVarP(&p, name, "", value, usage)
-	return &p
-}
-
-// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
-	p := []time.Duration{}
-	f.DurationSliceVarP(&p, name, shorthand, value, usage)
-	return &p
-}
-
-// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
-// The return value is the address of a []time.Duration variable that stores the value of the flag.
-func DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
-	return CommandLine.DurationSliceP(name, "", value, usage)
-}
-
-// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
-func DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
-	return CommandLine.DurationSliceP(name, shorthand, value, usage)
-}
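
Per the Set method deleted above, the first use of a duration-slice flag replaces the default and later uses append, with each value split on commas. A minimal sketch (names and values are illustrative):

	package main

	import (
		"fmt"
		"time"

		flag "github.com/spf13/pflag"
	)

	func main() {
		// "--interval=1s,5s --interval 30s" yields [1s 5s 30s]:
		// the first occurrence replaces the default, the second appends.
		intervals := flag.DurationSlice("interval", []time.Duration{time.Second}, "retry intervals")
		flag.Parse()
		fmt.Println("intervals:", *intervals)
	}
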
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
deleted file mode 100644
index 9beeda8..0000000
--- a/vendor/github.com/spf13/pflag/flag.go
+++ /dev/null
@@ -1,1227 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package pflag is a drop-in replacement for Go's flag package, implementing
-POSIX/GNU-style --flags.
-
-pflag is compatible with the GNU extensions to the POSIX recommendations
-for command-line options. See
-http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
-
-Usage:
-
-pflag is a drop-in replacement for Go's native flag package. If you import
-pflag under the name "flag" then all code should continue to function
-with no changes.
-
-	import flag "github.com/spf13/pflag"
-
-There is one exception to this: if you directly instantiate the Flag struct
-there is one more field "Shorthand" that you will need to set.
-Most code never instantiates this struct directly, and instead uses
-functions such as String(), BoolVar(), and Var(), and is therefore
-unaffected.
-
-Define flags using flag.String(), Bool(), Int(), etc.
-
-This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
-	var ip = flag.Int("flagname", 1234, "help message for flagname")
-If you like, you can bind the flag to a variable using the Var() functions.
-	var flagvar int
-	func init() {
-		flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
-	}
-Or you can create custom flags that satisfy the Value interface (with
-pointer receivers) and couple them to flag parsing by
-	flag.Var(&flagVal, "name", "help message for flagname")
-For such flags, the default value is just the initial value of the variable.
-
-After all flags are defined, call
-	flag.Parse()
-to parse the command line into the defined flags.
-
-Flags may then be used directly. If you're using the flags themselves,
-they are all pointers; if you bind to variables, they're values.
-	fmt.Println("ip has value ", *ip)
-	fmt.Println("flagvar has value ", flagvar)
-
-After parsing, the arguments after the flag are available as the
-slice flag.Args() or individually as flag.Arg(i).
-The arguments are indexed from 0 through flag.NArg()-1.
-
-The pflag package also defines some new functions that are not in flag,
-which give one-letter shorthands for flags. You can use these by appending
-'P' to the name of any function that defines a flag.
-	var ip = flag.IntP("flagname", "f", 1234, "help message")
-	var flagvar bool
-	func init() {
-		flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
-	}
-	flag.VarP(&flagVal, "varname", "v", "help message")
-Shorthand letters can be used with single dashes on the command line.
-Boolean shorthand flags can be combined with other shorthand flags.
-
-Command line flag syntax:
-	--flag    // boolean flags only
-	--flag=x
-
-Unlike the flag package, a single dash before an option means something
-different than a double dash. Single dashes signify a series of shorthand
-letters for flags. All but the last shorthand letter must be boolean flags.
-	// boolean flags
-	-f
-	-abc
-	// non-boolean flags
-	-n 1234
-	-Ifile
-	// mixed
-	-abcs "hello"
-	-abcn1234
-
-Flag parsing stops after the terminator "--". Unlike the flag package,
-flags can be interspersed with arguments anywhere on the command line
-before this terminator.
-
-Integer flags accept 1234, 0664, 0x1234 and may be negative.
-Boolean flags (in their long form) accept 1, 0, t, f, true, false,
-TRUE, FALSE, True, False.
-Duration flags accept any input valid for time.ParseDuration.
-
-The default set of command-line flags is controlled by
-top-level functions.  The FlagSet type allows one to define
-independent sets of flags, such as to implement subcommands
-in a command-line interface. The methods of FlagSet are
-analogous to the top-level functions for the command-line
-flag set.
-*/
-package pflag
-
-import (
-	"bytes"
-	"errors"
-	goflag "flag"
-	"fmt"
-	"io"
-	"os"
-	"sort"
-	"strings"
-)
-
-// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
-var ErrHelp = errors.New("pflag: help requested")
-
-// ErrorHandling defines how to handle flag parsing errors.
-type ErrorHandling int
-
-const (
-	// ContinueOnError will return an err from Parse() if an error is found
-	ContinueOnError ErrorHandling = iota
-	// ExitOnError will call os.Exit(2) if an error is found when parsing
-	ExitOnError
-	// PanicOnError will panic() if an error is found when parsing flags
-	PanicOnError
-)
-
-// ParseErrorsWhitelist defines the parsing errors that can be ignored
-type ParseErrorsWhitelist struct {
-	// UnknownFlags will ignore unknown flag errors and continue parsing the rest of the flags
-	UnknownFlags bool
-}
-
-// NormalizedName is a flag name that has been normalized according to rules
-// for the FlagSet (e.g. making '-' and '_' equivalent).
-type NormalizedName string
-
-// A FlagSet represents a set of defined flags.
-type FlagSet struct {
-	// Usage is the function called when an error occurs while parsing flags.
-	// The field is a function (not a method) that may be changed to point to
-	// a custom error handler.
-	Usage func()
-
-	// SortFlags indicates whether flags should be sorted in
-	// help/usage messages.
-	SortFlags bool
-
-	// ParseErrorsWhitelist is used to configure a whitelist of errors
-	ParseErrorsWhitelist ParseErrorsWhitelist
-
-	name              string
-	parsed            bool
-	actual            map[NormalizedName]*Flag
-	orderedActual     []*Flag
-	sortedActual      []*Flag
-	formal            map[NormalizedName]*Flag
-	orderedFormal     []*Flag
-	sortedFormal      []*Flag
-	shorthands        map[byte]*Flag
-	args              []string // arguments after flags
-	argsLenAtDash     int      // len(args) when a '--' was located when parsing, or -1 if no --
-	errorHandling     ErrorHandling
-	output            io.Writer // nil means stderr; use out() accessor
-	interspersed      bool      // allow interspersed option/non-option args
-	normalizeNameFunc func(f *FlagSet, name string) NormalizedName
-
-	addedGoFlagSets []*goflag.FlagSet
-}
-
-// A Flag represents the state of a flag.
-type Flag struct {
-	Name                string              // name as it appears on command line
-	Shorthand           string              // one-letter abbreviated flag
-	Usage               string              // help message
-	Value               Value               // value as set
-	DefValue            string              // default value (as text); for usage message
-	Changed             bool                // true if the user set the value (rather than leaving it at the default)
-	NoOptDefVal         string              // default value (as text); used if the flag is on the command line without any options
-	Deprecated          string              // If this flag is deprecated, this string is the message suggesting what to use instead
-	Hidden              bool                // used by cobra.Command to allow flags to be hidden from help/usage text
-	ShorthandDeprecated string              // If the shorthand of this flag is deprecated, this string is the message suggesting what to use instead
-	Annotations         map[string][]string // used by cobra.Command bash autocompletion code
-}
-
-// Value is the interface to the dynamic value stored in a flag.
-// (The default value is represented as a string.)
-type Value interface {
-	String() string
-	Set(string) error
-	Type() string
-}
-
-// sortFlags returns the flags as a slice in lexicographical sorted order.
-func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
-	list := make(sort.StringSlice, len(flags))
-	i := 0
-	for k := range flags {
-		list[i] = string(k)
-		i++
-	}
-	list.Sort()
-	result := make([]*Flag, len(list))
-	for i, name := range list {
-		result[i] = flags[NormalizedName(name)]
-	}
-	return result
-}
-
-// SetNormalizeFunc allows you to add a function which can translate flag names.
-// Flags added to the FlagSet will be translated, and any name used to look up
-// a flag will be translated as well. This makes it possible to create a flag
-// named "getURL" and have it translated to "geturl". A user could then pass
-// "--getUrl", which would also be translated to "geturl", and everything would work.
-func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
-	f.normalizeNameFunc = n
-	f.sortedFormal = f.sortedFormal[:0]
-	for fname, flag := range f.formal {
-		nname := f.normalizeFlagName(flag.Name)
-		if fname == nname {
-			continue
-		}
-		flag.Name = string(nname)
-		delete(f.formal, fname)
-		f.formal[nname] = flag
-		if _, set := f.actual[fname]; set {
-			delete(f.actual, fname)
-			f.actual[nname] = flag
-		}
-	}
-}
-
-// GetNormalizeFunc returns the previously set NormalizeFunc, or a function
-// which does no translation if none was set.
-func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName {
-	if f.normalizeNameFunc != nil {
-		return f.normalizeNameFunc
-	}
-	return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) }
-}
-
-func (f *FlagSet) normalizeFlagName(name string) NormalizedName {
-	n := f.GetNormalizeFunc()
-	return n(f, name)
-}
-
-func (f *FlagSet) out() io.Writer {
-	if f.output == nil {
-		return os.Stderr
-	}
-	return f.output
-}
-
-// SetOutput sets the destination for usage and error messages.
-// If output is nil, os.Stderr is used.
-func (f *FlagSet) SetOutput(output io.Writer) {
-	f.output = output
-}
-
-// VisitAll visits the flags in lexicographical order or
-// in declaration order if f.SortFlags is false, calling fn for each.
-// It visits all flags, even those not set.
-func (f *FlagSet) VisitAll(fn func(*Flag)) {
-	if len(f.formal) == 0 {
-		return
-	}
-
-	var flags []*Flag
-	if f.SortFlags {
-		if len(f.formal) != len(f.sortedFormal) {
-			f.sortedFormal = sortFlags(f.formal)
-		}
-		flags = f.sortedFormal
-	} else {
-		flags = f.orderedFormal
-	}
-
-	for _, flag := range flags {
-		fn(flag)
-	}
-}
-
-// HasFlags returns a bool to indicate if the FlagSet has any flags defined.
-func (f *FlagSet) HasFlags() bool {
-	return len(f.formal) > 0
-}
-
-// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags
-// that are not hidden.
-func (f *FlagSet) HasAvailableFlags() bool {
-	for _, flag := range f.formal {
-		if !flag.Hidden {
-			return true
-		}
-	}
-	return false
-}
-
-// VisitAll visits the command-line flags in lexicographical order or
-// in declaration order if CommandLine.SortFlags is false, calling fn for each.
-// It visits all flags, even those not set.
-func VisitAll(fn func(*Flag)) {
-	CommandLine.VisitAll(fn)
-}
-
-// Visit visits the flags in lexicographical order or
-// in declaration order if f.SortFlags is false, calling fn for each.
-// It visits only those flags that have been set.
-func (f *FlagSet) Visit(fn func(*Flag)) {
-	if len(f.actual) == 0 {
-		return
-	}
-
-	var flags []*Flag
-	if f.SortFlags {
-		if len(f.actual) != len(f.sortedActual) {
-			f.sortedActual = sortFlags(f.actual)
-		}
-		flags = f.sortedActual
-	} else {
-		flags = f.orderedActual
-	}
-
-	for _, flag := range flags {
-		fn(flag)
-	}
-}
-
-// Visit visits the command-line flags in lexicographical order or
-// in declaration order if CommandLine.SortFlags is false, calling fn for each.
-// It visits only those flags that have been set.
-func Visit(fn func(*Flag)) {
-	CommandLine.Visit(fn)
-}
-
-// Lookup returns the Flag structure of the named flag, returning nil if none exists.
-func (f *FlagSet) Lookup(name string) *Flag {
-	return f.lookup(f.normalizeFlagName(name))
-}
-
-// ShorthandLookup returns the Flag structure of the shorthand flag,
-// returning nil if none exists.
-// It panics if len(name) > 1.
-func (f *FlagSet) ShorthandLookup(name string) *Flag {
-	if name == "" {
-		return nil
-	}
-	if len(name) > 1 {
-		msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name)
-		fmt.Fprintln(f.out(), msg)
-		panic(msg)
-	}
-	c := name[0]
-	return f.shorthands[c]
-}
-
-// lookup returns the Flag structure of the named flag, returning nil if none exists.
-func (f *FlagSet) lookup(name NormalizedName) *Flag {
-	return f.formal[name]
-}
-
-// func to return a given type for a given flag name
-func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) {
-	flag := f.Lookup(name)
-	if flag == nil {
-		err := fmt.Errorf("flag accessed but not defined: %s", name)
-		return nil, err
-	}
-
-	if flag.Value.Type() != ftype {
-		err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type())
-		return nil, err
-	}
-
-	sval := flag.Value.String()
-	result, err := convFunc(sval)
-	if err != nil {
-		return nil, err
-	}
-	return result, nil
-}
-
-// ArgsLenAtDash will return the length of f.Args at the moment when a -- was
-// found during arg parsing. This allows your program to know which args were
-// before the -- and which came after.
-func (f *FlagSet) ArgsLenAtDash() int {
-	return f.argsLenAtDash
-}
-
-// MarkDeprecated indicates that a flag is deprecated in your program. It will
-// continue to function but will not show up in help or usage messages. Using
-// this flag will also print the given usageMessage.
-func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
-	flag := f.Lookup(name)
-	if flag == nil {
-		return fmt.Errorf("flag %q does not exist", name)
-	}
-	if usageMessage == "" {
-		return fmt.Errorf("deprecated message for flag %q must be set", name)
-	}
-	flag.Deprecated = usageMessage
-	flag.Hidden = true
-	return nil
-}
-
-// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your
-// program. It will continue to function but will not show up in help or usage
-// messages. Using this flag will also print the given usageMessage.
-func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error {
-	flag := f.Lookup(name)
-	if flag == nil {
-		return fmt.Errorf("flag %q does not exist", name)
-	}
-	if usageMessage == "" {
-		return fmt.Errorf("deprecated message for flag %q must be set", name)
-	}
-	flag.ShorthandDeprecated = usageMessage
-	return nil
-}
-
-// MarkHidden sets a flag to 'hidden' in your program. It will continue to
-// function but will not show up in help or usage messages.
-func (f *FlagSet) MarkHidden(name string) error {
-	flag := f.Lookup(name)
-	if flag == nil {
-		return fmt.Errorf("flag %q does not exist", name)
-	}
-	flag.Hidden = true
-	return nil
-}
-
-// Lookup returns the Flag structure of the named command-line flag,
-// returning nil if none exists.
-func Lookup(name string) *Flag {
-	return CommandLine.Lookup(name)
-}
-
-// ShorthandLookup returns the Flag structure of the shorthand flag,
-// returning nil if none exists.
-func ShorthandLookup(name string) *Flag {
-	return CommandLine.ShorthandLookup(name)
-}
-
-// Set sets the value of the named flag.
-func (f *FlagSet) Set(name, value string) error {
-	normalName := f.normalizeFlagName(name)
-	flag, ok := f.formal[normalName]
-	if !ok {
-		return fmt.Errorf("no such flag -%v", name)
-	}
-
-	err := flag.Value.Set(value)
-	if err != nil {
-		var flagName string
-		if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
-			flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
-		} else {
-			flagName = fmt.Sprintf("--%s", flag.Name)
-		}
-		return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
-	}
-
-	if !flag.Changed {
-		if f.actual == nil {
-			f.actual = make(map[NormalizedName]*Flag)
-		}
-		f.actual[normalName] = flag
-		f.orderedActual = append(f.orderedActual, flag)
-
-		flag.Changed = true
-	}
-
-	if flag.Deprecated != "" {
-		fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
-	}
-	return nil
-}
-
-// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet.
-// This is sometimes used by spf13/cobra programs which want to generate additional
-// bash completion information.
-func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
-	normalName := f.normalizeFlagName(name)
-	flag, ok := f.formal[normalName]
-	if !ok {
-		return fmt.Errorf("no such flag -%v", name)
-	}
-	if flag.Annotations == nil {
-		flag.Annotations = map[string][]string{}
-	}
-	flag.Annotations[key] = values
-	return nil
-}
-
-// Changed returns true if the flag was explicitly set during Parse() and false
-// otherwise
-func (f *FlagSet) Changed(name string) bool {
-	flag := f.Lookup(name)
-	// If a flag doesn't exist, it wasn't changed.
-	if flag == nil {
-		return false
-	}
-	return flag.Changed
-}
-
-// Set sets the value of the named command-line flag.
-func Set(name, value string) error {
-	return CommandLine.Set(name, value)
-}
-
-// PrintDefaults prints, to standard error unless configured
-// otherwise, the default values of all defined flags in the set.
-func (f *FlagSet) PrintDefaults() {
-	usages := f.FlagUsages()
-	fmt.Fprint(f.out(), usages)
-}
-
-// defaultIsZeroValue returns true if the default value for this flag represents
-// a zero value.
-func (f *Flag) defaultIsZeroValue() bool {
-	switch f.Value.(type) {
-	case boolFlag:
-		return f.DefValue == "false"
-	case *durationValue:
-		// Beginning in Go 1.7, duration zero values are "0s"
-		return f.DefValue == "0" || f.DefValue == "0s"
-	case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
-		return f.DefValue == "0"
-	case *stringValue:
-		return f.DefValue == ""
-	case *ipValue, *ipMaskValue, *ipNetValue:
-		return f.DefValue == "<nil>"
-	case *intSliceValue, *stringSliceValue, *stringArrayValue:
-		return f.DefValue == "[]"
-	default:
-		switch f.Value.String() {
-		case "false":
-			return true
-		case "<nil>":
-			return true
-		case "":
-			return true
-		case "0":
-			return true
-		}
-		return false
-	}
-}
-
-// UnquoteUsage extracts a back-quoted name from the usage
-// string for a flag and returns it and the un-quoted usage.
-// Given "a `name` to show" it returns ("name", "a name to show").
-// If there are no back quotes, the name is an educated guess of the
-// type of the flag's value, or the empty string if the flag is boolean.
-func UnquoteUsage(flag *Flag) (name string, usage string) {
-	// Look for a back-quoted name, but avoid the strings package.
-	usage = flag.Usage
-	for i := 0; i < len(usage); i++ {
-		if usage[i] == '`' {
-			for j := i + 1; j < len(usage); j++ {
-				if usage[j] == '`' {
-					name = usage[i+1 : j]
-					usage = usage[:i] + name + usage[j+1:]
-					return name, usage
-				}
-			}
-			break // Only one back quote; use type name.
-		}
-	}
-
-	name = flag.Value.Type()
-	switch name {
-	case "bool":
-		name = ""
-	case "float64":
-		name = "float"
-	case "int64":
-		name = "int"
-	case "uint64":
-		name = "uint"
-	case "stringSlice":
-		name = "strings"
-	case "intSlice":
-		name = "ints"
-	case "uintSlice":
-		name = "uints"
-	case "boolSlice":
-		name = "bools"
-	}
-
-	return
-}
-
-// wrapN splits the string `s` on whitespace into an initial substring up to
-// `i` runes in length and the remainder. Will go `slop` over `i` if
-// that encompasses the entire string (which allows the caller to
-// avoid short orphan words on the final line).
-func wrapN(i, slop int, s string) (string, string) {
-	if i+slop > len(s) {
-		return s, ""
-	}
-
-	w := strings.LastIndexAny(s[:i], " \t\n")
-	if w <= 0 {
-		return s, ""
-	}
-	nlPos := strings.LastIndex(s[:i], "\n")
-	if nlPos > 0 && nlPos < w {
-		return s[:nlPos], s[nlPos+1:]
-	}
-	return s[:w], s[w+1:]
-}
-
-// wrap wraps the string `s` to a maximum width `w` with leading indent
-// `i`. The first line is not indented (this is assumed to be done by the
-// caller). Pass `w` == 0 to do no wrapping.
-func wrap(i, w int, s string) string {
-	if w == 0 {
-		return strings.Replace(s, "\n", "\n"+strings.Repeat(" ", i), -1)
-	}
-
-	// space between indent i and end of line width w into which
-	// we should wrap the text.
-	wrap := w - i
-
-	var r, l string
-
-	// Not enough space for sensible wrapping. Wrap as a block on
-	// the next line instead.
-	if wrap < 24 {
-		i = 16
-		wrap = w - i
-		r += "\n" + strings.Repeat(" ", i)
-	}
-	// If still not enough space then don't even try to wrap.
-	if wrap < 24 {
-		return strings.Replace(s, "\n", r, -1)
-	}
-
-	// Try to avoid short orphan words on the final line, by
-	// allowing wrapN to go a bit over if that would fit in the
-	// remainder of the line.
-	slop := 5
-	wrap = wrap - slop
-
-	// Handle first line, which is indented by the caller (or the
-	// special case above)
-	l, s = wrapN(wrap, slop, s)
-	r = r + strings.Replace(l, "\n", "\n"+strings.Repeat(" ", i), -1)
-
-	// Now wrap the rest
-	for s != "" {
-		var t string
-
-		t, s = wrapN(wrap, slop, s)
-		r = r + "\n" + strings.Repeat(" ", i) + strings.Replace(t, "\n", "\n"+strings.Repeat(" ", i), -1)
-	}
-
-	return r
-}
-
-// FlagUsagesWrapped returns a string containing the usage information
-// for all flags in the FlagSet, wrapped to `cols` columns (0 for no
-// wrapping).
-func (f *FlagSet) FlagUsagesWrapped(cols int) string {
-	buf := new(bytes.Buffer)
-
-	lines := make([]string, 0, len(f.formal))
-
-	maxlen := 0
-	f.VisitAll(func(flag *Flag) {
-		if flag.Hidden {
-			return
-		}
-
-		line := ""
-		if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
-			line = fmt.Sprintf("  -%s, --%s", flag.Shorthand, flag.Name)
-		} else {
-			line = fmt.Sprintf("      --%s", flag.Name)
-		}
-
-		varname, usage := UnquoteUsage(flag)
-		if varname != "" {
-			line += " " + varname
-		}
-		if flag.NoOptDefVal != "" {
-			switch flag.Value.Type() {
-			case "string":
-				line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
-			case "bool":
-				if flag.NoOptDefVal != "true" {
-					line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
-				}
-			case "count":
-				if flag.NoOptDefVal != "+1" {
-					line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
-				}
-			default:
-				line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
-			}
-		}
-
-		// This special character will be replaced with spacing once the
-		// correct alignment is calculated
-		line += "\x00"
-		if len(line) > maxlen {
-			maxlen = len(line)
-		}
-
-		line += usage
-		if !flag.defaultIsZeroValue() {
-			if flag.Value.Type() == "string" {
-				line += fmt.Sprintf(" (default %q)", flag.DefValue)
-			} else {
-				line += fmt.Sprintf(" (default %s)", flag.DefValue)
-			}
-		}
-		if len(flag.Deprecated) != 0 {
-			line += fmt.Sprintf(" (DEPRECATED: %s)", flag.Deprecated)
-		}
-
-		lines = append(lines, line)
-	})
-
-	for _, line := range lines {
-		sidx := strings.Index(line, "\x00")
-		spacing := strings.Repeat(" ", maxlen-sidx)
-		// maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx
-		fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
-	}
-
-	return buf.String()
-}
-
-// FlagUsages returns a string containing the usage information for all flags in
-// the FlagSet
-func (f *FlagSet) FlagUsages() string {
-	return f.FlagUsagesWrapped(0)
-}
-
-// PrintDefaults prints to standard error the default values of all defined command-line flags.
-func PrintDefaults() {
-	CommandLine.PrintDefaults()
-}
-
-// defaultUsage is the default function to print a usage message.
-func defaultUsage(f *FlagSet) {
-	fmt.Fprintf(f.out(), "Usage of %s:\n", f.name)
-	f.PrintDefaults()
-}
-
-// NOTE: Usage is not just defaultUsage(CommandLine)
-// because it serves (via godoc flag Usage) as the example
-// for how to write your own usage function.
-
-// Usage prints to standard error a usage message documenting all defined command-line flags.
-// The function is a variable that may be changed to point to a custom function.
-// By default it prints a simple header and calls PrintDefaults; for details about the
-// format of the output and how to control it, see the documentation for PrintDefaults.
-var Usage = func() {
-	fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
-	PrintDefaults()
-}
-
-// NFlag returns the number of flags that have been set.
-func (f *FlagSet) NFlag() int { return len(f.actual) }
-
-// NFlag returns the number of command-line flags that have been set.
-func NFlag() int { return len(CommandLine.actual) }
-
-// Arg returns the i'th argument.  Arg(0) is the first remaining argument
-// after flags have been processed.
-func (f *FlagSet) Arg(i int) string {
-	if i < 0 || i >= len(f.args) {
-		return ""
-	}
-	return f.args[i]
-}
-
-// Arg returns the i'th command-line argument.  Arg(0) is the first remaining argument
-// after flags have been processed.
-func Arg(i int) string {
-	return CommandLine.Arg(i)
-}
-
-// NArg is the number of arguments remaining after flags have been processed.
-func (f *FlagSet) NArg() int { return len(f.args) }
-
-// NArg is the number of arguments remaining after flags have been processed.
-func NArg() int { return len(CommandLine.args) }
-
-// Args returns the non-flag arguments.
-func (f *FlagSet) Args() []string { return f.args }
-
-// Args returns the non-flag command-line arguments.
-func Args() []string { return CommandLine.args }
-
-// Var defines a flag with the specified name and usage string. The type and
-// value of the flag are represented by the first argument, of type Value, which
-// typically holds a user-defined implementation of Value. For instance, the
-// caller could create a flag that turns a comma-separated string into a slice
-// of strings by giving the slice the methods of Value; in particular, Set would
-// decompose the comma-separated string into the slice.
-func (f *FlagSet) Var(value Value, name string, usage string) {
-	f.VarP(value, name, "", usage)
-}
-
-// VarPF is like VarP, but returns the flag created
-func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag {
-	// Remember the default value as a string; it won't change.
-	flag := &Flag{
-		Name:      name,
-		Shorthand: shorthand,
-		Usage:     usage,
-		Value:     value,
-		DefValue:  value.String(),
-	}
-	f.AddFlag(flag)
-	return flag
-}
-
-// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
-	f.VarPF(value, name, shorthand, usage)
-}
-
-// AddFlag will add the flag to the FlagSet
-func (f *FlagSet) AddFlag(flag *Flag) {
-	normalizedFlagName := f.normalizeFlagName(flag.Name)
-
-	_, alreadyThere := f.formal[normalizedFlagName]
-	if alreadyThere {
-		msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
-		fmt.Fprintln(f.out(), msg)
-		panic(msg) // Happens only if flags are declared with identical names
-	}
-	if f.formal == nil {
-		f.formal = make(map[NormalizedName]*Flag)
-	}
-
-	flag.Name = string(normalizedFlagName)
-	f.formal[normalizedFlagName] = flag
-	f.orderedFormal = append(f.orderedFormal, flag)
-
-	if flag.Shorthand == "" {
-		return
-	}
-	if len(flag.Shorthand) > 1 {
-		msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand)
-		fmt.Fprintln(f.out(), msg)
-		panic(msg)
-	}
-	if f.shorthands == nil {
-		f.shorthands = make(map[byte]*Flag)
-	}
-	c := flag.Shorthand[0]
-	used, alreadyThere := f.shorthands[c]
-	if alreadyThere {
-		msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name)
-		fmt.Fprintln(f.out(), msg)
-		panic(msg)
-	}
-	f.shorthands[c] = flag
-}
-
-// AddFlagSet adds one FlagSet to another. If a flag is already present in f
-// the flag from newSet will be ignored.
-func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
-	if newSet == nil {
-		return
-	}
-	newSet.VisitAll(func(flag *Flag) {
-		if f.Lookup(flag.Name) == nil {
-			f.AddFlag(flag)
-		}
-	})
-}
-
-// Var defines a flag with the specified name and usage string. The type and
-// value of the flag are represented by the first argument, of type Value, which
-// typically holds a user-defined implementation of Value. For instance, the
-// caller could create a flag that turns a comma-separated string into a slice
-// of strings by giving the slice the methods of Value; in particular, Set would
-// decompose the comma-separated string into the slice.
-func Var(value Value, name string, usage string) {
-	CommandLine.VarP(value, name, "", usage)
-}
-
-// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
-func VarP(value Value, name, shorthand, usage string) {
-	CommandLine.VarP(value, name, shorthand, usage)
-}
-
-// failf prints to standard error a formatted error and usage message and
-// returns the error.
-func (f *FlagSet) failf(format string, a ...interface{}) error {
-	err := fmt.Errorf(format, a...)
-	if f.errorHandling != ContinueOnError {
-		fmt.Fprintln(f.out(), err)
-		f.usage()
-	}
-	return err
-}
-
-// usage calls the Usage method for the flag set, or the usage function if
-// the flag set is CommandLine.
-func (f *FlagSet) usage() {
-	if f == CommandLine {
-		Usage()
-	} else if f.Usage == nil {
-		defaultUsage(f)
-	} else {
-		f.Usage()
-	}
-}
-
-// stripUnknownFlagValue strips the value that may follow an unknown flag:
-//   --unknown (args will be empty)
-//   --unknown --next-flag ... (args will be --next-flag ...)
-//   --unknown arg ... (args will be arg ...)
-func stripUnknownFlagValue(args []string) []string {
-	if len(args) == 0 {
-		//--unknown
-		return args
-	}
-
-	first := args[0]
-	if len(first) > 0 && first[0] == '-' {
-		//--unknown --next-flag ...
-		return args
-	}
-
-	//--unknown arg ... (args will be arg ...)
-	if len(args) > 1 {
-		return args[1:]
-	}
-	return nil
-}
-
-func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
-	a = args
-	name := s[2:]
-	if len(name) == 0 || name[0] == '-' || name[0] == '=' {
-		err = f.failf("bad flag syntax: %s", s)
-		return
-	}
-
-	split := strings.SplitN(name, "=", 2)
-	name = split[0]
-	flag, exists := f.formal[f.normalizeFlagName(name)]
-
-	if !exists {
-		switch {
-		case name == "help":
-			f.usage()
-			return a, ErrHelp
-		case f.ParseErrorsWhitelist.UnknownFlags:
-			// --unknown=unknownval arg ...
-			// we do not want to lose arg in this case
-			if len(split) >= 2 {
-				return a, nil
-			}
-
-			return stripUnknownFlagValue(a), nil
-		default:
-			err = f.failf("unknown flag: --%s", name)
-			return
-		}
-	}
-
-	var value string
-	if len(split) == 2 {
-		// '--flag=arg'
-		value = split[1]
-	} else if flag.NoOptDefVal != "" {
-		// '--flag' (arg was optional)
-		value = flag.NoOptDefVal
-	} else if len(a) > 0 {
-		// '--flag arg'
-		value = a[0]
-		a = a[1:]
-	} else {
-		// '--flag' (arg was required)
-		err = f.failf("flag needs an argument: %s", s)
-		return
-	}
-
-	err = fn(flag, value)
-	if err != nil {
-		f.failf("%v", err)
-	}
-	return
-}
-
-func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
-	outArgs = args
-
-	if strings.HasPrefix(shorthands, "test.") {
-		return
-	}
-
-	outShorts = shorthands[1:]
-	c := shorthands[0]
-
-	flag, exists := f.shorthands[c]
-	if !exists {
-		switch {
-		case c == 'h':
-			f.usage()
-			err = ErrHelp
-			return
-		case f.ParseErrorsWhitelist.UnknownFlags:
-			// '-f=arg arg ...'
-			// we do not want to lose arg in this case
-			if len(shorthands) > 2 && shorthands[1] == '=' {
-				outShorts = ""
-				return
-			}
-
-			outArgs = stripUnknownFlagValue(outArgs)
-			return
-		default:
-			err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
-			return
-		}
-	}
-
-	var value string
-	if len(shorthands) > 2 && shorthands[1] == '=' {
-		// '-f=arg'
-		value = shorthands[2:]
-		outShorts = ""
-	} else if flag.NoOptDefVal != "" {
-		// '-f' (arg was optional)
-		value = flag.NoOptDefVal
-	} else if len(shorthands) > 1 {
-		// '-farg'
-		value = shorthands[1:]
-		outShorts = ""
-	} else if len(args) > 0 {
-		// '-f arg'
-		value = args[0]
-		outArgs = args[1:]
-	} else {
-		// '-f' (arg was required)
-		err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
-		return
-	}
-
-	if flag.ShorthandDeprecated != "" {
-		fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
-	}
-
-	err = fn(flag, value)
-	if err != nil {
-		f.failf("%v", err)
-	}
-	return
-}
-
-func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) {
-	a = args
-	shorthands := s[1:]
-
-	// "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv").
-	for len(shorthands) > 0 {
-		shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn)
-		if err != nil {
-			return
-		}
-	}
-
-	return
-}
-
-func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) {
-	for len(args) > 0 {
-		s := args[0]
-		args = args[1:]
-		if len(s) == 0 || s[0] != '-' || len(s) == 1 {
-			if !f.interspersed {
-				f.args = append(f.args, s)
-				f.args = append(f.args, args...)
-				return nil
-			}
-			f.args = append(f.args, s)
-			continue
-		}
-
-		if s[1] == '-' {
-			if len(s) == 2 { // "--" terminates the flags
-				f.argsLenAtDash = len(f.args)
-				f.args = append(f.args, args...)
-				break
-			}
-			args, err = f.parseLongArg(s, args, fn)
-		} else {
-			args, err = f.parseShortArg(s, args, fn)
-		}
-		if err != nil {
-			return
-		}
-	}
-	return
-}
-
-// Parse parses flag definitions from the argument list, which should not
-// include the command name.  Must be called after all flags in the FlagSet
-// are defined and before flags are accessed by the program.
-// The return value will be ErrHelp if -help was set but not defined.
-func (f *FlagSet) Parse(arguments []string) error {
-	if f.addedGoFlagSets != nil {
-		for _, goFlagSet := range f.addedGoFlagSets {
-			goFlagSet.Parse(nil)
-		}
-	}
-	f.parsed = true
-
-	if len(arguments) == 0 {
-		return nil
-	}
-
-	f.args = make([]string, 0, len(arguments))
-
-	set := func(flag *Flag, value string) error {
-		return f.Set(flag.Name, value)
-	}
-
-	err := f.parseArgs(arguments, set)
-	if err != nil {
-		switch f.errorHandling {
-		case ContinueOnError:
-			return err
-		case ExitOnError:
-			fmt.Println(err)
-			os.Exit(2)
-		case PanicOnError:
-			panic(err)
-		}
-	}
-	return nil
-}
-
-type parseFunc func(flag *Flag, value string) error
-
-// ParseAll parses flag definitions from the argument list, which should not
-// include the command name. The arguments for fn are flag and value. Must be
-// called after all flags in the FlagSet are defined and before flags are
-// accessed by the program. The return value will be ErrHelp if -help was set
-// but not defined.
-func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error {
-	f.parsed = true
-	f.args = make([]string, 0, len(arguments))
-
-	err := f.parseArgs(arguments, fn)
-	if err != nil {
-		switch f.errorHandling {
-		case ContinueOnError:
-			return err
-		case ExitOnError:
-			os.Exit(2)
-		case PanicOnError:
-			panic(err)
-		}
-	}
-	return nil
-}
-
-// Parsed reports whether f.Parse has been called.
-func (f *FlagSet) Parsed() bool {
-	return f.parsed
-}
-
-// Parse parses the command-line flags from os.Args[1:].  Must be called
-// after all flags are defined and before flags are accessed by the program.
-func Parse() {
-	// Ignore errors; CommandLine is set for ExitOnError.
-	CommandLine.Parse(os.Args[1:])
-}
-
-// ParseAll parses the command-line flags from os.Args[1:] and calls fn for each.
-// The arguments for fn are flag and value. Must be called after all flags are
-// defined and before flags are accessed by the program.
-func ParseAll(fn func(flag *Flag, value string) error) {
-	// Ignore errors; CommandLine is set for ExitOnError.
-	CommandLine.ParseAll(os.Args[1:], fn)
-}
-
-// SetInterspersed sets whether to support interspersed option/non-option arguments.
-func SetInterspersed(interspersed bool) {
-	CommandLine.SetInterspersed(interspersed)
-}
-
-// Parsed returns true if the command-line flags have been parsed.
-func Parsed() bool {
-	return CommandLine.Parsed()
-}
-
-// CommandLine is the default set of command-line flags, parsed from os.Args.
-var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
-
-// NewFlagSet returns a new, empty flag set with the specified name,
-// error handling property and SortFlags set to true.
-func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
-	f := &FlagSet{
-		name:          name,
-		errorHandling: errorHandling,
-		argsLenAtDash: -1,
-		interspersed:  true,
-		SortFlags:     true,
-	}
-	return f
-}
-
-// SetInterspersed sets whether to support interspersed option/non-option arguments.
-func (f *FlagSet) SetInterspersed(interspersed bool) {
-	f.interspersed = interspersed
-}
-
-// Init sets the name and error handling property for a flag set.
-// By default, the zero FlagSet uses an empty name and the
-// ContinueOnError error handling policy.
-func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
-	f.name = name
-	f.errorHandling = errorHandling
-	f.argsLenAtDash = -1
-}
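
Two of the less obvious features in the flag.go removed above are name normalization and the unknown-flag whitelist. A minimal sketch combining both (the flag set name, flag names, and the underscore-to-dash rule are illustrative; fs.String comes from the package's string.go helper, not shown in this hunk):

	package main

	import (
		"fmt"
		"strings"

		flag "github.com/spf13/pflag"
	)

	func main() {
		fs := flag.NewFlagSet("app", flag.ContinueOnError)

		// Treat '_' and '-' in flag names as equivalent for both
		// definition and lookup.
		fs.SetNormalizeFunc(func(f *flag.FlagSet, name string) flag.NormalizedName {
			return flag.NormalizedName(strings.Replace(name, "_", "-", -1))
		})
		// Keep parsing when an unknown flag is encountered instead of failing.
		fs.ParseErrorsWhitelist.UnknownFlags = true

		logLevel := fs.String("log-level", "info", "log level")
		_ = fs.Parse([]string{"--log_level=debug", "--unknown"})
		fmt.Println("log-level:", *logLevel) // "debug"
	}
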
diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go
deleted file mode 100644
index a243f81..0000000
--- a/vendor/github.com/spf13/pflag/float32.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package pflag
-
-import "strconv"
-
-// -- float32 Value
-type float32Value float32
-
-func newFloat32Value(val float32, p *float32) *float32Value {
-	*p = val
-	return (*float32Value)(p)
-}
-
-func (f *float32Value) Set(s string) error {
-	v, err := strconv.ParseFloat(s, 32)
-	*f = float32Value(v)
-	return err
-}
-
-func (f *float32Value) Type() string {
-	return "float32"
-}
-
-func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) }
-
-func float32Conv(sval string) (interface{}, error) {
-	v, err := strconv.ParseFloat(sval, 32)
-	if err != nil {
-		return 0, err
-	}
-	return float32(v), nil
-}
-
-// GetFloat32 returns the float32 value of a flag with the given name
-func (f *FlagSet) GetFloat32(name string) (float32, error) {
-	val, err := f.getFlagType(name, "float32", float32Conv)
-	if err != nil {
-		return 0, err
-	}
-	return val.(float32), nil
-}
-
-// Float32Var defines a float32 flag with specified name, default value, and usage string.
-// The argument p points to a float32 variable in which to store the value of the flag.
-func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) {
-	f.VarP(newFloat32Value(value, p), name, "", usage)
-}
-
-// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
-	f.VarP(newFloat32Value(value, p), name, shorthand, usage)
-}
-
-// Float32Var defines a float32 flag with specified name, default value, and usage string.
-// The argument p points to a float32 variable in which to store the value of the flag.
-func Float32Var(p *float32, name string, value float32, usage string) {
-	CommandLine.VarP(newFloat32Value(value, p), name, "", usage)
-}
-
-// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
-func Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
-	CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage)
-}
-
-// Float32 defines a float32 flag with specified name, default value, and usage string.
-// The return value is the address of a float32 variable that stores the value of the flag.
-func (f *FlagSet) Float32(name string, value float32, usage string) *float32 {
-	p := new(float32)
-	f.Float32VarP(p, name, "", value, usage)
-	return p
-}
-
-// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 {
-	p := new(float32)
-	f.Float32VarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// Float32 defines a float32 flag with specified name, default value, and usage string.
-// The return value is the address of a float32 variable that stores the value of the flag.
-func Float32(name string, value float32, usage string) *float32 {
-	return CommandLine.Float32P(name, "", value, usage)
-}
-
-// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
-func Float32P(name, shorthand string, value float32, usage string) *float32 {
-	return CommandLine.Float32P(name, shorthand, value, usage)
-}
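
The typed getters in these files all route through getFlagType, which fails if the flag is undefined or was registered with a different type. A minimal sketch of the pattern (flag set and flag names are illustrative); the same shape applies to GetFloat64, GetInt, GetInt16, and the other typed getters below:

	package main

	import (
		"fmt"

		flag "github.com/spf13/pflag"
	)

	func main() {
		fs := flag.NewFlagSet("calc", flag.ContinueOnError)
		fs.Float32("ratio", 0.5, "blend ratio")
		_ = fs.Parse([]string{"--ratio=0.75"})

		// err is non-nil if "ratio" were undefined or not a float32 flag.
		r, err := fs.GetFloat32("ratio")
		fmt.Println(r, err) // 0.75 <nil>
	}
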
diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go
deleted file mode 100644
index 04b5492..0000000
--- a/vendor/github.com/spf13/pflag/float64.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package pflag
-
-import "strconv"
-
-// -- float64 Value
-type float64Value float64
-
-func newFloat64Value(val float64, p *float64) *float64Value {
-	*p = val
-	return (*float64Value)(p)
-}
-
-func (f *float64Value) Set(s string) error {
-	v, err := strconv.ParseFloat(s, 64)
-	*f = float64Value(v)
-	return err
-}
-
-func (f *float64Value) Type() string {
-	return "float64"
-}
-
-func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) }
-
-func float64Conv(sval string) (interface{}, error) {
-	return strconv.ParseFloat(sval, 64)
-}
-
-// GetFloat64 returns the float64 value of a flag with the given name
-func (f *FlagSet) GetFloat64(name string) (float64, error) {
-	val, err := f.getFlagType(name, "float64", float64Conv)
-	if err != nil {
-		return 0, err
-	}
-	return val.(float64), nil
-}
-
-// Float64Var defines a float64 flag with specified name, default value, and usage string.
-// The argument p points to a float64 variable in which to store the value of the flag.
-func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
-	f.VarP(newFloat64Value(value, p), name, "", usage)
-}
-
-// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
-	f.VarP(newFloat64Value(value, p), name, shorthand, usage)
-}
-
-// Float64Var defines a float64 flag with specified name, default value, and usage string.
-// The argument p points to a float64 variable in which to store the value of the flag.
-func Float64Var(p *float64, name string, value float64, usage string) {
-	CommandLine.VarP(newFloat64Value(value, p), name, "", usage)
-}
-
-// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
-func Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
-	CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage)
-}
-
-// Float64 defines a float64 flag with specified name, default value, and usage string.
-// The return value is the address of a float64 variable that stores the value of the flag.
-func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
-	p := new(float64)
-	f.Float64VarP(p, name, "", value, usage)
-	return p
-}
-
-// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 {
-	p := new(float64)
-	f.Float64VarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// Float64 defines a float64 flag with specified name, default value, and usage string.
-// The return value is the address of a float64 variable that stores the value of the flag.
-func Float64(name string, value float64, usage string) *float64 {
-	return CommandLine.Float64P(name, "", value, usage)
-}
-
-// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
-func Float64P(name, shorthand string, value float64, usage string) *float64 {
-	return CommandLine.Float64P(name, shorthand, value, usage)
-}
diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go
deleted file mode 100644
index d3dd72b..0000000
--- a/vendor/github.com/spf13/pflag/golangflag.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package pflag
-
-import (
-	goflag "flag"
-	"reflect"
-	"strings"
-)
-
-// flagValueWrapper implements pflag.Value around a flag.Value.  The main
-// difference here is the addition of the Type method that returns a string
-// name of the type.  As this is generally unknown, we approximate that with
-// reflection.
-type flagValueWrapper struct {
-	inner    goflag.Value
-	flagType string
-}
-
-// We are just copying the boolFlag interface out of goflag as that is what
-// they use to decide if a flag should get "true" when no arg is given.
-type goBoolFlag interface {
-	goflag.Value
-	IsBoolFlag() bool
-}
-
-func wrapFlagValue(v goflag.Value) Value {
-	// If the flag.Value happens to also be a pflag.Value, just use it directly.
-	if pv, ok := v.(Value); ok {
-		return pv
-	}
-
-	pv := &flagValueWrapper{
-		inner: v,
-	}
-
-	t := reflect.TypeOf(v)
-	if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr {
-		t = t.Elem()
-	}
-
-	pv.flagType = strings.TrimSuffix(t.Name(), "Value")
-	return pv
-}
-
-func (v *flagValueWrapper) String() string {
-	return v.inner.String()
-}
-
-func (v *flagValueWrapper) Set(s string) error {
-	return v.inner.Set(s)
-}
-
-func (v *flagValueWrapper) Type() string {
-	return v.flagType
-}
-
-// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag
-// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessible
-// with both `-v` and `--v` in flags. If the golang flag was more than a single
-// character (ex: `verbose`) it will only be accessible via `--verbose`.
-func PFlagFromGoFlag(goflag *goflag.Flag) *Flag {
-	// Remember the default value as a string; it won't change.
-	flag := &Flag{
-		Name:  goflag.Name,
-		Usage: goflag.Usage,
-		Value: wrapFlagValue(goflag.Value),
-		// Looks like golang flags don't set DefValue correctly  :-(
-		//DefValue: goflag.DefValue,
-		DefValue: goflag.Value.String(),
-	}
-	// Ex: if the golang flag was -v, allow both -v and --v to work
-	if len(flag.Name) == 1 {
-		flag.Shorthand = flag.Name
-	}
-	if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() {
-		flag.NoOptDefVal = "true"
-	}
-	return flag
-}
-
-// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet
-func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) {
-	if f.Lookup(goflag.Name) != nil {
-		return
-	}
-	newflag := PFlagFromGoFlag(goflag)
-	f.AddFlag(newflag)
-}
-
-// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet
-func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) {
-	if newSet == nil {
-		return
-	}
-	newSet.VisitAll(func(goflag *goflag.Flag) {
-		f.AddGoFlag(goflag)
-	})
-	if f.addedGoFlagSets == nil {
-		f.addedGoFlagSets = make([]*goflag.FlagSet, 0)
-	}
-	f.addedGoFlagSets = append(f.addedGoFlagSets, newSet)
-}
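
The goflag bridge deleted above lets a program mix standard-library flag definitions into a pflag set; per the Parse method in flag.go, pflag also marks any added Go FlagSet as parsed so the flag package does not complain. A minimal sketch (the --legacy flag is illustrative):

	package main

	import (
		goflag "flag"
		"fmt"

		flag "github.com/spf13/pflag"
	)

	func main() {
		legacy := goflag.Bool("legacy", false, "defined via the standard library")

		// Adopt every flag from the stdlib set into pflag's default set.
		flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
		flag.Parse()
		fmt.Println("legacy:", *legacy)
	}
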
diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go
deleted file mode 100644
index 1474b89..0000000
--- a/vendor/github.com/spf13/pflag/int.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package pflag
-
-import "strconv"
-
-// -- int Value
-type intValue int
-
-func newIntValue(val int, p *int) *intValue {
-	*p = val
-	return (*intValue)(p)
-}
-
-func (i *intValue) Set(s string) error {
-	v, err := strconv.ParseInt(s, 0, 64)
-	*i = intValue(v)
-	return err
-}
-
-func (i *intValue) Type() string {
-	return "int"
-}
-
-func (i *intValue) String() string { return strconv.Itoa(int(*i)) }
-
-func intConv(sval string) (interface{}, error) {
-	return strconv.Atoi(sval)
-}
-
-// GetInt returns the int value of a flag with the given name
-func (f *FlagSet) GetInt(name string) (int, error) {
-	val, err := f.getFlagType(name, "int", intConv)
-	if err != nil {
-		return 0, err
-	}
-	return val.(int), nil
-}
-
-// IntVar defines an int flag with specified name, default value, and usage string.
-// The argument p points to an int variable in which to store the value of the flag.
-func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
-	f.VarP(newIntValue(value, p), name, "", usage)
-}
-
-// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) {
-	f.VarP(newIntValue(value, p), name, shorthand, usage)
-}
-
-// IntVar defines an int flag with specified name, default value, and usage string.
-// The argument p points to an int variable in which to store the value of the flag.
-func IntVar(p *int, name string, value int, usage string) {
-	CommandLine.VarP(newIntValue(value, p), name, "", usage)
-}
-
-// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
-func IntVarP(p *int, name, shorthand string, value int, usage string) {
-	CommandLine.VarP(newIntValue(value, p), name, shorthand, usage)
-}
-
-// Int defines an int flag with specified name, default value, and usage string.
-// The return value is the address of an int variable that stores the value of the flag.
-func (f *FlagSet) Int(name string, value int, usage string) *int {
-	p := new(int)
-	f.IntVarP(p, name, "", value, usage)
-	return p
-}
-
-// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int {
-	p := new(int)
-	f.IntVarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// Int defines an int flag with specified name, default value, and usage string.
-// The return value is the address of an int variable that stores the value of the flag.
-func Int(name string, value int, usage string) *int {
-	return CommandLine.IntP(name, "", value, usage)
-}
-
-// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
-func IntP(name, shorthand string, value int, usage string) *int {
-	return CommandLine.IntP(name, shorthand, value, usage)
-}
diff --git a/vendor/github.com/spf13/pflag/int16.go b/vendor/github.com/spf13/pflag/int16.go
deleted file mode 100644
index f1a01d0..0000000
--- a/vendor/github.com/spf13/pflag/int16.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package pflag
-
-import "strconv"
-
-// -- int16 Value
-type int16Value int16
-
-func newInt16Value(val int16, p *int16) *int16Value {
-	*p = val
-	return (*int16Value)(p)
-}
-
-func (i *int16Value) Set(s string) error {
-	v, err := strconv.ParseInt(s, 0, 16)
-	*i = int16Value(v)
-	return err
-}
-
-func (i *int16Value) Type() string {
-	return "int16"
-}
-
-func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) }
-
-func int16Conv(sval string) (interface{}, error) {
-	v, err := strconv.ParseInt(sval, 0, 16)
-	if err != nil {
-		return 0, err
-	}
-	return int16(v), nil
-}
-
-// GetInt16 returns the int16 value of a flag with the given name
-func (f *FlagSet) GetInt16(name string) (int16, error) {
-	val, err := f.getFlagType(name, "int16", int16Conv)
-	if err != nil {
-		return 0, err
-	}
-	return val.(int16), nil
-}
-
-// Int16Var defines an int16 flag with specified name, default value, and usage string.
-// The argument p points to an int16 variable in which to store the value of the flag.
-func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) {
-	f.VarP(newInt16Value(value, p), name, "", usage)
-}
-
-// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
-	f.VarP(newInt16Value(value, p), name, shorthand, usage)
-}
-
-// Int16Var defines an int16 flag with specified name, default value, and usage string.
-// The argument p points to an int16 variable in which to store the value of the flag.
-func Int16Var(p *int16, name string, value int16, usage string) {
-	CommandLine.VarP(newInt16Value(value, p), name, "", usage)
-}
-
-// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
-func Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
-	CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage)
-}
-
-// Int16 defines an int16 flag with specified name, default value, and usage string.
-// The return value is the address of an int16 variable that stores the value of the flag.
-func (f *FlagSet) Int16(name string, value int16, usage string) *int16 {
-	p := new(int16)
-	f.Int16VarP(p, name, "", value, usage)
-	return p
-}
-
-// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 {
-	p := new(int16)
-	f.Int16VarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// Int16 defines an int16 flag with specified name, default value, and usage string.
-// The return value is the address of an int16 variable that stores the value of the flag.
-func Int16(name string, value int16, usage string) *int16 {
-	return CommandLine.Int16P(name, "", value, usage)
-}
-
-// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
-func Int16P(name, shorthand string, value int16, usage string) *int16 {
-	return CommandLine.Int16P(name, shorthand, value, usage)
-}
diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go
deleted file mode 100644
index 9b95944..0000000
--- a/vendor/github.com/spf13/pflag/int32.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package pflag
-
-import "strconv"
-
-// -- int32 Value
-type int32Value int32
-
-func newInt32Value(val int32, p *int32) *int32Value {
-	*p = val
-	return (*int32Value)(p)
-}
-
-func (i *int32Value) Set(s string) error {
-	v, err := strconv.ParseInt(s, 0, 32)
-	*i = int32Value(v)
-	return err
-}
-
-func (i *int32Value) Type() string {
-	return "int32"
-}
-
-func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) }
-
-func int32Conv(sval string) (interface{}, error) {
-	v, err := strconv.ParseInt(sval, 0, 32)
-	if err != nil {
-		return 0, err
-	}
-	return int32(v), nil
-}
-
-// GetInt32 returns the int32 value of a flag with the given name
-func (f *FlagSet) GetInt32(name string) (int32, error) {
-	val, err := f.getFlagType(name, "int32", int32Conv)
-	if err != nil {
-		return 0, err
-	}
-	return val.(int32), nil
-}
-
-// Int32Var defines an int32 flag with specified name, default value, and usage string.
-// The argument p points to an int32 variable in which to store the value of the flag.
-func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) {
-	f.VarP(newInt32Value(value, p), name, "", usage)
-}
-
-// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
-	f.VarP(newInt32Value(value, p), name, shorthand, usage)
-}
-
-// Int32Var defines an int32 flag with specified name, default value, and usage string.
-// The argument p points to an int32 variable in which to store the value of the flag.
-func Int32Var(p *int32, name string, value int32, usage string) {
-	CommandLine.VarP(newInt32Value(value, p), name, "", usage)
-}
-
-// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
-func Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
-	CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage)
-}
-
-// Int32 defines an int32 flag with specified name, default value, and usage string.
-// The return value is the address of an int32 variable that stores the value of the flag.
-func (f *FlagSet) Int32(name string, value int32, usage string) *int32 {
-	p := new(int32)
-	f.Int32VarP(p, name, "", value, usage)
-	return p
-}
-
-// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 {
-	p := new(int32)
-	f.Int32VarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// Int32 defines an int32 flag with specified name, default value, and usage string.
-// The return value is the address of an int32 variable that stores the value of the flag.
-func Int32(name string, value int32, usage string) *int32 {
-	return CommandLine.Int32P(name, "", value, usage)
-}
-
-// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
-func Int32P(name, shorthand string, value int32, usage string) *int32 {
-	return CommandLine.Int32P(name, shorthand, value, usage)
-}
diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go
deleted file mode 100644
index 0026d78..0000000
--- a/vendor/github.com/spf13/pflag/int64.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package pflag
-
-import "strconv"
-
-// -- int64 Value
-type int64Value int64
-
-func newInt64Value(val int64, p *int64) *int64Value {
-	*p = val
-	return (*int64Value)(p)
-}
-
-func (i *int64Value) Set(s string) error {
-	v, err := strconv.ParseInt(s, 0, 64)
-	*i = int64Value(v)
-	return err
-}
-
-func (i *int64Value) Type() string {
-	return "int64"
-}
-
-func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) }
-
-func int64Conv(sval string) (interface{}, error) {
-	return strconv.ParseInt(sval, 0, 64)
-}
-
-// GetInt64 returns the int64 value of a flag with the given name
-func (f *FlagSet) GetInt64(name string) (int64, error) {
-	val, err := f.getFlagType(name, "int64", int64Conv)
-	if err != nil {
-		return 0, err
-	}
-	return val.(int64), nil
-}
-
-// Int64Var defines an int64 flag with specified name, default value, and usage string.
-// The argument p points to an int64 variable in which to store the value of the flag.
-func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
-	f.VarP(newInt64Value(value, p), name, "", usage)
-}
-
-// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
-	f.VarP(newInt64Value(value, p), name, shorthand, usage)
-}
-
-// Int64Var defines an int64 flag with specified name, default value, and usage string.
-// The argument p points to an int64 variable in which to store the value of the flag.
-func Int64Var(p *int64, name string, value int64, usage string) {
-	CommandLine.VarP(newInt64Value(value, p), name, "", usage)
-}
-
-// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
-func Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
-	CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage)
-}
-
-// Int64 defines an int64 flag with specified name, default value, and usage string.
-// The return value is the address of an int64 variable that stores the value of the flag.
-func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
-	p := new(int64)
-	f.Int64VarP(p, name, "", value, usage)
-	return p
-}
-
-// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 {
-	p := new(int64)
-	f.Int64VarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// Int64 defines an int64 flag with specified name, default value, and usage string.
-// The return value is the address of an int64 variable that stores the value of the flag.
-func Int64(name string, value int64, usage string) *int64 {
-	return CommandLine.Int64P(name, "", value, usage)
-}
-
-// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
-func Int64P(name, shorthand string, value int64, usage string) *int64 {
-	return CommandLine.Int64P(name, shorthand, value, usage)
-}
diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go
deleted file mode 100644
index 4da9222..0000000
--- a/vendor/github.com/spf13/pflag/int8.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package pflag
-
-import "strconv"
-
-// -- int8 Value
-type int8Value int8
-
-func newInt8Value(val int8, p *int8) *int8Value {
-	*p = val
-	return (*int8Value)(p)
-}
-
-func (i *int8Value) Set(s string) error {
-	v, err := strconv.ParseInt(s, 0, 8)
-	*i = int8Value(v)
-	return err
-}
-
-func (i *int8Value) Type() string {
-	return "int8"
-}
-
-func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) }
-
-func int8Conv(sval string) (interface{}, error) {
-	v, err := strconv.ParseInt(sval, 0, 8)
-	if err != nil {
-		return 0, err
-	}
-	return int8(v), nil
-}
-
-// GetInt8 returns the int8 value of a flag with the given name
-func (f *FlagSet) GetInt8(name string) (int8, error) {
-	val, err := f.getFlagType(name, "int8", int8Conv)
-	if err != nil {
-		return 0, err
-	}
-	return val.(int8), nil
-}
-
-// Int8Var defines an int8 flag with specified name, default value, and usage string.
-// The argument p points to an int8 variable in which to store the value of the flag.
-func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) {
-	f.VarP(newInt8Value(value, p), name, "", usage)
-}
-
-// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
-	f.VarP(newInt8Value(value, p), name, shorthand, usage)
-}
-
-// Int8Var defines an int8 flag with specified name, default value, and usage string.
-// The argument p points to an int8 variable in which to store the value of the flag.
-func Int8Var(p *int8, name string, value int8, usage string) {
-	CommandLine.VarP(newInt8Value(value, p), name, "", usage)
-}
-
-// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
-func Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
-	CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage)
-}
-
-// Int8 defines an int8 flag with specified name, default value, and usage string.
-// The return value is the address of an int8 variable that stores the value of the flag.
-func (f *FlagSet) Int8(name string, value int8, usage string) *int8 {
-	p := new(int8)
-	f.Int8VarP(p, name, "", value, usage)
-	return p
-}
-
-// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 {
-	p := new(int8)
-	f.Int8VarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// Int8 defines an int8 flag with specified name, default value, and usage string.
-// The return value is the address of an int8 variable that stores the value of the flag.
-func Int8(name string, value int8, usage string) *int8 {
-	return CommandLine.Int8P(name, "", value, usage)
-}
-
-// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
-func Int8P(name, shorthand string, value int8, usage string) *int8 {
-	return CommandLine.Int8P(name, shorthand, value, usage)
-}
diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go
deleted file mode 100644
index 1e7c9ed..0000000
--- a/vendor/github.com/spf13/pflag/int_slice.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package pflag
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-)
-
-// -- intSlice Value
-type intSliceValue struct {
-	value   *[]int
-	changed bool
-}
-
-func newIntSliceValue(val []int, p *[]int) *intSliceValue {
-	isv := new(intSliceValue)
-	isv.value = p
-	*isv.value = val
-	return isv
-}
-
-func (s *intSliceValue) Set(val string) error {
-	ss := strings.Split(val, ",")
-	out := make([]int, len(ss))
-	for i, d := range ss {
-		var err error
-		out[i], err = strconv.Atoi(d)
-		if err != nil {
-			return err
-		}
-
-	}
-	if !s.changed {
-		*s.value = out
-	} else {
-		*s.value = append(*s.value, out...)
-	}
-	s.changed = true
-	return nil
-}
-
-func (s *intSliceValue) Type() string {
-	return "intSlice"
-}
-
-func (s *intSliceValue) String() string {
-	out := make([]string, len(*s.value))
-	for i, d := range *s.value {
-		out[i] = fmt.Sprintf("%d", d)
-	}
-	return "[" + strings.Join(out, ",") + "]"
-}
-
-func intSliceConv(val string) (interface{}, error) {
-	val = strings.Trim(val, "[]")
-	// Empty string would cause a slice with one (empty) entry
-	if len(val) == 0 {
-		return []int{}, nil
-	}
-	ss := strings.Split(val, ",")
-	out := make([]int, len(ss))
-	for i, d := range ss {
-		var err error
-		out[i], err = strconv.Atoi(d)
-		if err != nil {
-			return nil, err
-		}
-
-	}
-	return out, nil
-}
-
-// GetIntSlice returns the []int value of a flag with the given name
-func (f *FlagSet) GetIntSlice(name string) ([]int, error) {
-	val, err := f.getFlagType(name, "intSlice", intSliceConv)
-	if err != nil {
-		return []int{}, err
-	}
-	return val.([]int), nil
-}
-
-// IntSliceVar defines an intSlice flag with specified name, default value, and usage string.
-// The argument p points to a []int variable in which to store the value of the flag.
-func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) {
-	f.VarP(newIntSliceValue(value, p), name, "", usage)
-}
-
-// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
-	f.VarP(newIntSliceValue(value, p), name, shorthand, usage)
-}
-
-// IntSliceVar defines a []int flag with specified name, default value, and usage string.
-// The argument p points to a []int variable in which to store the value of the flag.
-func IntSliceVar(p *[]int, name string, value []int, usage string) {
-	CommandLine.VarP(newIntSliceValue(value, p), name, "", usage)
-}
-
-// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
-func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
-	CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage)
-}
-
-// IntSlice defines a []int flag with specified name, default value, and usage string.
-// The return value is the address of a []int variable that stores the value of the flag.
-func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int {
-	p := []int{}
-	f.IntSliceVarP(&p, name, "", value, usage)
-	return &p
-}
-
-// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int {
-	p := []int{}
-	f.IntSliceVarP(&p, name, shorthand, value, usage)
-	return &p
-}
-
-// IntSlice defines a []int flag with specified name, default value, and usage string.
-// The return value is the address of a []int variable that stores the value of the flag.
-func IntSlice(name string, value []int, usage string) *[]int {
-	return CommandLine.IntSliceP(name, "", value, usage)
-}
-
-// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
-func IntSliceP(name, shorthand string, value []int, usage string) *[]int {
-	return CommandLine.IntSliceP(name, shorthand, value, usage)
-}
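The deleted int_slice.go backed the []int flag type; a short sketch of its replace-then-append semantics, under the same module-dependency assumption as above:

    package main

    import (
        "fmt"

        flag "github.com/spf13/pflag"
    )

    func main() {
        ids := flag.IntSlice("id", []int{1, 2}, "ids to process")
        flag.Parse()

        // The first --id replaces the default; later occurrences append,
        // and each occurrence may itself be comma-separated:
        //   --id 3,4 --id 5   =>   [3 4 5]
        fmt.Println(*ids)
    }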
diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go
deleted file mode 100644
index 3d414ba..0000000
--- a/vendor/github.com/spf13/pflag/ip.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package pflag
-
-import (
-	"fmt"
-	"net"
-	"strings"
-)
-
-// -- net.IP value
-type ipValue net.IP
-
-func newIPValue(val net.IP, p *net.IP) *ipValue {
-	*p = val
-	return (*ipValue)(p)
-}
-
-func (i *ipValue) String() string { return net.IP(*i).String() }
-func (i *ipValue) Set(s string) error {
-	ip := net.ParseIP(strings.TrimSpace(s))
-	if ip == nil {
-		return fmt.Errorf("failed to parse IP: %q", s)
-	}
-	*i = ipValue(ip)
-	return nil
-}
-
-func (i *ipValue) Type() string {
-	return "ip"
-}
-
-func ipConv(sval string) (interface{}, error) {
-	ip := net.ParseIP(sval)
-	if ip != nil {
-		return ip, nil
-	}
-	return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
-}
-
-// GetIP returns the net.IP value of a flag with the given name
-func (f *FlagSet) GetIP(name string) (net.IP, error) {
-	val, err := f.getFlagType(name, "ip", ipConv)
-	if err != nil {
-		return nil, err
-	}
-	return val.(net.IP), nil
-}
-
-// IPVar defines a net.IP flag with specified name, default value, and usage string.
-// The argument p points to a net.IP variable in which to store the value of the flag.
-func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) {
-	f.VarP(newIPValue(value, p), name, "", usage)
-}
-
-// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
-	f.VarP(newIPValue(value, p), name, shorthand, usage)
-}
-
-// IPVar defines a net.IP flag with specified name, default value, and usage string.
-// The argument p points to a net.IP variable in which to store the value of the flag.
-func IPVar(p *net.IP, name string, value net.IP, usage string) {
-	CommandLine.VarP(newIPValue(value, p), name, "", usage)
-}
-
-// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
-func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
-	CommandLine.VarP(newIPValue(value, p), name, shorthand, usage)
-}
-
-// IP defines a net.IP flag with specified name, default value, and usage string.
-// The return value is the address of a net.IP variable that stores the value of the flag.
-func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP {
-	p := new(net.IP)
-	f.IPVarP(p, name, "", value, usage)
-	return p
-}
-
-// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP {
-	p := new(net.IP)
-	f.IPVarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// IP defines a net.IP flag with specified name, default value, and usage string.
-// The return value is the address of a net.IP variable that stores the value of the flag.
-func IP(name string, value net.IP, usage string) *net.IP {
-	return CommandLine.IPP(name, "", value, usage)
-}
-
-// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
-func IPP(name, shorthand string, value net.IP, usage string) *net.IP {
-	return CommandLine.IPP(name, shorthand, value, usage)
-}
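A similar sketch for the deleted ip.go, which wrapped net.IP as a flag value (same module-dependency assumption; --bind is an illustrative name):

    package main

    import (
        "fmt"
        "net"

        flag "github.com/spf13/pflag"
    )

    func main() {
        bind := flag.IP("bind", net.ParseIP("127.0.0.1"), "address to bind")
        flag.Parse()

        // ipValue.Set trims whitespace and fails on anything net.ParseIP
        // rejects, so "--bind not-an-ip" aborts flag parsing with an error.
        fmt.Println(bind.String())
    }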
diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go
deleted file mode 100644
index 7dd196f..0000000
--- a/vendor/github.com/spf13/pflag/ip_slice.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package pflag
-
-import (
-	"fmt"
-	"io"
-	"net"
-	"strings"
-)
-
-// -- ipSlice Value
-type ipSliceValue struct {
-	value   *[]net.IP
-	changed bool
-}
-
-func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue {
-	ipsv := new(ipSliceValue)
-	ipsv.value = p
-	*ipsv.value = val
-	return ipsv
-}
-
-// Set parses the comma-separated list of IP addresses and assigns the result as the []net.IP value of this flag.
-// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended.
-func (s *ipSliceValue) Set(val string) error {
-
-	// remove all quote characters
-	rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
-
-	// read flag arguments with CSV parser
-	ipStrSlice, err := readAsCSV(rmQuote.Replace(val))
-	if err != nil && err != io.EOF {
-		return err
-	}
-
-	// parse ip values into slice
-	out := make([]net.IP, 0, len(ipStrSlice))
-	for _, ipStr := range ipStrSlice {
-		ip := net.ParseIP(strings.TrimSpace(ipStr))
-		if ip == nil {
-			return fmt.Errorf("invalid string being converted to IP address: %s", ipStr)
-		}
-		out = append(out, ip)
-	}
-
-	if !s.changed {
-		*s.value = out
-	} else {
-		*s.value = append(*s.value, out...)
-	}
-
-	s.changed = true
-
-	return nil
-}
-
-// Type returns a string that uniquely represents this flag's type.
-func (s *ipSliceValue) Type() string {
-	return "ipSlice"
-}
-
-// String defines a "native" format for this net.IP slice flag value.
-func (s *ipSliceValue) String() string {
-
-	ipStrSlice := make([]string, len(*s.value))
-	for i, ip := range *s.value {
-		ipStrSlice[i] = ip.String()
-	}
-
-	out, _ := writeAsCSV(ipStrSlice)
-
-	return "[" + out + "]"
-}
-
-func ipSliceConv(val string) (interface{}, error) {
-	val = strings.Trim(val, "[]")
-	// Empty string would cause a slice with one (empty) entry
-	if len(val) == 0 {
-		return []net.IP{}, nil
-	}
-	ss := strings.Split(val, ",")
-	out := make([]net.IP, len(ss))
-	for i, sval := range ss {
-		ip := net.ParseIP(strings.TrimSpace(sval))
-		if ip == nil {
-			return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
-		}
-		out[i] = ip
-	}
-	return out, nil
-}
-
-// GetIPSlice returns the []net.IP value of a flag with the given name
-func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) {
-	val, err := f.getFlagType(name, "ipSlice", ipSliceConv)
-	if err != nil {
-		return []net.IP{}, err
-	}
-	return val.([]net.IP), nil
-}
-
-// IPSliceVar defines an ipSlice flag with specified name, default value, and usage string.
-// The argument p points to a []net.IP variable in which to store the value of the flag.
-func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
-	f.VarP(newIPSliceValue(value, p), name, "", usage)
-}
-
-// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
-	f.VarP(newIPSliceValue(value, p), name, shorthand, usage)
-}
-
-// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string.
-// The argument p points to a []net.IP variable in which to store the value of the flag.
-func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
-	CommandLine.VarP(newIPSliceValue(value, p), name, "", usage)
-}
-
-// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
-func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
-	CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage)
-}
-
-// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
-// The return value is the address of a []net.IP variable that stores the value of that flag.
-func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP {
-	p := []net.IP{}
-	f.IPSliceVarP(&p, name, "", value, usage)
-	return &p
-}
-
-// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
-	p := []net.IP{}
-	f.IPSliceVarP(&p, name, shorthand, value, usage)
-	return &p
-}
-
-// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
-// The return value is the address of a []net.IP variable that stores the value of the flag.
-func IPSlice(name string, value []net.IP, usage string) *[]net.IP {
-	return CommandLine.IPSliceP(name, "", value, usage)
-}
-
-// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
-func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
-	return CommandLine.IPSliceP(name, shorthand, value, usage)
-}
diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go
deleted file mode 100644
index 5bd44bd..0000000
--- a/vendor/github.com/spf13/pflag/ipmask.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package pflag
-
-import (
-	"fmt"
-	"net"
-	"strconv"
-)
-
-// -- net.IPMask value
-type ipMaskValue net.IPMask
-
-func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue {
-	*p = val
-	return (*ipMaskValue)(p)
-}
-
-func (i *ipMaskValue) String() string { return net.IPMask(*i).String() }
-func (i *ipMaskValue) Set(s string) error {
-	ip := ParseIPv4Mask(s)
-	if ip == nil {
-		return fmt.Errorf("failed to parse IP mask: %q", s)
-	}
-	*i = ipMaskValue(ip)
-	return nil
-}
-
-func (i *ipMaskValue) Type() string {
-	return "ipMask"
-}
-
-// ParseIPv4Mask parses an IPv4 netmask written in IP form (e.g. 255.255.255.0).
-// This function should really belong to the net package.
-func ParseIPv4Mask(s string) net.IPMask {
-	mask := net.ParseIP(s)
-	if mask == nil {
-		if len(s) != 8 {
-			return nil
-		}
-		// net.IPMask.String() actually outputs things like ffffff00
-		// so write a horrible parser for that as well  :-(
-		m := []int{}
-		for i := 0; i < 4; i++ {
-			b := "0x" + s[2*i:2*i+2]
-			d, err := strconv.ParseInt(b, 0, 0)
-			if err != nil {
-				return nil
-			}
-			m = append(m, int(d))
-		}
-		s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3])
-		mask = net.ParseIP(s)
-		if mask == nil {
-			return nil
-		}
-	}
-	return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15])
-}
-
-func parseIPv4Mask(sval string) (interface{}, error) {
-	mask := ParseIPv4Mask(sval)
-	if mask == nil {
-		return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval)
-	}
-	return mask, nil
-}
-
-// GetIPv4Mask returns the net.IPMask value of a flag with the given name
-func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) {
-	val, err := f.getFlagType(name, "ipMask", parseIPv4Mask)
-	if err != nil {
-		return nil, err
-	}
-	return val.(net.IPMask), nil
-}
-
-// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
-// The argument p points to a net.IPMask variable in which to store the value of the flag.
-func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
-	f.VarP(newIPMaskValue(value, p), name, "", usage)
-}
-
-// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
-	f.VarP(newIPMaskValue(value, p), name, shorthand, usage)
-}
-
-// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
-// The argument p points to a net.IPMask variable in which to store the value of the flag.
-func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
-	CommandLine.VarP(newIPMaskValue(value, p), name, "", usage)
-}
-
-// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
-func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
-	CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage)
-}
-
-// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
-// The return value is the address of a net.IPMask variable that stores the value of the flag.
-func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask {
-	p := new(net.IPMask)
-	f.IPMaskVarP(p, name, "", value, usage)
-	return p
-}
-
-// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
-	p := new(net.IPMask)
-	f.IPMaskVarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
-// The return value is the address of a net.IPMask variable that stores the value of the flag.
-func IPMask(name string, value net.IPMask, usage string) *net.IPMask {
-	return CommandLine.IPMaskP(name, "", value, usage)
-}
-
-// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
-func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
-	return CommandLine.IPMaskP(name, shorthand, value, usage)
-}
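The deleted ipmask.go is notable for ParseIPv4Mask, which accepts both the dotted-quad form and the eight-hex-digit form that net.IPMask.String() emits. A small check of that equivalence, under the same assumptions as above:

    package main

    import (
        "fmt"

        flag "github.com/spf13/pflag"
    )

    func main() {
        // Both spellings denote a /24 mask; net.IPMask.String() prints
        // the hex form, which is why the parser accepts it as input too.
        m1 := flag.ParseIPv4Mask("255.255.255.0")
        m2 := flag.ParseIPv4Mask("ffffff00")
        fmt.Println(m1.String(), m2.String()) // ffffff00 ffffff00
    }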
diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go
deleted file mode 100644
index e2c1b8b..0000000
--- a/vendor/github.com/spf13/pflag/ipnet.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package pflag
-
-import (
-	"fmt"
-	"net"
-	"strings"
-)
-
-// IPNet adapts net.IPNet for use as a flag.
-type ipNetValue net.IPNet
-
-func (ipnet ipNetValue) String() string {
-	n := net.IPNet(ipnet)
-	return n.String()
-}
-
-func (ipnet *ipNetValue) Set(value string) error {
-	_, n, err := net.ParseCIDR(strings.TrimSpace(value))
-	if err != nil {
-		return err
-	}
-	*ipnet = ipNetValue(*n)
-	return nil
-}
-
-func (*ipNetValue) Type() string {
-	return "ipNet"
-}
-
-func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue {
-	*p = val
-	return (*ipNetValue)(p)
-}
-
-func ipNetConv(sval string) (interface{}, error) {
-	_, n, err := net.ParseCIDR(strings.TrimSpace(sval))
-	if err == nil {
-		return *n, nil
-	}
-	return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval)
-}
-
-// GetIPNet returns the net.IPNet value of a flag with the given name
-func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) {
-	val, err := f.getFlagType(name, "ipNet", ipNetConv)
-	if err != nil {
-		return net.IPNet{}, err
-	}
-	return val.(net.IPNet), nil
-}
-
-// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
-// The argument p points to a net.IPNet variable in which to store the value of the flag.
-func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
-	f.VarP(newIPNetValue(value, p), name, "", usage)
-}
-
-// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
-	f.VarP(newIPNetValue(value, p), name, shorthand, usage)
-}
-
-// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
-// The argument p points to a net.IPNet variable in which to store the value of the flag.
-func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
-	CommandLine.VarP(newIPNetValue(value, p), name, "", usage)
-}
-
-// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
-func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
-	CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage)
-}
-
-// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
-// The return value is the address of a net.IPNet variable that stores the value of the flag.
-func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet {
-	p := new(net.IPNet)
-	f.IPNetVarP(p, name, "", value, usage)
-	return p
-}
-
-// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
-	p := new(net.IPNet)
-	f.IPNetVarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
-// The return value is the address of a net.IPNet variable that stores the value of the flag.
-func IPNet(name string, value net.IPNet, usage string) *net.IPNet {
-	return CommandLine.IPNetP(name, "", value, usage)
-}
-
-// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
-func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
-	return CommandLine.IPNetP(name, shorthand, value, usage)
-}
diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go
deleted file mode 100644
index 04e0a26..0000000
--- a/vendor/github.com/spf13/pflag/string.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package pflag
-
-// -- string Value
-type stringValue string
-
-func newStringValue(val string, p *string) *stringValue {
-	*p = val
-	return (*stringValue)(p)
-}
-
-func (s *stringValue) Set(val string) error {
-	*s = stringValue(val)
-	return nil
-}
-func (s *stringValue) Type() string {
-	return "string"
-}
-
-func (s *stringValue) String() string { return string(*s) }
-
-func stringConv(sval string) (interface{}, error) {
-	return sval, nil
-}
-
-// GetString returns the string value of a flag with the given name
-func (f *FlagSet) GetString(name string) (string, error) {
-	val, err := f.getFlagType(name, "string", stringConv)
-	if err != nil {
-		return "", err
-	}
-	return val.(string), nil
-}
-
-// StringVar defines a string flag with specified name, default value, and usage string.
-// The argument p points to a string variable in which to store the value of the flag.
-func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
-	f.VarP(newStringValue(value, p), name, "", usage)
-}
-
-// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) {
-	f.VarP(newStringValue(value, p), name, shorthand, usage)
-}
-
-// StringVar defines a string flag with specified name, default value, and usage string.
-// The argument p points to a string variable in which to store the value of the flag.
-func StringVar(p *string, name string, value string, usage string) {
-	CommandLine.VarP(newStringValue(value, p), name, "", usage)
-}
-
-// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
-func StringVarP(p *string, name, shorthand string, value string, usage string) {
-	CommandLine.VarP(newStringValue(value, p), name, shorthand, usage)
-}
-
-// String defines a string flag with specified name, default value, and usage string.
-// The return value is the address of a string variable that stores the value of the flag.
-func (f *FlagSet) String(name string, value string, usage string) *string {
-	p := new(string)
-	f.StringVarP(p, name, "", value, usage)
-	return p
-}
-
-// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string {
-	p := new(string)
-	f.StringVarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// String defines a string flag with specified name, default value, and usage string.
-// The return value is the address of a string variable that stores the value of the flag.
-func String(name string, value string, usage string) *string {
-	return CommandLine.StringP(name, "", value, usage)
-}
-
-// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
-func StringP(name, shorthand string, value string, usage string) *string {
-	return CommandLine.StringP(name, shorthand, value, usage)
-}
diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go
deleted file mode 100644
index fa7bc60..0000000
--- a/vendor/github.com/spf13/pflag/string_array.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package pflag
-
-// -- stringArray Value
-type stringArrayValue struct {
-	value   *[]string
-	changed bool
-}
-
-func newStringArrayValue(val []string, p *[]string) *stringArrayValue {
-	ssv := new(stringArrayValue)
-	ssv.value = p
-	*ssv.value = val
-	return ssv
-}
-
-func (s *stringArrayValue) Set(val string) error {
-	if !s.changed {
-		*s.value = []string{val}
-		s.changed = true
-	} else {
-		*s.value = append(*s.value, val)
-	}
-	return nil
-}
-
-func (s *stringArrayValue) Type() string {
-	return "stringArray"
-}
-
-func (s *stringArrayValue) String() string {
-	str, _ := writeAsCSV(*s.value)
-	return "[" + str + "]"
-}
-
-func stringArrayConv(sval string) (interface{}, error) {
-	sval = sval[1 : len(sval)-1]
-	// An empty string would cause an array with one (empty) string
-	if len(sval) == 0 {
-		return []string{}, nil
-	}
-	return readAsCSV(sval)
-}
-
-// GetStringArray returns the []string value of a flag with the given name
-func (f *FlagSet) GetStringArray(name string) ([]string, error) {
-	val, err := f.getFlagType(name, "stringArray", stringArrayConv)
-	if err != nil {
-		return []string{}, err
-	}
-	return val.([]string), nil
-}
-
-// StringArrayVar defines a string flag with specified name, default value, and usage string.
-// The argument p points to a []string variable in which to store the values from repeated uses of the flag.
-// The value of each argument will not be split on commas. Use a StringSlice for that.
-func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) {
-	f.VarP(newStringArrayValue(value, p), name, "", usage)
-}
-
-// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
-	f.VarP(newStringArrayValue(value, p), name, shorthand, usage)
-}
-
-// StringArrayVar defines a string flag with specified name, default value, and usage string.
-// The argument p points to a []string variable in which to store the value of the flag.
-// The value of each argument will not be split on commas. Use a StringSlice for that.
-func StringArrayVar(p *[]string, name string, value []string, usage string) {
-	CommandLine.VarP(newStringArrayValue(value, p), name, "", usage)
-}
-
-// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
-func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
-	CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage)
-}
-
-// StringArray defines a string flag with specified name, default value, and usage string.
-// The return value is the address of a []string variable that stores the value of the flag.
-// The value of each argument will not be split on commas. Use a StringSlice for that.
-func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string {
-	p := []string{}
-	f.StringArrayVarP(&p, name, "", value, usage)
-	return &p
-}
-
-// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string {
-	p := []string{}
-	f.StringArrayVarP(&p, name, shorthand, value, usage)
-	return &p
-}
-
-// StringArray defines a string flag with specified name, default value, and usage string.
-// The return value is the address of a []string variable that stores the value of the flag.
-// The value of each argument will not be split on commas. Use a StringSlice for that.
-func StringArray(name string, value []string, usage string) *[]string {
-	return CommandLine.StringArrayP(name, "", value, usage)
-}
-
-// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
-func StringArrayP(name, shorthand string, value []string, usage string) *[]string {
-	return CommandLine.StringArrayP(name, shorthand, value, usage)
-}
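The difference between the string_array.go deleted above and the string_slice.go deleted next is easy to miss, so a contrasting sketch (same assumptions as above; --header and --tag are illustrative names):

    package main

    import (
        "fmt"

        flag "github.com/spf13/pflag"
    )

    func main() {
        arr := flag.StringArray("header", nil, "repeatable; no comma splitting")
        sli := flag.StringSlice("tag", nil, "repeatable; commas split values")
        flag.Parse()

        // Given: --header "a,b" --tag "a,b"
        //   *arr == []string{"a,b"}     (argument kept verbatim)
        //   *sli == []string{"a", "b"}  (argument parsed as CSV)
        fmt.Println(*arr, *sli)
    }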
diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go
deleted file mode 100644
index 0cd3ccc..0000000
--- a/vendor/github.com/spf13/pflag/string_slice.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package pflag
-
-import (
-	"bytes"
-	"encoding/csv"
-	"strings"
-)
-
-// -- stringSlice Value
-type stringSliceValue struct {
-	value   *[]string
-	changed bool
-}
-
-func newStringSliceValue(val []string, p *[]string) *stringSliceValue {
-	ssv := new(stringSliceValue)
-	ssv.value = p
-	*ssv.value = val
-	return ssv
-}
-
-func readAsCSV(val string) ([]string, error) {
-	if val == "" {
-		return []string{}, nil
-	}
-	stringReader := strings.NewReader(val)
-	csvReader := csv.NewReader(stringReader)
-	return csvReader.Read()
-}
-
-func writeAsCSV(vals []string) (string, error) {
-	b := &bytes.Buffer{}
-	w := csv.NewWriter(b)
-	err := w.Write(vals)
-	if err != nil {
-		return "", err
-	}
-	w.Flush()
-	return strings.TrimSuffix(b.String(), "\n"), nil
-}
-
-func (s *stringSliceValue) Set(val string) error {
-	v, err := readAsCSV(val)
-	if err != nil {
-		return err
-	}
-	if !s.changed {
-		*s.value = v
-	} else {
-		*s.value = append(*s.value, v...)
-	}
-	s.changed = true
-	return nil
-}
-
-func (s *stringSliceValue) Type() string {
-	return "stringSlice"
-}
-
-func (s *stringSliceValue) String() string {
-	str, _ := writeAsCSV(*s.value)
-	return "[" + str + "]"
-}
-
-func stringSliceConv(sval string) (interface{}, error) {
-	sval = sval[1 : len(sval)-1]
-	// An empty string would cause a slice with one (empty) string
-	if len(sval) == 0 {
-		return []string{}, nil
-	}
-	return readAsCSV(sval)
-}
-
-// GetStringSlice returns the []string value of a flag with the given name
-func (f *FlagSet) GetStringSlice(name string) ([]string, error) {
-	val, err := f.getFlagType(name, "stringSlice", stringSliceConv)
-	if err != nil {
-		return []string{}, err
-	}
-	return val.([]string), nil
-}
-
-// StringSliceVar defines a string flag with specified name, default value, and usage string.
-// The argument p points to a []string variable in which to store the value of the flag.
-// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
-// For example:
-//   --ss="v1,v2" -ss="v3"
-// will result in
-//   []string{"v1", "v2", "v3"}
-func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) {
-	f.VarP(newStringSliceValue(value, p), name, "", usage)
-}
-
-// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
-	f.VarP(newStringSliceValue(value, p), name, shorthand, usage)
-}
-
-// StringSliceVar defines a string flag with specified name, default value, and usage string.
-// The argument p points to a []string variable in which to store the value of the flag.
-// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
-// For example:
-//   --ss="v1,v2" -ss="v3"
-// will result in
-//   []string{"v1", "v2", "v3"}
-func StringSliceVar(p *[]string, name string, value []string, usage string) {
-	CommandLine.VarP(newStringSliceValue(value, p), name, "", usage)
-}
-
-// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
-func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
-	CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage)
-}
-
-// StringSlice defines a string flag with specified name, default value, and usage string.
-// The return value is the address of a []string variable that stores the value of the flag.
-// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
-// For example:
-//   --ss="v1,v2" -ss="v3"
-// will result in
-//   []string{"v1", "v2", "v3"}
-func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string {
-	p := []string{}
-	f.StringSliceVarP(&p, name, "", value, usage)
-	return &p
-}
-
-// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string {
-	p := []string{}
-	f.StringSliceVarP(&p, name, shorthand, value, usage)
-	return &p
-}
-
-// StringSlice defines a string flag with specified name, default value, and usage string.
-// The return value is the address of a []string variable that stores the value of the flag.
-// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
-// For example:
-//   --ss="v1,v2" -ss="v3"
-// will result in
-//   []string{"v1", "v2", "v3"}
-func StringSlice(name string, value []string, usage string) *[]string {
-	return CommandLine.StringSliceP(name, "", value, usage)
-}
-
-// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
-func StringSliceP(name, shorthand string, value []string, usage string) *[]string {
-	return CommandLine.StringSliceP(name, shorthand, value, usage)
-}
diff --git a/vendor/github.com/spf13/pflag/string_to_int.go b/vendor/github.com/spf13/pflag/string_to_int.go
deleted file mode 100644
index 5ceda39..0000000
--- a/vendor/github.com/spf13/pflag/string_to_int.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package pflag
-
-import (
-	"bytes"
-	"fmt"
-	"strconv"
-	"strings"
-)
-
-// -- stringToInt Value
-type stringToIntValue struct {
-	value   *map[string]int
-	changed bool
-}
-
-func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue {
-	ssv := new(stringToIntValue)
-	ssv.value = p
-	*ssv.value = val
-	return ssv
-}
-
-// Format: a=1,b=2
-func (s *stringToIntValue) Set(val string) error {
-	ss := strings.Split(val, ",")
-	out := make(map[string]int, len(ss))
-	for _, pair := range ss {
-		kv := strings.SplitN(pair, "=", 2)
-		if len(kv) != 2 {
-			return fmt.Errorf("%s must be formatted as key=value", pair)
-		}
-		var err error
-		out[kv[0]], err = strconv.Atoi(kv[1])
-		if err != nil {
-			return err
-		}
-	}
-	if !s.changed {
-		*s.value = out
-	} else {
-		for k, v := range out {
-			(*s.value)[k] = v
-		}
-	}
-	s.changed = true
-	return nil
-}
-
-func (s *stringToIntValue) Type() string {
-	return "stringToInt"
-}
-
-func (s *stringToIntValue) String() string {
-	var buf bytes.Buffer
-	i := 0
-	for k, v := range *s.value {
-		if i > 0 {
-			buf.WriteRune(',')
-		}
-		buf.WriteString(k)
-		buf.WriteRune('=')
-		buf.WriteString(strconv.Itoa(v))
-		i++
-	}
-	return "[" + buf.String() + "]"
-}
-
-func stringToIntConv(val string) (interface{}, error) {
-	val = strings.Trim(val, "[]")
-	// An empty string would cause an empty map
-	if len(val) == 0 {
-		return map[string]int{}, nil
-	}
-	ss := strings.Split(val, ",")
-	out := make(map[string]int, len(ss))
-	for _, pair := range ss {
-		kv := strings.SplitN(pair, "=", 2)
-		if len(kv) != 2 {
-			return nil, fmt.Errorf("%s must be formatted as key=value", pair)
-		}
-		var err error
-		out[kv[0]], err = strconv.Atoi(kv[1])
-		if err != nil {
-			return nil, err
-		}
-	}
-	return out, nil
-}
-
-// GetStringToInt returns the map[string]int value of a flag with the given name
-func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) {
-	val, err := f.getFlagType(name, "stringToInt", stringToIntConv)
-	if err != nil {
-		return map[string]int{}, err
-	}
-	return val.(map[string]int), nil
-}
-
-// StringToIntVar defines a string flag with specified name, default value, and usage string.
-// The argument p points to a map[string]int variable in which to store the values from repeated uses of the flag.
-// The value of each argument will not be split on commas
-func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
-	f.VarP(newStringToIntValue(value, p), name, "", usage)
-}
-
-// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
-	f.VarP(newStringToIntValue(value, p), name, shorthand, usage)
-}
-
-// StringToIntVar defines a string flag with specified name, default value, and usage string.
-// The argument p points to a map[string]int variable in which to store the value of the flag.
-// The value of each argument will not be split on commas
-func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
-	CommandLine.VarP(newStringToIntValue(value, p), name, "", usage)
-}
-
-// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
-func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
-	CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage)
-}
-
-// StringToInt defines a string flag with specified name, default value, and usage string.
-// The return value is the address of a map[string]int variable that stores the value of the flag.
-// The value of each argument will not be split on commas
-func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int {
-	p := map[string]int{}
-	f.StringToIntVarP(&p, name, "", value, usage)
-	return &p
-}
-
-// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
-	p := map[string]int{}
-	f.StringToIntVarP(&p, name, shorthand, value, usage)
-	return &p
-}
-
-// StringToInt defines a string flag with specified name, default value, and usage string.
-// The return value is the address of a map[string]int variable that stores the value of the flag.
-// The value of each argument will not be split on commas
-func StringToInt(name string, value map[string]int, usage string) *map[string]int {
-	return CommandLine.StringToIntP(name, "", value, usage)
-}
-
-// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
-func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
-	return CommandLine.StringToIntP(name, shorthand, value, usage)
-}
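The deleted string_to_int.go (and string_to_string.go below) backed map-valued flags; a sketch of the merge semantics, under the same assumptions as above (--limit is an illustrative name):

    package main

    import (
        "fmt"

        flag "github.com/spf13/pflag"
    )

    func main() {
        limits := flag.StringToInt("limit", map[string]int{"cpu": 1}, "per-resource limits")
        flag.Parse()

        // Arguments are key=value pairs; one occurrence may carry several
        // pairs separated by commas, and repeated occurrences merge:
        //   --limit cpu=2,mem=512 --limit disk=10
        //   => map[cpu:2 disk:10 mem:512]
        fmt.Println(*limits)
    }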
diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go
deleted file mode 100644
index 890a01a..0000000
--- a/vendor/github.com/spf13/pflag/string_to_string.go
+++ /dev/null
@@ -1,160 +0,0 @@
-package pflag
-
-import (
-	"bytes"
-	"encoding/csv"
-	"fmt"
-	"strings"
-)
-
-// -- stringToString Value
-type stringToStringValue struct {
-	value   *map[string]string
-	changed bool
-}
-
-func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue {
-	ssv := new(stringToStringValue)
-	ssv.value = p
-	*ssv.value = val
-	return ssv
-}
-
-// Format: a=1,b=2
-func (s *stringToStringValue) Set(val string) error {
-	var ss []string
-	n := strings.Count(val, "=")
-	switch n {
-	case 0:
-		return fmt.Errorf("%s must be formatted as key=value", val)
-	case 1:
-		ss = append(ss, strings.Trim(val, `"`))
-	default:
-		r := csv.NewReader(strings.NewReader(val))
-		var err error
-		ss, err = r.Read()
-		if err != nil {
-			return err
-		}
-	}
-
-	out := make(map[string]string, len(ss))
-	for _, pair := range ss {
-		kv := strings.SplitN(pair, "=", 2)
-		if len(kv) != 2 {
-			return fmt.Errorf("%s must be formatted as key=value", pair)
-		}
-		out[kv[0]] = kv[1]
-	}
-	if !s.changed {
-		*s.value = out
-	} else {
-		for k, v := range out {
-			(*s.value)[k] = v
-		}
-	}
-	s.changed = true
-	return nil
-}
-
-func (s *stringToStringValue) Type() string {
-	return "stringToString"
-}
-
-func (s *stringToStringValue) String() string {
-	records := make([]string, 0, len(*s.value)>>1)
-	for k, v := range *s.value {
-		records = append(records, k+"="+v)
-	}
-
-	var buf bytes.Buffer
-	w := csv.NewWriter(&buf)
-	if err := w.Write(records); err != nil {
-		panic(err)
-	}
-	w.Flush()
-	return "[" + strings.TrimSpace(buf.String()) + "]"
-}
-
-func stringToStringConv(val string) (interface{}, error) {
-	val = strings.Trim(val, "[]")
-	// An empty string would cause an empty map
-	if len(val) == 0 {
-		return map[string]string{}, nil
-	}
-	r := csv.NewReader(strings.NewReader(val))
-	ss, err := r.Read()
-	if err != nil {
-		return nil, err
-	}
-	out := make(map[string]string, len(ss))
-	for _, pair := range ss {
-		kv := strings.SplitN(pair, "=", 2)
-		if len(kv) != 2 {
-			return nil, fmt.Errorf("%s must be formatted as key=value", pair)
-		}
-		out[kv[0]] = kv[1]
-	}
-	return out, nil
-}
-
-// GetStringToString returns the map[string]string value of a flag with the given name
-func (f *FlagSet) GetStringToString(name string) (map[string]string, error) {
-	val, err := f.getFlagType(name, "stringToString", stringToStringConv)
-	if err != nil {
-		return map[string]string{}, err
-	}
-	return val.(map[string]string), nil
-}
-
-// StringToStringVar defines a string flag with specified name, default value, and usage string.
-// The argument p points to a map[string]string variable in which to store the values from repeated uses of the flag.
-// The value of each argument will not be split on commas
-func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
-	f.VarP(newStringToStringValue(value, p), name, "", usage)
-}
-
-// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
-	f.VarP(newStringToStringValue(value, p), name, shorthand, usage)
-}
-
-// StringToStringVar defines a string flag with specified name, default value, and usage string.
-// The argument p points to a map[string]string variable in which to store the value of the flag.
-// The value of each argument will not be split on commas
-func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
-	CommandLine.VarP(newStringToStringValue(value, p), name, "", usage)
-}
-
-// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
-func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
-	CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage)
-}
-
-// StringToString defines a string flag with specified name, default value, and usage string.
-// The return value is the address of a map[string]string variable that stores the value of the flag.
-// The value of each argument will not be split on commas
-func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string {
-	p := map[string]string{}
-	f.StringToStringVarP(&p, name, "", value, usage)
-	return &p
-}
-
-// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
-	p := map[string]string{}
-	f.StringToStringVarP(&p, name, shorthand, value, usage)
-	return &p
-}
-
-// StringToString defines a string flag with specified name, default value, and usage string.
-// The return value is the address of a map[string]string variable that stores the value of the flag.
-// The value of each argument will not be split on commas
-func StringToString(name string, value map[string]string, usage string) *map[string]string {
-	return CommandLine.StringToStringP(name, "", value, usage)
-}
-
-// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
-func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
-	return CommandLine.StringToStringP(name, shorthand, value, usage)
-}
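
For reference, a minimal sketch of how the removed StringToString API was used; the flag-set name, flag name, and values are illustrative. Each argument must be formatted as key=value (split on the first '='), and repeated uses of the flag merge into the same map:

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
        labels := fs.StringToString("label", nil, "labels as key=value pairs")
        // Values are not split on commas; each --label supplies one pair.
        _ = fs.Parse([]string{"--label", "a=1", "--label", "b=2"})
        fmt.Println((*labels)["a"], (*labels)["b"]) // 1 2
    }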
diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go
deleted file mode 100644
index dcbc2b7..0000000
--- a/vendor/github.com/spf13/pflag/uint.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package pflag
-
-import "strconv"
-
-// -- uint Value
-type uintValue uint
-
-func newUintValue(val uint, p *uint) *uintValue {
-	*p = val
-	return (*uintValue)(p)
-}
-
-func (i *uintValue) Set(s string) error {
-	v, err := strconv.ParseUint(s, 0, 64)
-	*i = uintValue(v)
-	return err
-}
-
-func (i *uintValue) Type() string {
-	return "uint"
-}
-
-func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) }
-
-func uintConv(sval string) (interface{}, error) {
-	v, err := strconv.ParseUint(sval, 0, 0)
-	if err != nil {
-		return 0, err
-	}
-	return uint(v), nil
-}
-
-// GetUint returns the uint value of a flag with the given name
-func (f *FlagSet) GetUint(name string) (uint, error) {
-	val, err := f.getFlagType(name, "uint", uintConv)
-	if err != nil {
-		return 0, err
-	}
-	return val.(uint), nil
-}
-
-// UintVar defines a uint flag with specified name, default value, and usage string.
-// The argument p points to a uint variable in which to store the value of the flag.
-func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
-	f.VarP(newUintValue(value, p), name, "", usage)
-}
-
-// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) {
-	f.VarP(newUintValue(value, p), name, shorthand, usage)
-}
-
-// UintVar defines a uint flag with specified name, default value, and usage string.
-// The argument p points to a uint variable in which to store the value of the flag.
-func UintVar(p *uint, name string, value uint, usage string) {
-	CommandLine.VarP(newUintValue(value, p), name, "", usage)
-}
-
-// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
-func UintVarP(p *uint, name, shorthand string, value uint, usage string) {
-	CommandLine.VarP(newUintValue(value, p), name, shorthand, usage)
-}
-
-// Uint defines a uint flag with specified name, default value, and usage string.
-// The return value is the address of a uint variable that stores the value of the flag.
-func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
-	p := new(uint)
-	f.UintVarP(p, name, "", value, usage)
-	return p
-}
-
-// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint {
-	p := new(uint)
-	f.UintVarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// Uint defines a uint flag with specified name, default value, and usage string.
-// The return value is the address of a uint variable that stores the value of the flag.
-func Uint(name string, value uint, usage string) *uint {
-	return CommandLine.UintP(name, "", value, usage)
-}
-
-// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
-func UintP(name, shorthand string, value uint, usage string) *uint {
-	return CommandLine.UintP(name, shorthand, value, usage)
-}
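
The removed uint helpers follow the same Var/VarP/Get pattern as the other scalar types; Set parses with base 0, so decimal, hex, and octal literals are all accepted. A minimal usage sketch (flag name is illustrative):

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
        workers := fs.Uint("workers", 4, "number of worker goroutines")
        _ = fs.Parse([]string{"--workers", "0x10"}) // ParseUint with base 0 accepts hex
        fmt.Println(*workers)                       // 16
        n, _ := fs.GetUint("workers")               // typed lookup via getFlagType/uintConv
        fmt.Println(n)                              // 16
    }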
diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go
deleted file mode 100644
index 7e9914e..0000000
--- a/vendor/github.com/spf13/pflag/uint16.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package pflag
-
-import "strconv"
-
-// -- uint16 value
-type uint16Value uint16
-
-func newUint16Value(val uint16, p *uint16) *uint16Value {
-	*p = val
-	return (*uint16Value)(p)
-}
-
-func (i *uint16Value) Set(s string) error {
-	v, err := strconv.ParseUint(s, 0, 16)
-	*i = uint16Value(v)
-	return err
-}
-
-func (i *uint16Value) Type() string {
-	return "uint16"
-}
-
-func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
-
-func uint16Conv(sval string) (interface{}, error) {
-	v, err := strconv.ParseUint(sval, 0, 16)
-	if err != nil {
-		return 0, err
-	}
-	return uint16(v), nil
-}
-
-// GetUint16 returns the uint16 value of a flag with the given name
-func (f *FlagSet) GetUint16(name string) (uint16, error) {
-	val, err := f.getFlagType(name, "uint16", uint16Conv)
-	if err != nil {
-		return 0, err
-	}
-	return val.(uint16), nil
-}
-
-// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
-// The argument p points to a uint16 variable in which to store the value of the flag.
-func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) {
-	f.VarP(newUint16Value(value, p), name, "", usage)
-}
-
-// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
-	f.VarP(newUint16Value(value, p), name, shorthand, usage)
-}
-
-// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
-// The argument p points to a uint16 variable in which to store the value of the flag.
-func Uint16Var(p *uint16, name string, value uint16, usage string) {
-	CommandLine.VarP(newUint16Value(value, p), name, "", usage)
-}
-
-// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
-func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
-	CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage)
-}
-
-// Uint16 defines a uint16 flag with specified name, default value, and usage string.
-// The return value is the address of a uint16 variable that stores the value of the flag.
-func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 {
-	p := new(uint16)
-	f.Uint16VarP(p, name, "", value, usage)
-	return p
-}
-
-// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
-	p := new(uint16)
-	f.Uint16VarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// Uint16 defines a uint16 flag with specified name, default value, and usage string.
-// The return value is the address of a uint16 variable that stores the value of the flag.
-func Uint16(name string, value uint16, usage string) *uint16 {
-	return CommandLine.Uint16P(name, "", value, usage)
-}
-
-// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
-func Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
-	return CommandLine.Uint16P(name, shorthand, value, usage)
-}
diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go
deleted file mode 100644
index d802453..0000000
--- a/vendor/github.com/spf13/pflag/uint32.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package pflag
-
-import "strconv"
-
-// -- uint32 value
-type uint32Value uint32
-
-func newUint32Value(val uint32, p *uint32) *uint32Value {
-	*p = val
-	return (*uint32Value)(p)
-}
-
-func (i *uint32Value) Set(s string) error {
-	v, err := strconv.ParseUint(s, 0, 32)
-	*i = uint32Value(v)
-	return err
-}
-
-func (i *uint32Value) Type() string {
-	return "uint32"
-}
-
-func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
-
-func uint32Conv(sval string) (interface{}, error) {
-	v, err := strconv.ParseUint(sval, 0, 32)
-	if err != nil {
-		return 0, err
-	}
-	return uint32(v), nil
-}
-
-// GetUint32 returns the uint32 value of a flag with the given name
-func (f *FlagSet) GetUint32(name string) (uint32, error) {
-	val, err := f.getFlagType(name, "uint32", uint32Conv)
-	if err != nil {
-		return 0, err
-	}
-	return val.(uint32), nil
-}
-
-// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
-// The argument p points to a uint32 variable in which to store the value of the flag.
-func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) {
-	f.VarP(newUint32Value(value, p), name, "", usage)
-}
-
-// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
-	f.VarP(newUint32Value(value, p), name, shorthand, usage)
-}
-
-// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
-// The argument p points to a uint32 variable in which to store the value of the flag.
-func Uint32Var(p *uint32, name string, value uint32, usage string) {
-	CommandLine.VarP(newUint32Value(value, p), name, "", usage)
-}
-
-// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
-func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
-	CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage)
-}
-
-// Uint32 defines a uint32 flag with specified name, default value, and usage string.
-// The return value is the address of a uint32 variable that stores the value of the flag.
-func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 {
-	p := new(uint32)
-	f.Uint32VarP(p, name, "", value, usage)
-	return p
-}
-
-// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
-	p := new(uint32)
-	f.Uint32VarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// Uint32 defines a uint32 flag with specified name, default value, and usage string.
-// The return value is the address of a uint32 variable that stores the value of the flag.
-func Uint32(name string, value uint32, usage string) *uint32 {
-	return CommandLine.Uint32P(name, "", value, usage)
-}
-
-// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
-func Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
-	return CommandLine.Uint32P(name, shorthand, value, usage)
-}
diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go
deleted file mode 100644
index f62240f..0000000
--- a/vendor/github.com/spf13/pflag/uint64.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package pflag
-
-import "strconv"
-
-// -- uint64 Value
-type uint64Value uint64
-
-func newUint64Value(val uint64, p *uint64) *uint64Value {
-	*p = val
-	return (*uint64Value)(p)
-}
-
-func (i *uint64Value) Set(s string) error {
-	v, err := strconv.ParseUint(s, 0, 64)
-	*i = uint64Value(v)
-	return err
-}
-
-func (i *uint64Value) Type() string {
-	return "uint64"
-}
-
-func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
-
-func uint64Conv(sval string) (interface{}, error) {
-	v, err := strconv.ParseUint(sval, 0, 64)
-	if err != nil {
-		return 0, err
-	}
-	return uint64(v), nil
-}
-
-// GetUint64 returns the uint64 value of a flag with the given name
-func (f *FlagSet) GetUint64(name string) (uint64, error) {
-	val, err := f.getFlagType(name, "uint64", uint64Conv)
-	if err != nil {
-		return 0, err
-	}
-	return val.(uint64), nil
-}
-
-// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
-// The argument p points to a uint64 variable in which to store the value of the flag.
-func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
-	f.VarP(newUint64Value(value, p), name, "", usage)
-}
-
-// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
-	f.VarP(newUint64Value(value, p), name, shorthand, usage)
-}
-
-// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
-// The argument p points to a uint64 variable in which to store the value of the flag.
-func Uint64Var(p *uint64, name string, value uint64, usage string) {
-	CommandLine.VarP(newUint64Value(value, p), name, "", usage)
-}
-
-// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
-func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
-	CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage)
-}
-
-// Uint64 defines a uint64 flag with specified name, default value, and usage string.
-// The return value is the address of a uint64 variable that stores the value of the flag.
-func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
-	p := new(uint64)
-	f.Uint64VarP(p, name, "", value, usage)
-	return p
-}
-
-// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
-	p := new(uint64)
-	f.Uint64VarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// Uint64 defines a uint64 flag with specified name, default value, and usage string.
-// The return value is the address of a uint64 variable that stores the value of the flag.
-func Uint64(name string, value uint64, usage string) *uint64 {
-	return CommandLine.Uint64P(name, "", value, usage)
-}
-
-// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
-func Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
-	return CommandLine.Uint64P(name, shorthand, value, usage)
-}
diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go
deleted file mode 100644
index bb0e83c..0000000
--- a/vendor/github.com/spf13/pflag/uint8.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package pflag
-
-import "strconv"
-
-// -- uint8 Value
-type uint8Value uint8
-
-func newUint8Value(val uint8, p *uint8) *uint8Value {
-	*p = val
-	return (*uint8Value)(p)
-}
-
-func (i *uint8Value) Set(s string) error {
-	v, err := strconv.ParseUint(s, 0, 8)
-	*i = uint8Value(v)
-	return err
-}
-
-func (i *uint8Value) Type() string {
-	return "uint8"
-}
-
-func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
-
-func uint8Conv(sval string) (interface{}, error) {
-	v, err := strconv.ParseUint(sval, 0, 8)
-	if err != nil {
-		return 0, err
-	}
-	return uint8(v), nil
-}
-
-// GetUint8 returns the uint8 value of a flag with the given name
-func (f *FlagSet) GetUint8(name string) (uint8, error) {
-	val, err := f.getFlagType(name, "uint8", uint8Conv)
-	if err != nil {
-		return 0, err
-	}
-	return val.(uint8), nil
-}
-
-// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
-// The argument p points to a uint8 variable in which to store the value of the flag.
-func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) {
-	f.VarP(newUint8Value(value, p), name, "", usage)
-}
-
-// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
-	f.VarP(newUint8Value(value, p), name, shorthand, usage)
-}
-
-// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
-// The argument p points to a uint8 variable in which to store the value of the flag.
-func Uint8Var(p *uint8, name string, value uint8, usage string) {
-	CommandLine.VarP(newUint8Value(value, p), name, "", usage)
-}
-
-// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
-func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
-	CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage)
-}
-
-// Uint8 defines a uint8 flag with specified name, default value, and usage string.
-// The return value is the address of a uint8 variable that stores the value of the flag.
-func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 {
-	p := new(uint8)
-	f.Uint8VarP(p, name, "", value, usage)
-	return p
-}
-
-// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
-	p := new(uint8)
-	f.Uint8VarP(p, name, shorthand, value, usage)
-	return p
-}
-
-// Uint8 defines a uint8 flag with specified name, default value, and usage string.
-// The return value is the address of a uint8 variable that stores the value of the flag.
-func Uint8(name string, value uint8, usage string) *uint8 {
-	return CommandLine.Uint8P(name, "", value, usage)
-}
-
-// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
-func Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
-	return CommandLine.Uint8P(name, shorthand, value, usage)
-}
diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go
deleted file mode 100644
index edd94c6..0000000
--- a/vendor/github.com/spf13/pflag/uint_slice.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package pflag
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-)
-
-// -- uintSlice Value
-type uintSliceValue struct {
-	value   *[]uint
-	changed bool
-}
-
-func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue {
-	uisv := new(uintSliceValue)
-	uisv.value = p
-	*uisv.value = val
-	return uisv
-}
-
-func (s *uintSliceValue) Set(val string) error {
-	ss := strings.Split(val, ",")
-	out := make([]uint, len(ss))
-	for i, d := range ss {
-		u, err := strconv.ParseUint(d, 10, 0)
-		if err != nil {
-			return err
-		}
-		out[i] = uint(u)
-	}
-	if !s.changed {
-		*s.value = out
-	} else {
-		*s.value = append(*s.value, out...)
-	}
-	s.changed = true
-	return nil
-}
-
-func (s *uintSliceValue) Type() string {
-	return "uintSlice"
-}
-
-func (s *uintSliceValue) String() string {
-	out := make([]string, len(*s.value))
-	for i, d := range *s.value {
-		out[i] = fmt.Sprintf("%d", d)
-	}
-	return "[" + strings.Join(out, ",") + "]"
-}
-
-func uintSliceConv(val string) (interface{}, error) {
-	val = strings.Trim(val, "[]")
-	// Empty string would cause a slice with one (empty) entry
-	if len(val) == 0 {
-		return []uint{}, nil
-	}
-	ss := strings.Split(val, ",")
-	out := make([]uint, len(ss))
-	for i, d := range ss {
-		u, err := strconv.ParseUint(d, 10, 0)
-		if err != nil {
-			return nil, err
-		}
-		out[i] = uint(u)
-	}
-	return out, nil
-}
-
-// GetUintSlice returns the []uint value of a flag with the given name.
-func (f *FlagSet) GetUintSlice(name string) ([]uint, error) {
-	val, err := f.getFlagType(name, "uintSlice", uintSliceConv)
-	if err != nil {
-		return []uint{}, err
-	}
-	return val.([]uint), nil
-}
-
-// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string.
-// The argument p points to a []uint variable in which to store the value of the flag.
-func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) {
-	f.VarP(newUintSliceValue(value, p), name, "", usage)
-}
-
-// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
-	f.VarP(newUintSliceValue(value, p), name, shorthand, usage)
-}
-
-// UintSliceVar defines a []uint flag with specified name, default value, and usage string.
-// The argument p points to a []uint variable in which to store the value of the flag.
-func UintSliceVar(p *[]uint, name string, value []uint, usage string) {
-	CommandLine.VarP(newUintSliceValue(value, p), name, "", usage)
-}
-
-// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
-func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
-	CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage)
-}
-
-// UintSlice defines a []uint flag with specified name, default value, and usage string.
-// The return value is the address of a []uint variable that stores the value of the flag.
-func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint {
-	p := []uint{}
-	f.UintSliceVarP(&p, name, "", value, usage)
-	return &p
-}
-
-// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
-func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
-	p := []uint{}
-	f.UintSliceVarP(&p, name, shorthand, value, usage)
-	return &p
-}
-
-// UintSlice defines a []uint flag with specified name, default value, and usage string.
-// The return value is the address of a []uint variable that stores the value of the flag.
-func UintSlice(name string, value []uint, usage string) *[]uint {
-	return CommandLine.UintSliceP(name, "", value, usage)
-}
-
-// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
-func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
-	return CommandLine.UintSliceP(name, shorthand, value, usage)
-}
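
A sketch of the removed UintSlice behaviour: Set splits each argument on commas, and the changed field makes the first use replace the default while later uses append (flag name illustrative):

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
        ports := fs.UintSlice("port", []uint{80}, "ports to listen on")
        _ = fs.Parse([]string{"--port", "8080,8443", "--port", "9090"})
        fmt.Println(*ports) // [8080 8443 9090]: default replaced, later values appended
    }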
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index aa1c2b9..e0364e9 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -113,6 +113,17 @@
 	return Error(t, err, append([]interface{}{msg}, args...)...)
 }
 
+// Eventuallyf asserts that the given condition will be met in waitFor time,
+// periodically checking the target function each tick.
+//
+//    assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
+}
+
 // Exactlyf asserts that two objects are equal in value and type.
 //
 //    assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
@@ -157,6 +168,31 @@
 	return FileExists(t, path, append([]interface{}{msg}, args...)...)
 }
 
+// Greaterf asserts that the first element is greater than the second
+//
+//    assert.Greaterf(t, 2, 1, "error message %s", "formatted")
+//    assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
+//    assert.Greaterf(t, "b", "a", "error message %s", "formatted")
+func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Greater(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
+// GreaterOrEqualf asserts that the first element is greater than or equal to the second
+//
+//    assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
+//    assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
+//    assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
+//    assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
+func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return GreaterOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
 // HTTPBodyContainsf asserts that a specified handler returns a
 // body that contains a string.
 //
@@ -289,6 +325,14 @@
 	return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
 }
 
+// YAMLEqf asserts that two YAML strings are equivalent.
+func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
 // Lenf asserts that the specified object has specific length.
 // Lenf also fails if the object has a type that len() does not accept.
 //
@@ -300,6 +344,31 @@
 	return Len(t, object, length, append([]interface{}{msg}, args...)...)
 }
 
+// Lessf asserts that the first element is less than the second
+//
+//    assert.Lessf(t, 1, 2, "error message %s", "formatted")
+//    assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
+//    assert.Lessf(t, "a", "b", "error message %s", "formatted")
+func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Less(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
+// LessOrEqualf asserts that the first element is less than or equal to the second
+//
+//    assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
+//    assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
+//    assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
+//    assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
+func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
 // Nilf asserts that the specified object is nil.
 //
 //    assert.Nilf(t, err, "error message %s", "formatted")
@@ -444,6 +513,19 @@
 	return Regexp(t, rx, str, append([]interface{}{msg}, args...)...)
 }
 
+// Samef asserts that two pointers reference the same object.
+//
+//    assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
 // Subsetf asserts that the specified list (array, slice...) contains all
 // elements given in the specified subset (array, slice...).
 //
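
Each of the new ...f variants above only prepends msg to args and forwards to the unformatted assertion, so the formatted and unformatted spellings behave identically. A minimal test sketch using the added helpers (assumes a plain *testing.T):

    package example

    import (
        "testing"
        "time"

        "github.com/stretchr/testify/assert"
    )

    func TestFormattedAssertions(t *testing.T) {
        assert.Greaterf(t, 2, 1, "expected %d to be larger", 2)
        start := time.Now()
        assert.Eventuallyf(t, func() bool { return time.Since(start) > 20*time.Millisecond },
            time.Second, 5*time.Millisecond, "condition not met within %v", time.Second)
    }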
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index de39f79..2683040 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -215,6 +215,28 @@
 	return Errorf(a.t, err, msg, args...)
 }
 
+// Eventually asserts that the given condition will be met in waitFor time,
+// periodically checking the target function each tick.
+//
+//    a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond)
+func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Eventually(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
+// Eventuallyf asserts that the given condition will be met in waitFor time,
+// periodically checking the target function each tick.
+//
+//    a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Eventuallyf(a.t, condition, waitFor, tick, msg, args...)
+}
+
 // Exactly asserts that two objects are equal in value and type.
 //
 //    a.Exactly(int32(123), int64(123))
@@ -303,6 +325,56 @@
 	return FileExistsf(a.t, path, msg, args...)
 }
 
+// Greater asserts that the first element is greater than the second
+//
+//    a.Greater(2, 1)
+//    a.Greater(float64(2), float64(1))
+//    a.Greater("b", "a")
+func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Greater(a.t, e1, e2, msgAndArgs...)
+}
+
+// GreaterOrEqual asserts that the first element is greater than or equal to the second
+//
+//    a.GreaterOrEqual(2, 1)
+//    a.GreaterOrEqual(2, 2)
+//    a.GreaterOrEqual("b", "a")
+//    a.GreaterOrEqual("b", "b")
+func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return GreaterOrEqual(a.t, e1, e2, msgAndArgs...)
+}
+
+// GreaterOrEqualf asserts that the first element is greater than or equal to the second
+//
+//    a.GreaterOrEqualf(2, 1, "error message %s", "formatted")
+//    a.GreaterOrEqualf(2, 2, "error message %s", "formatted")
+//    a.GreaterOrEqualf("b", "a", "error message %s", "formatted")
+//    a.GreaterOrEqualf("b", "b", "error message %s", "formatted")
+func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return GreaterOrEqualf(a.t, e1, e2, msg, args...)
+}
+
+// Greaterf asserts that the first element is greater than the second
+//
+//    a.Greaterf(2, 1, "error message %s", "formatted")
+//    a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
+//    a.Greaterf("b", "a", "error message %s", "formatted")
+func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Greaterf(a.t, e1, e2, msg, args...)
+}
+
 // HTTPBodyContains asserts that a specified handler returns a
 // body that contains a string.
 //
@@ -567,6 +639,22 @@
 	return JSONEqf(a.t, expected, actual, msg, args...)
 }
 
+// YAMLEq asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return YAMLEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// YAMLEqf asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return YAMLEqf(a.t, expected, actual, msg, args...)
+}
+
 // Len asserts that the specified object has specific length.
 // Len also fails if the object has a type that len() does not accept.
 //
@@ -589,6 +677,56 @@
 	return Lenf(a.t, object, length, msg, args...)
 }
 
+// Less asserts that the first element is less than the second
+//
+//    a.Less(1, 2)
+//    a.Less(float64(1), float64(2))
+//    a.Less("a", "b")
+func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Less(a.t, e1, e2, msgAndArgs...)
+}
+
+// LessOrEqual asserts that the first element is less than or equal to the second
+//
+//    a.LessOrEqual(1, 2)
+//    a.LessOrEqual(2, 2)
+//    a.LessOrEqual("a", "b")
+//    a.LessOrEqual("b", "b")
+func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return LessOrEqual(a.t, e1, e2, msgAndArgs...)
+}
+
+// LessOrEqualf asserts that the first element is less than or equal to the second
+//
+//    a.LessOrEqualf(1, 2, "error message %s", "formatted")
+//    a.LessOrEqualf(2, 2, "error message %s", "formatted")
+//    a.LessOrEqualf("a", "b", "error message %s", "formatted")
+//    a.LessOrEqualf("b", "b", "error message %s", "formatted")
+func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return LessOrEqualf(a.t, e1, e2, msg, args...)
+}
+
+// Lessf asserts that the first element is less than the second
+//
+//    a.Lessf(1, 2, "error message %s", "formatted")
+//    a.Lessf(float64(1), float64(2), "error message %s", "formatted")
+//    a.Lessf("a", "b", "error message %s", "formatted")
+func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Lessf(a.t, e1, e2, msg, args...)
+}
+
 // Nil asserts that the specified object is nil.
 //
 //    a.Nil(err)
@@ -877,6 +1015,32 @@
 	return Regexpf(a.t, rx, str, msg, args...)
 }
 
+// Same asserts that two pointers reference the same object.
+//
+//    a.Same(ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Same(a.t, expected, actual, msgAndArgs...)
+}
+
+// Samef asserts that two pointers reference the same object.
+//
+//    a.Samef(ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Samef(a.t, expected, actual, msg, args...)
+}
+
 // Subset asserts that the specified list (array, slice...) contains all
 // elements given in the specified subset (array, slice...).
 //
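
These forwarders expose the same assertions as methods on *Assertions, so a test can bind the TestingT once with assert.New instead of passing t to every call. Sketch:

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestForwarders(t *testing.T) {
        a := assert.New(t)
        a.Less(1, 2)
        a.GreaterOrEqual("b", "a")
        a.YAMLEq("a: 1", "{a: 1}") // equivalent YAML documents
    }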
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
new file mode 100644
index 0000000..15a486c
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -0,0 +1,309 @@
+package assert
+
+import (
+	"fmt"
+	"reflect"
+)
+
+func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
+	switch kind {
+	case reflect.Int:
+		{
+			intobj1 := obj1.(int)
+			intobj2 := obj2.(int)
+			if intobj1 > intobj2 {
+				return -1, true
+			}
+			if intobj1 == intobj2 {
+				return 0, true
+			}
+			if intobj1 < intobj2 {
+				return 1, true
+			}
+		}
+	case reflect.Int8:
+		{
+			int8obj1 := obj1.(int8)
+			int8obj2 := obj2.(int8)
+			if int8obj1 > int8obj2 {
+				return -1, true
+			}
+			if int8obj1 == int8obj2 {
+				return 0, true
+			}
+			if int8obj1 < int8obj2 {
+				return 1, true
+			}
+		}
+	case reflect.Int16:
+		{
+			int16obj1 := obj1.(int16)
+			int16obj2 := obj2.(int16)
+			if int16obj1 > int16obj2 {
+				return -1, true
+			}
+			if int16obj1 == int16obj2 {
+				return 0, true
+			}
+			if int16obj1 < int16obj2 {
+				return 1, true
+			}
+		}
+	case reflect.Int32:
+		{
+			int32obj1 := obj1.(int32)
+			int32obj2 := obj2.(int32)
+			if int32obj1 > int32obj2 {
+				return -1, true
+			}
+			if int32obj1 == int32obj2 {
+				return 0, true
+			}
+			if int32obj1 < int32obj2 {
+				return 1, true
+			}
+		}
+	case reflect.Int64:
+		{
+			int64obj1 := obj1.(int64)
+			int64obj2 := obj2.(int64)
+			if int64obj1 > int64obj2 {
+				return -1, true
+			}
+			if int64obj1 == int64obj2 {
+				return 0, true
+			}
+			if int64obj1 < int64obj2 {
+				return 1, true
+			}
+		}
+	case reflect.Uint:
+		{
+			uintobj1 := obj1.(uint)
+			uintobj2 := obj2.(uint)
+			if uintobj1 > uintobj2 {
+				return -1, true
+			}
+			if uintobj1 == uintobj2 {
+				return 0, true
+			}
+			if uintobj1 < uintobj2 {
+				return 1, true
+			}
+		}
+	case reflect.Uint8:
+		{
+			uint8obj1 := obj1.(uint8)
+			uint8obj2 := obj2.(uint8)
+			if uint8obj1 > uint8obj2 {
+				return -1, true
+			}
+			if uint8obj1 == uint8obj2 {
+				return 0, true
+			}
+			if uint8obj1 < uint8obj2 {
+				return 1, true
+			}
+		}
+	case reflect.Uint16:
+		{
+			uint16obj1 := obj1.(uint16)
+			uint16obj2 := obj2.(uint16)
+			if uint16obj1 > uint16obj2 {
+				return -1, true
+			}
+			if uint16obj1 == uint16obj2 {
+				return 0, true
+			}
+			if uint16obj1 < uint16obj2 {
+				return 1, true
+			}
+		}
+	case reflect.Uint32:
+		{
+			uint32obj1 := obj1.(uint32)
+			uint32obj2 := obj2.(uint32)
+			if uint32obj1 > uint32obj2 {
+				return -1, true
+			}
+			if uint32obj1 == uint32obj2 {
+				return 0, true
+			}
+			if uint32obj1 < uint32obj2 {
+				return 1, true
+			}
+		}
+	case reflect.Uint64:
+		{
+			uint64obj1 := obj1.(uint64)
+			uint64obj2 := obj2.(uint64)
+			if uint64obj1 > uint64obj2 {
+				return -1, true
+			}
+			if uint64obj1 == uint64obj2 {
+				return 0, true
+			}
+			if uint64obj1 < uint64obj2 {
+				return 1, true
+			}
+		}
+	case reflect.Float32:
+		{
+			float32obj1 := obj1.(float32)
+			float32obj2 := obj2.(float32)
+			if float32obj1 > float32obj2 {
+				return -1, true
+			}
+			if float32obj1 == float32obj2 {
+				return 0, true
+			}
+			if float32obj1 < float32obj2 {
+				return 1, true
+			}
+		}
+	case reflect.Float64:
+		{
+			float64obj1 := obj1.(float64)
+			float64obj2 := obj2.(float64)
+			if float64obj1 > float64obj2 {
+				return -1, true
+			}
+			if float64obj1 == float64obj2 {
+				return 0, true
+			}
+			if float64obj1 < float64obj2 {
+				return 1, true
+			}
+		}
+	case reflect.String:
+		{
+			stringobj1 := obj1.(string)
+			stringobj2 := obj2.(string)
+			if stringobj1 > stringobj2 {
+				return -1, true
+			}
+			if stringobj1 == stringobj2 {
+				return 0, true
+			}
+			if stringobj1 < stringobj2 {
+				return 1, true
+			}
+		}
+	}
+
+	return 0, false
+}
+
+// Greater asserts that the first element is greater than the second
+//
+//    assert.Greater(t, 2, 1)
+//    assert.Greater(t, float64(2), float64(1))
+//    assert.Greater(t, "b", "a")
+func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	e1Kind := reflect.ValueOf(e1).Kind()
+	e2Kind := reflect.ValueOf(e2).Kind()
+	if e1Kind != e2Kind {
+		return Fail(t, "Elements should be the same type", msgAndArgs...)
+	}
+
+	res, isComparable := compare(e1, e2, e1Kind)
+	if !isComparable {
+		return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+	}
+
+	if res != -1 {
+		return Fail(t, fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2), msgAndArgs...)
+	}
+
+	return true
+}
+
+// GreaterOrEqual asserts that the first element is greater than or equal to the second
+//
+//    assert.GreaterOrEqual(t, 2, 1)
+//    assert.GreaterOrEqual(t, 2, 2)
+//    assert.GreaterOrEqual(t, "b", "a")
+//    assert.GreaterOrEqual(t, "b", "b")
+func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	e1Kind := reflect.ValueOf(e1).Kind()
+	e2Kind := reflect.ValueOf(e2).Kind()
+	if e1Kind != e2Kind {
+		return Fail(t, "Elements should be the same type", msgAndArgs...)
+	}
+
+	res, isComparable := compare(e1, e2, e1Kind)
+	if !isComparable {
+		return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+	}
+
+	if res != -1 && res != 0 {
+		return Fail(t, fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2), msgAndArgs...)
+	}
+
+	return true
+}
+
+// Less asserts that the first element is less than the second
+//
+//    assert.Less(t, 1, 2)
+//    assert.Less(t, float64(1), float64(2))
+//    assert.Less(t, "a", "b")
+func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	e1Kind := reflect.ValueOf(e1).Kind()
+	e2Kind := reflect.ValueOf(e2).Kind()
+	if e1Kind != e2Kind {
+		return Fail(t, "Elements should be the same type", msgAndArgs...)
+	}
+
+	res, isComparable := compare(e1, e2, e1Kind)
+	if !isComparable {
+		return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+	}
+
+	if res != 1 {
+		return Fail(t, fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2), msgAndArgs...)
+	}
+
+	return true
+}
+
+// LessOrEqual asserts that the first element is less than or equal to the second
+//
+//    assert.LessOrEqual(t, 1, 2)
+//    assert.LessOrEqual(t, 2, 2)
+//    assert.LessOrEqual(t, "a", "b")
+//    assert.LessOrEqual(t, "b", "b")
+func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	e1Kind := reflect.ValueOf(e1).Kind()
+	e2Kind := reflect.ValueOf(e2).Kind()
+	if e1Kind != e2Kind {
+		return Fail(t, "Elements should be the same type", msgAndArgs...)
+	}
+
+	res, isComparable := compare(e1, e2, e1Kind)
+	if !isComparable {
+		return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+	}
+
+	if res != 1 && res != 0 {
+		return Fail(t, fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2), msgAndArgs...)
+	}
+
+	return true
+}
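
Note the inverted convention in compare above: it returns -1 when the first operand is greater and 1 when it is smaller, which is why Greater requires res == -1 and Less requires res == 1. Both operands must share the same reflect.Kind, and only the numeric and string kinds enumerated in the switch are comparable. A short sketch of what passes and what fails:

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestOrdering(t *testing.T) {
        assert.Greater(t, uint16(2), uint16(1)) // same kind, comparable: passes
        assert.Less(t, "a", "b")                // strings compare lexicographically
        // assert.Greater(t, 2, int64(1))     would fail: int vs int64 kinds differ
        // assert.Less(t, []int{1}, []int{2}) would fail: slices are not comparable here
    }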
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index 9bd4a80..044da8b 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -18,6 +18,7 @@
 
 	"github.com/davecgh/go-spew/spew"
 	"github.com/pmezard/go-difflib/difflib"
+	yaml "gopkg.in/yaml.v2"
 )
 
 //go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl
@@ -350,6 +351,37 @@
 
 }
 
+// Same asserts that two pointers reference the same object.
+//
+//    assert.Same(t, ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	expectedPtr, actualPtr := reflect.ValueOf(expected), reflect.ValueOf(actual)
+	if expectedPtr.Kind() != reflect.Ptr || actualPtr.Kind() != reflect.Ptr {
+		return Fail(t, "Invalid operation: both arguments must be pointers", msgAndArgs...)
+	}
+
+	expectedType, actualType := reflect.TypeOf(expected), reflect.TypeOf(actual)
+	if expectedType != actualType {
+		return Fail(t, fmt.Sprintf("Pointer expected to be of type %v, but was %v",
+			expectedType, actualType), msgAndArgs...)
+	}
+
+	if expected != actual {
+		return Fail(t, fmt.Sprintf("Not same: \n"+
+			"expected: %p %#v\n"+
+			"actual  : %p %#v", expected, expected, actual, actual), msgAndArgs...)
+	}
+
+	return true
+}
+
 // formatUnequalValues takes two values of arbitrary types and returns string
 // representations appropriate to be presented to the user.
 //
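
Same compares the two interface values directly after checking that both are pointers of identical type, so what is asserted is pointer identity; two distinct allocations with equal contents are not "same". Sketch:

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestSame(t *testing.T) {
        x, y := 1, 1
        p, q := &x, &y
        assert.Same(t, p, p) // identical pointer: passes
        _ = q
        // assert.Same(t, p, q) would fail: equal values, different allocations
    }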
@@ -479,14 +511,14 @@
 	// collection types are empty when they have no element
 	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
 		return objValue.Len() == 0
-	// pointers are empty if nil or if the value they point to is empty
+		// pointers are empty if nil or if the value they point to is empty
 	case reflect.Ptr:
 		if objValue.IsNil() {
 			return true
 		}
 		deref := objValue.Elem().Interface()
 		return isEmpty(deref)
-	// for all other types, compare against the zero value
+		// for all other types, compare against the zero value
 	default:
 		zero := reflect.Zero(objValue.Type())
 		return reflect.DeepEqual(object, zero.Interface())
@@ -629,7 +661,7 @@
 func includeElement(list interface{}, element interface{}) (ok, found bool) {
 
 	listValue := reflect.ValueOf(list)
-	elementValue := reflect.ValueOf(element)
+	listKind := reflect.TypeOf(list).Kind()
 	defer func() {
 		if e := recover(); e != nil {
 			ok = false
@@ -637,11 +669,12 @@
 		}
 	}()
 
-	if reflect.TypeOf(list).Kind() == reflect.String {
+	if listKind == reflect.String {
+		elementValue := reflect.ValueOf(element)
 		return true, strings.Contains(listValue.String(), elementValue.String())
 	}
 
-	if reflect.TypeOf(list).Kind() == reflect.Map {
+	if listKind == reflect.Map {
 		mapKeys := listValue.MapKeys()
 		for i := 0; i < len(mapKeys); i++ {
 			if ObjectsAreEqual(mapKeys[i].Interface(), element) {
@@ -1337,6 +1370,24 @@
 	return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...)
 }
 
+// YAMLEq asserts that two YAML strings are equivalent.
+func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	var expectedYAMLAsInterface, actualYAMLAsInterface interface{}
+
+	if err := yaml.Unmarshal([]byte(expected), &expectedYAMLAsInterface); err != nil {
+		return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...)
+	}
+
+	if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil {
+		return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...)
+	}
+
+	return Equal(t, expectedYAMLAsInterface, actualYAMLAsInterface, msgAndArgs...)
+}
+
 func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
 	t := reflect.TypeOf(v)
 	k := t.Kind()
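
YAMLEq parses both strings with gopkg.in/yaml.v2 and defers to Equal on the unmarshalled values, so stylistic differences (block vs. flow style, quoting) do not matter. Sketch:

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestYAMLEq(t *testing.T) {
        expected := "numbers:\n  - 1\n  - 2\n"
        actual := `{"numbers": [1, 2]}` // flow style, same document
        assert.YAMLEq(t, expected, actual)
    }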
@@ -1371,8 +1422,8 @@
 		e = spewConfig.Sdump(expected)
 		a = spewConfig.Sdump(actual)
 	} else {
-		e = expected.(string)
-		a = actual.(string)
+		e = reflect.ValueOf(expected).String()
+		a = reflect.ValueOf(actual).String()
 	}
 
 	diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
@@ -1414,3 +1465,34 @@
 type tHelper interface {
 	Helper()
 }
+
+// Eventually asserts that the given condition will be met in waitFor time,
+// periodically checking the target function each tick.
+//
+//    assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
+func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	timer := time.NewTimer(waitFor)
+	ticker := time.NewTicker(tick)
+	checkPassed := make(chan bool, 1) // buffered: a check still in flight after return must not send on a closed channel
+	defer timer.Stop()
+	defer ticker.Stop()
+	for {
+		select {
+		case <-timer.C:
+			return Fail(t, "Condition never satisfied", msgAndArgs...)
+		case result := <-checkPassed:
+			if result {
+				return true
+			}
+		case <-ticker.C:
+			go func() {
+				checkPassed <- condition()
+			}()
+		}
+	}
+}
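
A usage sketch for the new Eventually: the condition runs in a goroutine on every tick until it reports true or the waitFor timer fires first. Because the check is asynchronous, a condition that becomes true just before the deadline may still lose the race against the timer:

    package example

    import (
        "testing"
        "time"

        "github.com/stretchr/testify/assert"
    )

    func TestEventually(t *testing.T) {
        done := make(chan struct{})
        go func() {
            time.Sleep(50 * time.Millisecond)
            close(done)
        }()
        assert.Eventually(t, func() bool {
            select {
            case <-done:
                return true
            default:
                return false
            }
        }, time.Second, 10*time.Millisecond)
    }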