[SEBA-930] update gRPC version to 1.27 and change Kafka message production

Change-Id: I14145a1351eb2523fa54e66381ad97abc5eedf50
diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml
index d609423..cace313 100644
--- a/vendor/github.com/Shopify/sarama/.travis.yml
+++ b/vendor/github.com/Shopify/sarama/.travis.yml
@@ -1,7 +1,6 @@
 dist: xenial
 language: go
 go:
-- 1.11.x
 - 1.12.x
 - 1.13.x
 
diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md
index dfa7e75..844f481 100644
--- a/vendor/github.com/Shopify/sarama/CHANGELOG.md
+++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md
@@ -1,5 +1,29 @@
 # Changelog
 
+#### Version 1.25.0 (2020-01-13)
+
+New Features:
+- Support TLS protocol in kafka-producer-performance
+  ([1538](https://github.com/Shopify/sarama/pull/1538)).
+- Add support for kafka 2.4.0
+  ([1552](https://github.com/Shopify/sarama/pull/1552)).
+
+Improvements:
+- Allow the Consumer to disable auto-commit offsets
+  ([1164](https://github.com/Shopify/sarama/pull/1164)).
+- Produce records with consistent timestamps
+  ([1455](https://github.com/Shopify/sarama/pull/1455)).
+
+Bug Fixes:
+- Fix incorrect SetTopicMetadata name mentions
+  ([1534](https://github.com/Shopify/sarama/pull/1534)).
+- Fix client.tryRefreshMetadata Println
+  ([1535](https://github.com/Shopify/sarama/pull/1535)).
+- Fix panic on calling updateMetadata on closed client
+  ([1531](https://github.com/Shopify/sarama/pull/1531)).
+- Fix possible faulty metrics in TestFuncProducing
+  ([1545](https://github.com/Shopify/sarama/pull/1545)).
+
 #### Version 1.24.1 (2019-10-31)
 
 New Features:
diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md
index 0206fac..18ad7bf 100644
--- a/vendor/github.com/Shopify/sarama/README.md
+++ b/vendor/github.com/Shopify/sarama/README.md
@@ -20,7 +20,7 @@
 Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
 the two latest stable releases of Kafka and Go, and we provide a two month
 grace period for older releases. This means we currently officially support
-Go 1.11 through 1.13, and Kafka 2.1 through 2.3, although older releases are
+Go 1.12 through 1.13, and Kafka 2.1 through 2.4, although older releases are
 still likely to work.
 
 Sarama follows semantic versioning and provides API stability via the gopkg.in service.
diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go
index 040cfe9..e5b3557 100644
--- a/vendor/github.com/Shopify/sarama/client.go
+++ b/vendor/github.com/Shopify/sarama/client.go
@@ -242,6 +242,9 @@
 }
 
 func (client *client) Closed() bool {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
 	return client.brokers == nil
 }
 
@@ -529,6 +532,11 @@
 // in the brokers map. It returns the broker that is registered, which may be the provided broker,
 // or a previously registered Broker instance. You must hold the write lock before calling this function.
 func (client *client) registerBroker(broker *Broker) {
+	if client.brokers == nil {
+		Logger.Printf("cannot register broker #%d at %s, client already closed", broker.ID(), broker.Addr())
+		return
+	}
+
 	if client.brokers[broker.ID()] == nil {
 		client.brokers[broker.ID()] = broker
 		Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
@@ -822,7 +830,7 @@
 	}
 
 	if broker != nil {
-		Logger.Println("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
+		Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
 		return retry(ErrOutOfBrokers)
 	}
 
@@ -833,6 +841,10 @@
 
 // if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
 func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
+	if client.Closed() {
+		return
+	}
+
 	client.lock.Lock()
 	defer client.lock.Unlock()
 
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
index e515e04..69c7161 100644
--- a/vendor/github.com/Shopify/sarama/config.go
+++ b/vendor/github.com/Shopify/sarama/config.go
@@ -338,8 +338,15 @@
 		// offsets. This currently requires the manual use of an OffsetManager
 		// but will eventually be automated.
 		Offsets struct {
-			// How frequently to commit updated offsets. Defaults to 1s.
-			CommitInterval time.Duration
+			AutoCommit struct {
+				// Whether or not to auto-commit updated offsets back to the broker.
+				// (default enabled).
+				Enable bool
+
+				// How frequently to commit updated offsets. Ineffective unless
+				// auto-commit is enabled (default 1s)
+				Interval time.Duration
+			}
 
 			// The initial offset to use if no offset was previously committed.
 			// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
@@ -423,7 +430,8 @@
 	c.Consumer.MaxWaitTime = 250 * time.Millisecond
 	c.Consumer.MaxProcessingTime = 100 * time.Millisecond
 	c.Consumer.Return.Errors = false
-	c.Consumer.Offsets.CommitInterval = 1 * time.Second
+	c.Consumer.Offsets.AutoCommit.Enable = true
+	c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
 	c.Consumer.Offsets.Initial = OffsetNewest
 	c.Consumer.Offsets.Retry.Max = 3
 
@@ -650,7 +658,7 @@
 		return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
 	case c.Consumer.Retry.Backoff < 0:
 		return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
-	case c.Consumer.Offsets.CommitInterval <= 0:
+	case c.Consumer.Offsets.AutoCommit.Interval <= 0:
 		return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
 	case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
 		return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
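
The config.go hunk above replaces Consumer.Offsets.CommitInterval with the new
Consumer.Offsets.AutoCommit.Enable/Interval fields (auto-commit stays on by default,
committing every 1s). A minimal sketch of a consumer config using the new fields; the
version constant comes from this same dependency bump, and any application that turns
auto-commit off is then responsible for committing offsets itself:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/Shopify/sarama"
    )

    func main() {
    	cfg := sarama.NewConfig()
    	cfg.Version = sarama.V2_4_0_0 // Kafka 2.4 constant added by this bump
    	// Turn off periodic auto-commit; offsets must then be committed
    	// explicitly by the application.
    	cfg.Consumer.Offsets.AutoCommit.Enable = false
    	// Interval only has an effect while auto-commit is enabled (default 1s).
    	cfg.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
    	fmt.Println("auto-commit enabled:", cfg.Consumer.Offsets.AutoCommit.Enable)
    }
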
diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go
index da99e88..b974dd9 100644
--- a/vendor/github.com/Shopify/sarama/consumer_group.go
+++ b/vendor/github.com/Shopify/sarama/consumer_group.go
@@ -417,12 +417,6 @@
 }
 
 func (c *consumerGroup) handleError(err error, topic string, partition int32) {
-	select {
-	case <-c.closed:
-		return
-	default:
-	}
-
 	if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 {
 		err = &ConsumerError{
 			Topic:     topic,
@@ -431,13 +425,25 @@
 		}
 	}
 
-	if c.config.Consumer.Return.Errors {
-		select {
-		case c.errors <- err:
-		default:
-		}
-	} else {
+	if !c.config.Consumer.Return.Errors {
 		Logger.Println(err)
+		return
+	}
+
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	select {
+	case <-c.closed:
+		//consumer is closed
+		return
+	default:
+	}
+
+	select {
+	case c.errors <- err:
+	default:
+		// no error listener
 	}
 }
 
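
The handleError change above now takes the consumer group's lock and checks for a
closed group before pushing onto the error channel; errors still only reach that
channel when Consumer.Return.Errors is enabled. A hedged sketch of the consuming side,
with a placeholder broker address and group ID that are not part of this change:

    package main

    import (
    	"log"

    	"github.com/Shopify/sarama"
    )

    func main() {
    	cfg := sarama.NewConfig()
    	cfg.Version = sarama.V2_4_0_0
    	cfg.Consumer.Return.Errors = true // deliver errors on group.Errors()

    	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer group.Close()

    	// Drain the error channel; with Return.Errors disabled the library
    	// only logs these errors instead.
    	go func() {
    		for err := range group.Errors() {
    			log.Println("consumer group error:", err)
    		}
    	}()
    	// ... the group.Consume() loop would go here ...
    }
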
diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml
index 481f681..4c030de 100644
--- a/vendor/github.com/Shopify/sarama/dev.yml
+++ b/vendor/github.com/Shopify/sarama/dev.yml
@@ -2,7 +2,7 @@
 
 up:
   - go:
-      version: '1.13.1'
+      version: '1.13.4'
 
 commands:
   test:
diff --git a/vendor/github.com/Shopify/sarama/go.mod b/vendor/github.com/Shopify/sarama/go.mod
index 4337c00..8ba2c91 100644
--- a/vendor/github.com/Shopify/sarama/go.mod
+++ b/vendor/github.com/Shopify/sarama/go.mod
@@ -13,7 +13,7 @@
 	github.com/golang/snappy v0.0.1 // indirect
 	github.com/hashicorp/go-uuid v1.0.1 // indirect
 	github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 // indirect
-	github.com/klauspost/compress v1.8.2
+	github.com/klauspost/compress v1.9.7
 	github.com/pierrec/lz4 v2.2.6+incompatible
 	github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a
 	github.com/stretchr/testify v1.3.0
diff --git a/vendor/github.com/Shopify/sarama/go.sum b/vendor/github.com/Shopify/sarama/go.sum
index d2f04ee..7f61258 100644
--- a/vendor/github.com/Shopify/sarama/go.sum
+++ b/vendor/github.com/Shopify/sarama/go.sum
@@ -27,6 +27,8 @@
 github.com/klauspost/compress v1.8.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.8.2 h1:Bx0qjetmNjdFXASH02NSAREKpiaDwkO1DRZ3dV2KCcs=
 github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.7 h1:hYW1gP94JUmAhBtJ+LNz5My+gBobDxPR1iVuKug26aA=
+github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go
index 923972f..e40f429 100644
--- a/vendor/github.com/Shopify/sarama/offset_manager.go
+++ b/vendor/github.com/Shopify/sarama/offset_manager.go
@@ -58,7 +58,7 @@
 		client: client,
 		conf:   conf,
 		group:  group,
-		ticker: time.NewTicker(conf.Consumer.Offsets.CommitInterval),
+		ticker: time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval),
 		poms:   make(map[string]map[int32]*partitionOffsetManager),
 
 		memberID:   memberID,
@@ -233,7 +233,12 @@
 	}
 }
 
+// flushToBroker is a no-op if auto-commit of offsets is disabled.
 func (om *offsetManager) flushToBroker() {
+	if !om.conf.Consumer.Offsets.AutoCommit.Enable {
+		return
+	}
+
 	req := om.constructRequest()
 	if req == nil {
 		return
diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go
index bba0f7e..b684aa4 100644
--- a/vendor/github.com/Shopify/sarama/produce_set.go
+++ b/vendor/github.com/Shopify/sarama/produce_set.go
@@ -44,9 +44,10 @@
 	}
 
 	timestamp := msg.Timestamp
-	if msg.Timestamp.IsZero() {
+	if timestamp.IsZero() {
 		timestamp = time.Now()
 	}
+	timestamp = timestamp.Truncate(time.Millisecond)
 
 	partitions := ps.msgs[msg.Topic]
 	if partitions == nil {
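
produce_set.go now truncates every record timestamp to millisecond precision, whether
the caller set msg.Timestamp or it defaulted to time.Now(). An illustrative producer
that sets an explicit timestamp; the broker address and topic are placeholders, not
part of this change:

    package main

    import (
    	"log"
    	"time"

    	"github.com/Shopify/sarama"
    )

    func main() {
    	cfg := sarama.NewConfig()
    	cfg.Producer.Return.Successes = true // required by SyncProducer

    	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer producer.Close()

    	msg := &sarama.ProducerMessage{
    		Topic: "example-topic",
    		Value: sarama.StringEncoder("hello"),
    		// Explicit timestamps are kept, but are now truncated to
    		// millisecond precision before being sent.
    		Timestamp: time.Now(),
    	}
    	partition, offset, err := producer.SendMessage(msg)
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("stored at partition %d, offset %d", partition, offset)
    }
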
diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go
index 7c815cd..9392793 100644
--- a/vendor/github.com/Shopify/sarama/utils.go
+++ b/vendor/github.com/Shopify/sarama/utils.go
@@ -161,6 +161,7 @@
 	V2_1_0_0  = newKafkaVersion(2, 1, 0, 0)
 	V2_2_0_0  = newKafkaVersion(2, 2, 0, 0)
 	V2_3_0_0  = newKafkaVersion(2, 3, 0, 0)
+	V2_4_0_0  = newKafkaVersion(2, 4, 0, 0)
 
 	SupportedVersions = []KafkaVersion{
 		V0_8_2_0,
@@ -185,9 +186,10 @@
 		V2_1_0_0,
 		V2_2_0_0,
 		V2_3_0_0,
+		V2_4_0_0,
 	}
 	MinVersion = V0_8_2_0
-	MaxVersion = V2_3_0_0
+	MaxVersion = V2_4_0_0
 )
 
 //ParseKafkaVersion parses and returns kafka version or error from a string
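
utils.go registers the new V2_4_0_0 constant and raises MaxVersion, so "2.4.0" is now
also accepted by ParseKafkaVersion. A small sketch of picking the protocol version from
a string (the string itself is just an example):

    package main

    import (
    	"log"

    	"github.com/Shopify/sarama"
    )

    func main() {
    	v, err := sarama.ParseKafkaVersion("2.4.0") // resolves to sarama.V2_4_0_0
    	if err != nil {
    		log.Fatal(err)
    	}
    	cfg := sarama.NewConfig()
    	cfg.Version = v
    	log.Println("using protocol version", cfg.Version)
    }
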
diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
index ec0c3fc..bda4021 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
@@ -38,7 +38,7 @@
 	b.nBits += bits
 }
 
-// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
+// encSymbol will add up to 16 bits, taken from the code table entry for the given symbol.
 // It will not check if there is space for them, so the caller must ensure that it has flushed recently.
 func (b *bitWriter) encSymbol(ct cTable, symbol byte) {
 	enc := ct[symbol]
@@ -46,6 +46,17 @@
 	b.nBits += enc.nBits
 }
 
+// encTwoSymbols will add up to 32 bits, taken from the code table entries for the two given symbols.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
+	encA := ct[av]
+	encB := ct[bv]
+	sh := b.nBits & 63
+	combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63))
+	b.bitContainer |= combined << sh
+	b.nBits += encA.nBits + encB.nBits
+}
+
 // addBits16ZeroNC will add up to 16 bits.
 // It will not check if there is space for them,
 // so the caller must ensure that it has flushed recently.
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
index dd4f7fe..0843cb0 100644
--- a/vendor/github.com/klauspost/compress/huff0/compress.go
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -54,6 +54,12 @@
 		canReuse = s.canUseTable(s.prevTable)
 	}
 
+	// We want the output size to be less than this:
+	wantSize := len(in)
+	if s.WantLogLess > 0 {
+		wantSize -= wantSize >> s.WantLogLess
+	}
+
 	// Reset for next run.
 	s.clearCount = true
 	s.maxCount = 0
@@ -74,10 +80,13 @@
 
 	if s.Reuse == ReusePolicyPrefer && canReuse {
 		keepTable := s.cTable
+		keepTL := s.actualTableLog
 		s.cTable = s.prevTable
+		s.actualTableLog = s.prevTableLog
 		s.Out, err = compressor(in)
 		s.cTable = keepTable
-		if err == nil && len(s.Out) < len(in) {
+		s.actualTableLog = keepTL
+		if err == nil && len(s.Out) < wantSize {
 			s.OutData = s.Out
 			return s.Out, true, nil
 		}
@@ -86,7 +95,6 @@
 	}
 
 	// Calculate new table.
-	s.optimalTableLog()
 	err = s.buildCTable()
 	if err != nil {
 		return nil, false, err
@@ -100,13 +108,22 @@
 		hSize := len(s.Out)
 		oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen])
 		newSize := s.cTable.estimateSize(s.count[:s.symbolLen])
-		if oldSize <= hSize+newSize || hSize+12 >= len(in) {
+		if oldSize <= hSize+newSize || hSize+12 >= wantSize {
 			// Retain cTable even if we re-use.
 			keepTable := s.cTable
+			keepTL := s.actualTableLog
+
 			s.cTable = s.prevTable
+			s.actualTableLog = s.prevTableLog
 			s.Out, err = compressor(in)
+
+			// Restore ctable.
 			s.cTable = keepTable
-			if len(s.Out) >= len(in) {
+			s.actualTableLog = keepTL
+			if err != nil {
+				return nil, false, err
+			}
+			if len(s.Out) >= wantSize {
 				return nil, false, ErrIncompressible
 			}
 			s.OutData = s.Out
@@ -128,12 +145,12 @@
 		s.OutTable = nil
 		return nil, false, err
 	}
-	if len(s.Out) >= len(in) {
+	if len(s.Out) >= wantSize {
 		s.OutTable = nil
 		return nil, false, ErrIncompressible
 	}
 	// Move current table into previous.
-	s.prevTable, s.cTable = s.cTable, s.prevTable[:0]
+	s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0]
 	s.OutData = s.Out[len(s.OutTable):]
 	return s.Out, false, nil
 }
@@ -154,28 +171,23 @@
 	for i := len(src) & 3; i > 0; i-- {
 		bw.encSymbol(cTable, src[n+i-1])
 	}
+	n -= 4
 	if s.actualTableLog <= 8 {
-		n -= 4
 		for ; n >= 0; n -= 4 {
 			tmp := src[n : n+4]
 			// tmp should be len 4
 			bw.flush32()
-			bw.encSymbol(cTable, tmp[3])
-			bw.encSymbol(cTable, tmp[2])
-			bw.encSymbol(cTable, tmp[1])
-			bw.encSymbol(cTable, tmp[0])
+			bw.encTwoSymbols(cTable, tmp[3], tmp[2])
+			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
 		}
 	} else {
-		n -= 4
 		for ; n >= 0; n -= 4 {
 			tmp := src[n : n+4]
 			// tmp should be len 4
 			bw.flush32()
-			bw.encSymbol(cTable, tmp[3])
-			bw.encSymbol(cTable, tmp[2])
+			bw.encTwoSymbols(cTable, tmp[3], tmp[2])
 			bw.flush32()
-			bw.encSymbol(cTable, tmp[1])
-			bw.encSymbol(cTable, tmp[0])
+			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
 		}
 	}
 	err := bw.close()
@@ -313,9 +325,26 @@
 	return true
 }
 
+func (s *Scratch) validateTable(c cTable) bool {
+	if len(c) < int(s.symbolLen) {
+		return false
+	}
+	for i, v := range s.count[:s.symbolLen] {
+		if v != 0 {
+			if c[i].nBits == 0 {
+				return false
+			}
+			if c[i].nBits > s.actualTableLog {
+				return false
+			}
+		}
+	}
+	return true
+}
+
 // minTableLog provides the minimum logSize to safely represent a distribution.
 func (s *Scratch) minTableLog() uint8 {
-	minBitsSrc := highBit32(uint32(s.br.remain()-1)) + 1
+	minBitsSrc := highBit32(uint32(s.br.remain())) + 1
 	minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
 	if minBitsSrc < minBitsSymbols {
 		return uint8(minBitsSrc)
@@ -327,7 +356,7 @@
 func (s *Scratch) optimalTableLog() {
 	tableLog := s.TableLog
 	minBits := s.minTableLog()
-	maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 2
+	maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1
 	if maxBitsSrc < tableLog {
 		// Accuracy can be reduced
 		tableLog = maxBitsSrc
@@ -354,6 +383,7 @@
 const huffNodesMask = huffNodesLen - 1
 
 func (s *Scratch) buildCTable() error {
+	s.optimalTableLog()
 	s.huffSort()
 	if cap(s.cTable) < maxSymbolValue+1 {
 		s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1)
@@ -430,7 +460,7 @@
 		return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax)
 	}
 	var nbPerRank [tableLogMax + 1]uint16
-	var valPerRank [tableLogMax + 1]uint16
+	var valPerRank [16]uint16
 	for _, v := range huffNode[:nonNullRank+1] {
 		nbPerRank[v.nbBits]++
 	}
@@ -446,16 +476,17 @@
 	}
 
 	// push nbBits per symbol, symbol order
-	// TODO: changed `s.symbolLen` -> `nonNullRank+1` (micro-opt)
 	for _, v := range huffNode[:nonNullRank+1] {
 		s.cTable[v.symbol].nBits = v.nbBits
 	}
 
 	// assign value within rank, symbol order
-	for n, val := range s.cTable[:s.symbolLen] {
-		v := valPerRank[val.nBits]
-		s.cTable[n].val = v
-		valPerRank[val.nBits] = v + 1
+	t := s.cTable[:s.symbolLen]
+	for n, val := range t {
+		nbits := val.nBits & 15
+		v := valPerRank[nbits]
+		t[n].val = v
+		valPerRank[nbits] = v + 1
 	}
 
 	return nil
@@ -479,10 +510,12 @@
 		r := highBit32(v+1) & 31
 		rank[r].base++
 	}
-	for n := 30; n > 0; n-- {
+	// maxBitLength is log2(BlockSizeMax) + 1
+	const maxBitLength = 18 + 1
+	for n := maxBitLength; n > 0; n-- {
 		rank[n-1].base += rank[n].base
 	}
-	for n := range rank[:] {
+	for n := range rank[:maxBitLength] {
 		rank[n].current = rank[n].base
 	}
 	for n, c := range s.count[:s.symbolLen] {
@@ -501,7 +534,7 @@
 }
 
 func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
-	maxNbBits := s.TableLog
+	maxNbBits := s.actualTableLog
 	huffNode := s.nodes[1 : huffNodesLen+1]
 	//huffNode = huffNode[: huffNodesLen]
 
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 43b4815..97ae66a 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -15,8 +15,7 @@
 
 // single-symbols decoding
 type dEntrySingle struct {
-	byte  uint8
-	nBits uint8
+	entry uint16
 }
 
 // double-symbols decoding
@@ -76,14 +75,15 @@
 	}
 
 	// collect weight stats
-	var rankStats [tableLogMax + 1]uint32
+	var rankStats [16]uint32
 	weightTotal := uint32(0)
 	for _, v := range s.huffWeight[:s.symbolLen] {
 		if v > tableLogMax {
 			return s, nil, errors.New("corrupt input: weight too large")
 		}
-		rankStats[v]++
-		weightTotal += (1 << (v & 15)) >> 1
+		v2 := v & 15
+		rankStats[v2]++
+		weightTotal += (1 << v2) >> 1
 	}
 	if weightTotal == 0 {
 		return s, nil, errors.New("corrupt input: weights zero")
@@ -134,15 +134,17 @@
 	if len(s.dt.single) != tSize {
 		s.dt.single = make([]dEntrySingle, tSize)
 	}
-
 	for n, w := range s.huffWeight[:s.symbolLen] {
+		if w == 0 {
+			continue
+		}
 		length := (uint32(1) << w) >> 1
 		d := dEntrySingle{
-			byte:  uint8(n),
-			nBits: s.actualTableLog + 1 - w,
+			entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8),
 		}
-		for u := rankStats[w]; u < rankStats[w]+length; u++ {
-			s.dt.single[u] = d
+		single := s.dt.single[rankStats[w] : rankStats[w]+length]
+		for i := range single {
+			single[i] = d
 		}
 		rankStats[w] += length
 	}
@@ -167,12 +169,12 @@
 	decode := func() byte {
 		val := br.peekBitsFast(s.actualTableLog) /* note : actualTableLog >= 1 */
 		v := s.dt.single[val]
-		br.bitsRead += v.nBits
-		return v.byte
+		br.bitsRead += uint8(v.entry)
+		return uint8(v.entry >> 8)
 	}
 	hasDec := func(v dEntrySingle) byte {
-		br.bitsRead += v.nBits
-		return v.byte
+		br.bitsRead += uint8(v.entry)
+		return uint8(v.entry >> 8)
 	}
 
 	// Avoid bounds check by always having full sized table.
@@ -269,33 +271,81 @@
 	decode := func(br *bitReader) byte {
 		val := br.peekBitsFast(s.actualTableLog) /* note : actualTableLog >= 1 */
 		v := single[val&tlMask]
-		br.bitsRead += v.nBits
-		return v.byte
+		br.bitsRead += uint8(v.entry)
+		return uint8(v.entry >> 8)
 	}
 
 	// Use temp table to avoid bound checks/append penalty.
 	var tmp = s.huffWeight[:256]
 	var off uint8
+	var decoded int
 
 	// Decode 2 values from each decoder/loop.
 	const bufoff = 256 / 4
 bigloop:
 	for {
 		for i := range br {
-			if br[i].off < 4 {
+			br := &br[i]
+			if br.off < 4 {
 				break bigloop
 			}
-			br[i].fillFast()
+			br.fillFast()
 		}
-		tmp[off] = decode(&br[0])
-		tmp[off+bufoff] = decode(&br[1])
-		tmp[off+bufoff*2] = decode(&br[2])
-		tmp[off+bufoff*3] = decode(&br[3])
-		tmp[off+1] = decode(&br[0])
-		tmp[off+1+bufoff] = decode(&br[1])
-		tmp[off+1+bufoff*2] = decode(&br[2])
-		tmp[off+1+bufoff*3] = decode(&br[3])
+
+		{
+			const stream = 0
+			val := br[stream].peekBitsFast(s.actualTableLog)
+			v := single[val&tlMask]
+			br[stream].bitsRead += uint8(v.entry)
+
+			val2 := br[stream].peekBitsFast(s.actualTableLog)
+			v2 := single[val2&tlMask]
+			tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
+			tmp[off+bufoff*stream] = uint8(v.entry >> 8)
+			br[stream].bitsRead += uint8(v2.entry)
+		}
+
+		{
+			const stream = 1
+			val := br[stream].peekBitsFast(s.actualTableLog)
+			v := single[val&tlMask]
+			br[stream].bitsRead += uint8(v.entry)
+
+			val2 := br[stream].peekBitsFast(s.actualTableLog)
+			v2 := single[val2&tlMask]
+			tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
+			tmp[off+bufoff*stream] = uint8(v.entry >> 8)
+			br[stream].bitsRead += uint8(v2.entry)
+		}
+
+		{
+			const stream = 2
+			val := br[stream].peekBitsFast(s.actualTableLog)
+			v := single[val&tlMask]
+			br[stream].bitsRead += uint8(v.entry)
+
+			val2 := br[stream].peekBitsFast(s.actualTableLog)
+			v2 := single[val2&tlMask]
+			tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
+			tmp[off+bufoff*stream] = uint8(v.entry >> 8)
+			br[stream].bitsRead += uint8(v2.entry)
+		}
+
+		{
+			const stream = 3
+			val := br[stream].peekBitsFast(s.actualTableLog)
+			v := single[val&tlMask]
+			br[stream].bitsRead += uint8(v.entry)
+
+			val2 := br[stream].peekBitsFast(s.actualTableLog)
+			v2 := single[val2&tlMask]
+			tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
+			tmp[off+bufoff*stream] = uint8(v.entry >> 8)
+			br[stream].bitsRead += uint8(v2.entry)
+		}
+
 		off += 2
+
 		if off == bufoff {
 			if bufoff > dstEvery {
 				return nil, errors.New("corruption detected: stream overrun 1")
@@ -306,6 +356,7 @@
 			copy(dstOut[dstEvery*3:], tmp[bufoff*3:bufoff*4])
 			off = 0
 			dstOut = dstOut[bufoff:]
+			decoded += 256
 			// There must at least be 3 buffers left.
 			if len(dstOut) < dstEvery*3 {
 				return nil, errors.New("corruption detected: stream overrun 2")
@@ -321,9 +372,11 @@
 		copy(dstOut[dstEvery:dstEvery+ioff], tmp[bufoff:bufoff*2])
 		copy(dstOut[dstEvery*2:dstEvery*2+ioff], tmp[bufoff*2:bufoff*3])
 		copy(dstOut[dstEvery*3:dstEvery*3+ioff], tmp[bufoff*3:bufoff*4])
+		decoded += int(off) * 4
 		dstOut = dstOut[off:]
 	}
 
+	// Decode remaining.
 	for i := range br {
 		offset := dstEvery * i
 		br := &br[i]
@@ -335,12 +388,15 @@
 			dstOut[offset] = decode(br)
 			offset++
 		}
+		decoded += offset - dstEvery*i
 		err = br.close()
 		if err != nil {
 			return nil, err
 		}
 	}
-
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
 	return s.Out, nil
 }
 
@@ -360,7 +416,7 @@
 		broken++
 		if enc.nBits == 0 {
 			for _, dec := range dt {
-				if dec.byte == byte(sym) {
+				if uint8(dec.entry>>8) == byte(sym) {
 					fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
 					errs++
 					break
@@ -376,12 +432,12 @@
 		top := enc.val << ub
 		// decoder looks at top bits.
 		dec := dt[top]
-		if dec.nBits != enc.nBits {
-			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, dec.nBits)
+		if uint8(dec.entry) != enc.nBits {
+			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
 			errs++
 		}
-		if dec.byte != uint8(sym) {
-			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, dec.byte)
+		if uint8(dec.entry>>8) != uint8(sym) {
+			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
 			errs++
 		}
 		if errs > 0 {
@@ -392,12 +448,12 @@
 		for i := uint16(0); i < (1 << ub); i++ {
 			vval := top | i
 			dec := dt[vval]
-			if dec.nBits != enc.nBits {
-				fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, dec.nBits)
+			if uint8(dec.entry) != enc.nBits {
+				fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
 				errs++
 			}
-			if dec.byte != uint8(sym) {
-				fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, dec.byte)
+			if uint8(dec.entry>>8) != uint8(sym) {
+				fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
 				errs++
 			}
 			if errs > 20 {
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index 6f823f9..53249df 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -83,12 +83,18 @@
 	MaxSymbolValue uint8
 
 	// TableLog will attempt to override the tablelog for the next block.
-	// Must be <= 11.
+	// Must be <= 11 and >= 5.
 	TableLog uint8
 
 	// Reuse will specify the reuse policy
 	Reuse ReusePolicy
 
+	// WantLogLess allows specifying a log2 reduction that should at least be achieved,
+	// otherwise the block will be returned as incompressible.
+	// The reduction should then be at least (input size >> WantLogLess).
+	// If WantLogLess == 0, any improvement will do.
+	WantLogLess uint8
+
 	// MaxDecodedSize will set the maximum allowed output size.
 	// This value will automatically be set to BlockSizeMax if not set.
 	// Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded.
@@ -99,6 +105,7 @@
 	maxCount       int    // count of the most probable symbol
 	clearCount     bool   // clear count
 	actualTableLog uint8  // Selected tablelog.
+	prevTableLog   uint8  // Tablelog for previous table
 	prevTable      cTable // Table used for previous compression.
 	cTable         cTable // compression table
 	dt             dTable // decompression table
@@ -121,8 +128,8 @@
 	if s.TableLog == 0 {
 		s.TableLog = tableLogDefault
 	}
-	if s.TableLog > tableLogMax {
-		return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, tableLogMax)
+	if s.TableLog > tableLogMax || s.TableLog < minTablelog {
+		return nil, fmt.Errorf("invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax)
 	}
 	if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
 		s.MaxDecodedSize = BlockSizeMax
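
huff0 gains a WantLogLess option: when non-zero, a compressed block must be at least
(input size >> WantLogLess) smaller than the input, otherwise ErrIncompressible is
returned; the zstd block encoder later in this diff sets WantLogLess: 4 on its literal
encoder for exactly that reason. A rough usage sketch with an arbitrary sample input:

    package main

    import (
    	"bytes"
    	"log"

    	"github.com/klauspost/compress/huff0"
    )

    func main() {
    	in := bytes.Repeat([]byte("huff0 wants at least a 1/16 size reduction "), 64)

    	// Require the output to be at least in>>4 (~6%) smaller than the input.
    	s := &huff0.Scratch{WantLogLess: 4}

    	out, reused, err := huff0.Compress1X(in, s)
    	if err == huff0.ErrIncompressible {
    		log.Println("not worth compressing, keep the raw bytes")
    		return
    	}
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("compressed %d -> %d bytes (table reused: %v)", len(in), len(out), reused)
    }
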
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index d9d38b2..bc977a3 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -26,13 +26,17 @@
 
 ### Status: 
 
-BETA - there may still be subtle bugs, but a wide variety of content has been tested. 
-There may still be implementation specific stuff in regards to error handling that could lead to edge cases. 
+STABLE - there may always be subtle bugs, but a wide variety of content has been tested and the library is actively 
+used by several projects. This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+
+There may still be specific combinations of data types/size/settings that could lead to edge cases, 
+so as always, testing is recommended.  
 
 For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. 
 
 The "Fastest" compression ratio is roughly equivalent to zstd level 1. 
-The "Default" compression ration is roughly equivalent to zstd level 3 (default).
+The "Default" compression ratio is roughly equivalent to zstd level 3 (default).
 
 In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. 
 The compression ratio compared to stdlib is around level 3, but usually 3x as fast.
@@ -251,8 +255,12 @@
 
 ## Decompressor
 
-STATUS: Release Candidate - there may still be subtle bugs, but a wide variety of content has been tested.
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
 
+This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+kindly supplied by [fuzzit.dev](https://fuzzit.dev/). 
+The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, 
+or run it past its limits with ANY input provided.  
  
 ### Usage
 
@@ -382,4 +390,4 @@
 
 For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan).
 
-This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
\ No newline at end of file
+This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 3e161ea..ed670bc 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -11,6 +11,7 @@
 	"sync"
 
 	"github.com/klauspost/compress/huff0"
+	"github.com/klauspost/compress/zstd/internal/xxhash"
 )
 
 type blockType uint8
@@ -88,6 +89,7 @@
 	sequenceBuf []seq
 	tmp         [4]byte
 	err         error
+	decWG       sync.WaitGroup
 }
 
 func (b *blockDec) String() string {
@@ -104,6 +106,7 @@
 		input:   make(chan struct{}, 1),
 		history: make(chan *history, 1),
 	}
+	b.decWG.Add(1)
 	go b.startDecoder()
 	return &b
 }
@@ -160,7 +163,8 @@
 	b.data, err = br.readBig(cSize, b.dataStorage)
 	if err != nil {
 		if debug {
-			println("Reading block:", err)
+			println("Reading block:", err, "(", cSize, ")", len(b.data))
+			printf("%T", br)
 		}
 		return err
 	}
@@ -181,11 +185,13 @@
 	close(b.input)
 	close(b.history)
 	close(b.result)
+	b.decWG.Wait()
 }
 
 // decodeAsync will prepare decoding the block when it receives input.
 // This will separate output and history.
 func (b *blockDec) startDecoder() {
+	defer b.decWG.Done()
 	for range b.input {
 		//println("blockDec: Got block input")
 		switch b.Type {
@@ -275,7 +281,7 @@
 		hist.b = nil
 		err := b.decodeCompressed(hist)
 		if debug {
-			println("Decompressed to total", len(b.dst), "bytes, error:", err)
+			println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
 		}
 		hist.b = b.dst
 		b.dst = saved
@@ -368,7 +374,7 @@
 		}
 	}
 	if debug {
-		println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize", litCompSize)
+		println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams)
 	}
 	var literals []byte
 	var huff *huff0.Scratch
@@ -426,7 +432,6 @@
 		}
 		literals = in[:litCompSize]
 		in = in[litCompSize:]
-
 		huff = huffDecoderPool.Get().(*huff0.Scratch)
 		var err error
 		// Ensure we have space to store it.
@@ -637,7 +642,7 @@
 		hist.huffTree = huff
 	}
 	if debug {
-		println("Final literals:", len(literals), "and", nSeqs, "sequences.")
+		println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.")
 	}
 
 	if nSeqs == 0 {
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index 9d9151a..507757d 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -51,7 +51,7 @@
 		b.coders.llEnc = &fseEncoder{}
 		b.coders.llPrev = &fseEncoder{}
 	}
-	b.litEnc = &huff0.Scratch{}
+	b.litEnc = &huff0.Scratch{WantLogLess: 4}
 	b.reset(nil)
 }
 
@@ -299,14 +299,28 @@
 	}
 }
 
+// encodeRawTo appends src as a raw (uncompressed) block to dst and returns the result.
+func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(src)))
+	bh.setType(blockTypeRaw)
+	dst = bh.appendTo(dst)
+	dst = append(dst, src...)
+	if debug {
+		println("Adding RAW block, length", len(src))
+	}
+	return dst
+}
+
 // encodeLits can be used if the block is only litLen.
-func (b *blockEnc) encodeLits() error {
+func (b *blockEnc) encodeLits(raw bool) error {
 	var bh blockHeader
 	bh.setLast(b.last)
 	bh.setSize(uint32(len(b.literals)))
 
 	// Don't compress extremely small blocks
-	if len(b.literals) < 32 {
+	if len(b.literals) < 32 || raw {
 		if debug {
 			println("Adding RAW block, length", len(b.literals))
 		}
@@ -324,18 +338,10 @@
 	if len(b.literals) >= 1024 {
 		// Use 4 Streams.
 		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
-		if len(out) > len(b.literals)-len(b.literals)>>4 {
-			// Bail out of compression is too little.
-			err = huff0.ErrIncompressible
-		}
 	} else if len(b.literals) > 32 {
 		// Use 1 stream
 		single = true
 		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
-		if len(out) > len(b.literals)-len(b.literals)>>4 {
-			// Bail out of compression is too little.
-			err = huff0.ErrIncompressible
-		}
 	} else {
 		err = huff0.ErrIncompressible
 	}
@@ -391,10 +397,56 @@
 	return nil
 }
 
-// encode will encode the block and put the output in b.output.
-func (b *blockEnc) encode() error {
+// fuzzFseEncoder can be used to fuzz the FSE encoder.
+func fuzzFseEncoder(data []byte) int {
+	if len(data) > maxSequences || len(data) < 2 {
+		return 0
+	}
+	enc := fseEncoder{}
+	hist := enc.Histogram()[:256]
+	maxSym := uint8(0)
+	for i, v := range data {
+		v = v & 63
+		data[i] = v
+		hist[v]++
+		if v > maxSym {
+			maxSym = v
+		}
+	}
+	if maxSym == 0 {
+		// All 0
+		return 0
+	}
+	maxCount := func(a []uint32) int {
+		var max uint32
+		for _, v := range a {
+			if v > max {
+				max = v
+			}
+		}
+		return int(max)
+	}
+	cnt := maxCount(hist[:maxSym])
+	if cnt == len(data) {
+		// RLE
+		return 0
+	}
+	enc.HistogramFinished(maxSym, cnt)
+	err := enc.normalizeCount(len(data))
+	if err != nil {
+		return 0
+	}
+	_, err = enc.writeCount(nil)
+	if err != nil {
+		panic(err)
+	}
+	return 1
+}
+
+// encode will encode the block and append the output to b.output.
+func (b *blockEnc) encode(raw bool) error {
 	if len(b.sequences) == 0 {
-		return b.encodeLits()
+		return b.encodeLits(raw)
 	}
 	// We want some difference
 	if len(b.literals) > (b.size - (b.size >> 5)) {
@@ -405,6 +457,8 @@
 	var lh literalsHeader
 	bh.setLast(b.last)
 	bh.setType(blockTypeCompressed)
+	// Store offset of the block header. Needed when we know the size.
+	bhOffset := len(b.output)
 	b.output = bh.appendTo(b.output)
 
 	var (
@@ -412,22 +466,17 @@
 		reUsed, single bool
 		err            error
 	)
-	if len(b.literals) >= 1024 {
+	if len(b.literals) >= 1024 && !raw {
 		// Use 4 Streams.
 		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
-		if len(out) > len(b.literals)-len(b.literals)>>4 {
-			err = huff0.ErrIncompressible
-		}
-	} else if len(b.literals) > 32 {
+	} else if len(b.literals) > 32 && !raw {
 		// Use 1 stream
 		single = true
 		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
-		if len(out) > len(b.literals)-len(b.literals)>>4 {
-			err = huff0.ErrIncompressible
-		}
 	} else {
 		err = huff0.ErrIncompressible
 	}
+
 	switch err {
 	case huff0.ErrIncompressible:
 		lh.setType(literalsBlockRaw)
@@ -695,23 +744,23 @@
 	}
 	b.output = wr.out
 
-	if len(b.output)-3 >= b.size {
+	if len(b.output)-3-bhOffset >= b.size {
 		// Maybe even add a bigger margin.
 		b.litEnc.Reuse = huff0.ReusePolicyNone
 		return errIncompressible
 	}
 
 	// Size is output minus block header.
-	bh.setSize(uint32(len(b.output)) - 3)
+	bh.setSize(uint32(len(b.output)-bhOffset) - 3)
 	if debug {
 		println("Rewriting block header", bh)
 	}
-	_ = bh.appendTo(b.output[:0])
+	_ = bh.appendTo(b.output[bhOffset:bhOffset])
 	b.coders.setPrev(llEnc, mlEnc, ofEnc)
 	return nil
 }
 
-var errIncompressible = errors.New("uncompressible")
+var errIncompressible = errors.New("incompressible")
 
 func (b *blockEnc) genCodes() {
 	if len(b.sequences) == 0 {
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
index 3538063..07321ac 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -101,6 +101,9 @@
 		dst = make([]byte, n)
 	}
 	n2, err := io.ReadFull(r.r, dst[:n])
+	if err == io.EOF && n > 0 {
+		err = io.ErrUnexpectedEOF
+	}
 	return dst[:n2], err
 }
 
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index 098f9bf..35a3cda 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -124,7 +124,9 @@
 			if d.current.err != nil {
 				break
 			}
-			d.nextBlock()
+			if !d.nextBlock(n == 0) {
+				return n, nil
+			}
 		}
 	}
 	if len(d.current.b) > 0 {
@@ -252,7 +254,7 @@
 		if d.current.err != nil {
 			break
 		}
-		d.nextBlock()
+		d.nextBlock(true)
 	}
 	err := d.current.err
 	if err != nil {
@@ -329,7 +331,10 @@
 
 // nextBlock returns the next block.
 // If an error occurs d.err will be set.
-func (d *Decoder) nextBlock() {
+// Optionally the function can block for new output.
+// If non-blocking mode is used the returned boolean will be false
+// if no data was available without blocking.
+func (d *Decoder) nextBlock(blocking bool) (ok bool) {
 	if d.current.d != nil {
 		if debug {
 			printf("re-adding current decoder %p", d.current.d)
@@ -339,12 +344,22 @@
 	}
 	if d.current.err != nil {
 		// Keep error state.
-		return
+		return blocking
 	}
-	d.current.decodeOutput = <-d.current.output
+
+	if blocking {
+		d.current.decodeOutput = <-d.current.output
+	} else {
+		select {
+		case d.current.decodeOutput = <-d.current.output:
+		default:
+			return false
+		}
+	}
 	if debug {
 		println("got", len(d.current.b), "bytes, error:", d.current.err)
 	}
+	return true
 }
 
 // Close will release all resources.
@@ -373,6 +388,35 @@
 	d.current.err = ErrDecoderClosed
 }
 
+// IOReadCloser returns the decoder as an io.ReadCloser for convenience.
+// Any changes to the decoder will be reflected, so the returned ReadCloser
+// can be reused along with the decoder.
+// io.WriterTo is also supported by the returned ReadCloser.
+func (d *Decoder) IOReadCloser() io.ReadCloser {
+	return closeWrapper{d: d}
+}
+
+// closeWrapper wraps a function call as a closer.
+type closeWrapper struct {
+	d *Decoder
+}
+
+// WriteTo forwards WriteTo calls to the decoder.
+func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) {
+	return c.d.WriteTo(w)
+}
+
+// Read forwards read calls to the decoder.
+func (c closeWrapper) Read(p []byte) (n int, err error) {
+	return c.d.Read(p)
+}
+
+// Close closes the decoder.
+func (c closeWrapper) Close() error {
+	c.d.Close()
+	return nil
+}
+
 type decodeOutput struct {
 	d   *blockDec
 	b   []byte
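
decoder.go adds IOReadCloser(), which wraps the streaming Decoder as an io.ReadCloser
(Read and WriteTo are forwarded, Close releases the decoder). A sketch assuming a
hypothetical payload.zst input file:

    package main

    import (
    	"io"
    	"log"
    	"os"

    	"github.com/klauspost/compress/zstd"
    )

    func main() {
    	f, err := os.Open("payload.zst") // placeholder input file
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer f.Close()

    	dec, err := zstd.NewReader(f)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Hand the decoder to code that expects an io.ReadCloser; closing the
    	// wrapper shuts down the decoder's goroutines.
    	rc := dec.IOReadCloser()
    	defer rc.Close()

    	if _, err := io.Copy(os.Stdout, rc); err != nil {
    		log.Fatal(err)
    	}
    }
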
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index e120625..ee3b09b 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -235,7 +235,7 @@
 				if debug && s-t > e.maxMatchOff {
 					panic("s - t >e.maxMatchOff")
 				}
-				if debug {
+				if debugMatches {
 					println("long match")
 				}
 				break
@@ -259,7 +259,7 @@
 					// but the likelihood of both the first 4 bytes and the hash matching should be enough.
 					t = candidateL.offset - e.cur
 					s += checkAt
-					if debug {
+					if debugMatches {
 						println("long match (after short)")
 					}
 					break
@@ -275,7 +275,7 @@
 				if debug && t < 0 {
 					panic("t<0")
 				}
-				if debug {
+				if debugMatches {
 					println("short match")
 				}
 				break
@@ -411,3 +411,316 @@
 		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
 	}
 }
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// Most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (2)
+		inputMargin            = 8 + 2
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	if e.cur > (1<<30)+e.maxMatchOff {
+		for i := range e.table[:] {
+			e.table[i] = tableEntry{}
+		}
+		for i := range e.longTable[:] {
+			e.longTable[i] = tableEntry{}
+		}
+		e.cur = e.maxMatchOff
+	}
+
+	s := int32(0)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 1.
+	stepSize := int32(e.o.targetLength)
+	if stepSize == 0 {
+		stepSize++
+	}
+
+	const kSearchStrength = 8
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debug {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		var t int32
+		for {
+
+			nextHashS := hash5(cv, dFastShortTableBits)
+			nextHashL := hash8(cv, dFastLongTableBits)
+			candidateL := e.longTable[nextHashL]
+			candidateS := e.table[nextHashS]
+
+			const repOff = 1
+			repIndex := s - offset1 + repOff
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.table[nextHashS] = entry
+
+			if len(blk.sequences) > 2 {
+				if load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+					// Consider history as well.
+					var seq seq
+					//length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+					length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:]))
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 0
+					seq.offset = 1
+					if debugSequences {
+						println("repeat sequence", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+					s += length + repOff
+					nextEmit = s
+					if s >= sLimit {
+						if debug {
+							println("repeat ended", s, length)
+
+						}
+						break encodeLoop
+					}
+					cv = load6432(src, s)
+					continue
+				}
+			}
+			// Find the offsets of our two matches.
+			coffsetL := s - (candidateL.offset - e.cur)
+			coffsetS := s - (candidateS.offset - e.cur)
+
+			// Check if we have a long match.
+			if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+				// Found a long match, likely at least 8 bytes.
+				// Reference encoder checks all 8 bytes, we only check 4,
+				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+				t = candidateL.offset - e.cur
+				if debug && s <= t {
+					panic("s <= t")
+				}
+				if debug && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+				break
+			}
+
+			// Check if we have a short match.
+			if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+				// found a regular match
+				// See if we can find a long match at s+1
+				const checkAt = 1
+				cv := load6432(src, s+checkAt)
+				nextHashL = hash8(cv, dFastLongTableBits)
+				candidateL = e.longTable[nextHashL]
+				coffsetL = s - (candidateL.offset - e.cur) + checkAt
+
+				// We can store it, since we have at least a 4 byte match.
+				e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
+				if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+					// Found a long match, likely at least 8 bytes.
+					// Reference encoder checks all 8 bytes, we only check 4,
+					// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+					t = candidateL.offset - e.cur
+					s += checkAt
+					if debugMatches {
+						println("long match (after short)")
+					}
+					break
+				}
+
+				t = candidateS.offset - e.cur
+				if debug && s <= t {
+					panic("s <= t")
+				}
+				if debug && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debug && t < 0 {
+					panic("t<0")
+				}
+				if debugMatches {
+					println("short match")
+				}
+				break
+			}
+
+			// No match found, move forward in input.
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+
+		// A 4-byte match has been found. Update recent offsets.
+		// We'll later see if more than 4 bytes.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debug && s <= t {
+			panic("s <= t")
+		}
+
+		// Extend the 4-byte match as long as possible.
+		//l := e.matchlen(s+4, t+4, src) + 4
+		l := int32(matchLen(src[s+4:], src[t+4:])) + 4
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+
+		// Index match start+1 (long) and start+2 (short)
+		index0 := s - l + 1
+		// Index match end-2 (long) and end-1 (short)
+		index1 := s - 2
+
+		cv0 := load6432(src, index0)
+		cv1 := load6432(src, index1)
+		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+		e.longTable[hash8(cv0, dFastLongTableBits)] = te0
+		e.longTable[hash8(cv1, dFastLongTableBits)] = te1
+		cv0 >>= 8
+		cv1 >>= 8
+		te0.offset++
+		te1.offset++
+		te0.val = uint32(cv0)
+		te1.val = uint32(cv1)
+		e.table[hash5(cv0, dFastShortTableBits)] = te0
+		e.table[hash5(cv1, dFastShortTableBits)] = te1
+
+		cv = load6432(src, s)
+
+		if len(blk.sequences) <= 2 {
+			continue
+		}
+
+		// Check offset 2
+		for {
+			o2 := s - offset2
+			if load3232(src, o2) != uint32(cv) {
+				// Do regular search
+				break
+			}
+
+			// Store this, since we have it.
+			nextHashS := hash5(cv1>>8, dFastShortTableBits)
+			nextHashL := hash8(cv, dFastLongTableBits)
+
+			// We have at least 4 byte match.
+			// No need to check backwards. We come straight from a match
+			//l := 4 + e.matchlen(s+4, o2+4, src)
+			l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
+
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.table[nextHashS] = entry
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				// Finished
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	if debug {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index 6f388de..0bdddac 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -329,6 +329,246 @@
 	}
 }
 
+// EncodeNoHist will encode a block with no history and no following blocks.
+// Most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+	const (
+		inputMargin            = 8
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+	)
+	if debug {
+		if len(src) > maxBlockSize {
+			panic("src too big")
+		}
+	}
+	// Protect against e.cur wraparound.
+	if e.cur > (1<<30)+e.maxMatchOff {
+		for i := range e.table[:] {
+			e.table[i] = tableEntry{}
+		}
+		e.cur = e.maxMatchOff
+	}
+
+	s := int32(0)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 2.
+	const stepSize = 2
+
+	// TEMPLATE
+	const hashLog = tableBits
+	// seems global, but would be nice to tweak.
+	const kSearchStrength = 8
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debug {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		// t will contain the match offset when we find one.
+		// When exiting the search loop, we have already checked 4 bytes.
+		var t int32
+
+		// We will not use repeat offsets across blocks.
+		// We enforce this by skipping them for the first 3 matches.
+
+		for {
+			nextHash := hash6(cv, hashLog)
+			nextHash2 := hash6(cv>>8, hashLog)
+			candidate := e.table[nextHash]
+			candidate2 := e.table[nextHash2]
+			repIndex := s - offset1 + 2
+
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+			if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
+				// Consider history as well.
+				var seq seq
+				// lenght := 4 + e.matchlen(s+6, repIndex+4, src)
+				lenght := 4 + int32(matchLen(src[s+6:], src[repIndex+4:]))
+
+				seq.matchLen = uint32(lenght - zstdMinMatch)
+
+				// We might be able to match backwards.
+				// Extend as long as we can.
+				start := s + 2
+				// We end the search early, so we don't risk 0 literals
+				// and have to do special offset treatment.
+				startLimit := nextEmit + 1
+
+				sMin := s - e.maxMatchOff
+				if sMin < 0 {
+					sMin = 0
+				}
+				for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] {
+					repIndex--
+					start--
+					seq.matchLen++
+				}
+				addLiterals(&seq, start)
+
+				// rep 0
+				seq.offset = 1
+				if debugSequences {
+					println("repeat sequence", seq, "next s:", s)
+				}
+				blk.sequences = append(blk.sequences, seq)
+				s += lenght + 2
+				nextEmit = s
+				if s >= sLimit {
+					if debug {
+						println("repeat ended", s, lenght)
+
+					}
+					break encodeLoop
+				}
+				cv = load6432(src, s)
+				continue
+			}
+			coffset0 := s - (candidate.offset - e.cur)
+			coffset1 := s - (candidate2.offset - e.cur) + 1
+			if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
+				// found a regular match
+				t = candidate.offset - e.cur
+				if debug && s <= t {
+					panic("s <= t")
+				}
+				if debug && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				break
+			}
+
+			if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
+				// found a regular match
+				t = candidate2.offset - e.cur
+				s++
+				if debug && s <= t {
+					panic("s <= t")
+				}
+				if debug && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debug && t < 0 {
+					panic("t<0")
+				}
+				break
+			}
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+		// A 4-byte match has been found. We'll later see if more than 4 bytes.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debug && s <= t {
+			panic("s <= t")
+		}
+
+		// Extend the 4-byte match as long as possible.
+		//l := e.matchlenNoHist(s+4, t+4, src) + 4
+		l := int32(matchLen(src[s+4:], src[t+4:])) + 4
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence.
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		// Don't use repeat offsets
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+		cv = load6432(src, s)
+
+		// Check offset 2
+		if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) {
+			// We have at least 4 byte match.
+			// No need to check backwards. We come straight from a match
+			//l := 4 + e.matchlenNoHist(s+4, o2+4, src)
+			l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
+
+			// Store this, since we have it.
+			nextHash := hash6(cv, hashLog)
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				break encodeLoop
+			}
+			// Prepare next loop.
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	if debug {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
+
 func (e *fastEncoder) addBlock(src []byte) int32 {
 	// check if we have space already
 	if len(e.hist)+len(src) > cap(e.hist) {
@@ -362,6 +602,11 @@
 	e.blk = enc
 }
 
+func (e *fastEncoder) matchlenNoHist(s, t int32, src []byte) int32 {
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:], src[t:]))
+}
+
 func (e *fastEncoder) matchlen(s, t int32, src []byte) int32 {
 	if debug {
 		if s < 0 {
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index a855655..366dd66 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -29,6 +29,7 @@
 
 type encoder interface {
 	Encode(blk *blockEnc, src []byte)
+	EncodeNoHist(blk *blockEnc, src []byte)
 	Block() *blockEnc
 	CRC() *xxhash.Digest
 	AppendCRC([]byte) []byte
@@ -262,7 +263,7 @@
 			// If we got the exact same number of literals as input,
 			// assume the literals cannot be compressed.
 			if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
-				err = blk.encode()
+				err = blk.encode(e.o.noEntropy)
 			}
 			switch err {
 			case errIncompressible:
@@ -404,7 +405,7 @@
 			// Add frame header.
 			fh := frameHeader{
 				ContentSize:   0,
-				WindowSize:    minWindowSize,
+				WindowSize:    MinWindowSize,
 				SingleSegment: true,
 				// Adding a checksum would be a waste of space.
 				Checksum: false,
@@ -433,7 +434,8 @@
 	}()
 	enc.Reset()
 	blk := enc.Block()
-	single := len(src) > 1<<20
+	// Use single segments when above minimum window and below 1MB.
+	single := len(src) < 1<<20 && len(src) > MinWindowSize
 	if e.o.single != nil {
 		single = *e.o.single
 	}
@@ -454,26 +456,23 @@
 		panic(err)
 	}
 
-	for len(src) > 0 {
-		todo := src
-		if len(todo) > e.o.blockSize {
-			todo = todo[:e.o.blockSize]
-		}
-		src = src[len(todo):]
+	if len(src) <= e.o.blockSize && len(src) <= maxBlockSize {
+		// Slightly faster with no history and everything in one block.
 		if e.o.crc {
-			_, _ = enc.CRC().Write(todo)
+			_, _ = enc.CRC().Write(src)
 		}
 		blk.reset(nil)
-		blk.pushOffsets()
-		enc.Encode(blk, todo)
-		if len(src) == 0 {
-			blk.last = true
-		}
-		err := errIncompressible
+		blk.last = true
+		enc.EncodeNoHist(blk, src)
+
 		// If we got the exact same number of literals as input,
 		// assume the literals cannot be compressed.
-		if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
-			err = blk.encode()
+		err := errIncompressible
+		oldout := blk.output
+		if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
+			// Output directly to dst
+			blk.output = dst
+			err = blk.encode(e.o.noEntropy)
 		}
 
 		switch err {
@@ -481,13 +480,49 @@
 			if debug {
 				println("Storing incompressible block as raw")
 			}
-			blk.encodeRaw(todo)
-			blk.popOffsets()
+			dst = blk.encodeRawTo(dst, src)
 		case nil:
+			dst = blk.output
 		default:
 			panic(err)
 		}
-		dst = append(dst, blk.output...)
+		blk.output = oldout
+	} else {
+		for len(src) > 0 {
+			todo := src
+			if len(todo) > e.o.blockSize {
+				todo = todo[:e.o.blockSize]
+			}
+			src = src[len(todo):]
+			if e.o.crc {
+				_, _ = enc.CRC().Write(todo)
+			}
+			blk.reset(nil)
+			blk.pushOffsets()
+			enc.Encode(blk, todo)
+			if len(src) == 0 {
+				blk.last = true
+			}
+			err := errIncompressible
+			// If we got the exact same number of literals as input,
+			// assume the literals cannot be compressed.
+			if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
+				err = blk.encode(e.o.noEntropy)
+			}
+
+			switch err {
+			case errIncompressible:
+				if debug {
+					println("Storing incompressible block as raw")
+				}
+				dst = blk.encodeRawTo(dst, todo)
+				blk.popOffsets()
+			case nil:
+				dst = append(dst, blk.output...)
+			default:
+				panic(err)
+			}
+		}
 	}
 	if e.o.crc {
 		dst = enc.AppendCRC(dst)
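
For context, the reworked EncodeAll above takes a single-block fast path (EncodeNoHist, output written directly to dst) when the whole input fits in one block, and falls back to the multi-block loop otherwise. A minimal usage sketch of the public API that exercises this path; the payload is illustrative and error handling is trimmed:

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// A nil writer is fine when the encoder is only used via EncodeAll.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	src := []byte("some repetitive payload some repetitive payload")
	// EncodeAll appends the compressed frame to the second argument.
	compressed := enc.EncodeAll(src, nil)
	fmt.Println("in:", len(src), "out:", len(compressed))
}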
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index 9354839..40eb457 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -1,6 +1,7 @@
 package zstd
 
 import (
+	"errors"
 	"fmt"
 	"runtime"
 	"strings"
@@ -19,6 +20,7 @@
 	windowSize int
 	level      EncoderLevel
 	fullZero   bool
+	noEntropy  bool
 }
 
 func (o *encoderOptions) setDefault() {
@@ -64,6 +66,30 @@
 	}
 }
 
+// WithWindowSize will set the maximum allowed back-reference distance.
+// The value must be a power of two between MinWindowSize and MaxWindowSize.
+// A larger value will enable better compression but allocate more memory and,
+// for above-default values, take considerably longer.
+// The default value is determined by the compression level.
+func WithWindowSize(n int) EOption {
+	return func(o *encoderOptions) error {
+		switch {
+		case n < MinWindowSize:
+			return fmt.Errorf("window size must be at least %d", MinWindowSize)
+		case n > MaxWindowSize:
+			return fmt.Errorf("window size must be at most %d", MaxWindowSize)
+		case (n & (n - 1)) != 0:
+			return errors.New("window size must be a power of 2")
+		}
+
+		o.windowSize = n
+		if o.blockSize > o.windowSize {
+			o.blockSize = o.windowSize
+		}
+		return nil
+	}
+}
+
 // WithEncoderPadding will add padding to all output so the size will be a multiple of n.
 // This can be used to obfuscate the exact output size or make blocks of a certain size.
 // The contents will be a skippable frame, so it will be invisible by the decoder.
@@ -177,6 +203,16 @@
 	}
 }
 
+// WithNoEntropyCompression will always skip entropy compression of literals.
+// This can be useful if content has matches but is unlikely to benefit from entropy
+// compression. The slight speed improvement is usually not worth enabling this option.
+func WithNoEntropyCompression(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.noEntropy = b
+		return nil
+	}
+}
+
 // WithSingleSegment will set the "single segment" flag when EncodeAll is used.
 // If this flag is set, data must be regenerated within a single continuous memory segment.
 // In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present.
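
A hedged example of wiring the two new options into an encoder; the 1 MB window is just an illustrative value that satisfies the power-of-two bounds checked above:

package main

import (
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// WithWindowSize must be a power of two in [MinWindowSize, MaxWindowSize];
	// WithNoEntropyCompression(true) skips entropy coding of literals.
	enc, err := zstd.NewWriter(nil,
		zstd.WithWindowSize(1<<20),
		zstd.WithNoEntropyCompression(true),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()

	out := enc.EncodeAll([]byte("example payload"), nil)
	log.Printf("compressed to %d bytes", len(out))
}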
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 839a95f..4079074 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -49,7 +49,8 @@
 
 const (
 	// The minimum Window_Size is 1 KB.
-	minWindowSize = 1 << 10
+	MinWindowSize = 1 << 10
+	MaxWindowSize = 1 << 30
 )
 
 var (
@@ -60,7 +61,7 @@
 func newFrameDec(o decoderOptions) *frameDec {
 	d := frameDec{
 		o:             o,
-		maxWindowSize: 1 << 30,
+		maxWindowSize: MaxWindowSize,
 	}
 	if d.maxWindowSize > o.maxDecodedSize {
 		d.maxWindowSize = o.maxDecodedSize
@@ -193,14 +194,14 @@
 			// When FCS_Field_Size is 2, the offset of 256 is added.
 			d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256
 		case 4:
-			d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3] << 24))
+			d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24)
 		case 8:
 			d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
 			d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24)
 			d.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
 		}
 		if debug {
-			println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]))
+			println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize)
 		}
 	}
 	// Move this to shared.
@@ -215,8 +216,8 @@
 	if d.WindowSize == 0 && d.SingleSegment {
 		// We may not need window in this case.
 		d.WindowSize = d.FrameContentSize
-		if d.WindowSize < minWindowSize {
-			d.WindowSize = minWindowSize
+		if d.WindowSize < MinWindowSize {
+			d.WindowSize = MinWindowSize
 		}
 	}
 
@@ -225,7 +226,7 @@
 		return ErrWindowSizeExceeded
 	}
 	// The minimum Window_Size is 1 KB.
-	if d.WindowSize < minWindowSize {
+	if d.WindowSize < MinWindowSize {
 		println("got window size: ", d.WindowSize)
 		return ErrWindowSizeTooSmall
 	}
@@ -309,7 +310,9 @@
 		}
 		return ErrCRCMismatch
 	}
-	println("CRC ok")
+	if debug {
+		println("CRC ok", tmp[:])
+	}
 	return nil
 }
 
@@ -411,6 +414,7 @@
 		}
 		written += int64(len(r.b))
 		if d.SingleSegment && uint64(written) > d.FrameContentSize {
+			println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize)
 			r.err = ErrFrameSizeExceeded
 			output <- r
 			return
@@ -461,6 +465,7 @@
 			break
 		}
 		if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
+			println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
 			err = ErrFrameSizeExceeded
 			break
 		}
@@ -473,9 +478,10 @@
 			if err == nil {
 				if n != len(dst)-crcStart {
 					err = io.ErrShortWrite
+				} else {
+					err = d.checkCRC()
 				}
 			}
-			err = d.checkCRC()
 		}
 	}
 	d.history.b = saved
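
The 4-byte FrameContentSize fix above moves the shift outside the byte conversion: in the old form the shift happened inside a uint8, so the top byte was always lost. A small standalone sketch showing the difference against the standard-library little-endian decode (byte values are illustrative):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0x78, 0x56, 0x34, 0x12}

	// Buggy form: b[3] << 24 is evaluated as a byte shift and yields 0.
	bad := uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3] << 24))
	// Fixed form, as in the patch: convert first, then shift.
	good := uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24)

	fmt.Printf("bad=%#x good=%#x stdlib=%#x\n", bad, good, binary.LittleEndian.Uint32(b))
}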
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
index dfa6cf7..619836f 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -502,21 +502,6 @@
 // writeCount will write the normalized histogram count to header.
 // This is read back by readNCount.
 func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
-	var (
-		tableLog  = s.actualTableLog
-		tableSize = 1 << tableLog
-		previous0 bool
-		charnum   uint16
-
-		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
-
-		// Write Table Size
-		bitStream = uint32(tableLog - minEncTablelog)
-		bitCount  = uint(4)
-		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
-		threshold = int16(tableSize)
-		nbBits    = uint(tableLog + 1)
-	)
 	if s.useRLE {
 		return append(out, s.rleVal), nil
 	}
@@ -524,7 +509,28 @@
 		// Never write predefined.
 		return out, nil
 	}
-	outP := len(out)
+
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		// maximum header size plus 2 extra bytes for final output if bitCount == 0.
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minEncTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+		outP      = len(out)
+	)
+	if cap(out) < outP+maxHeaderSize {
+		out = append(out, make([]byte, maxHeaderSize*3)...)
+		out = out[:len(out)-maxHeaderSize*3]
+	}
 	out = out[:outP+maxHeaderSize]
 
 	// stops at 1
@@ -594,11 +600,14 @@
 		}
 	}
 
+	if outP+2 > len(out) {
+		return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen])
+	}
 	out[outP] = byte(bitStream)
 	out[outP+1] = byte(bitStream >> 8)
 	outP += int((bitCount + 7) / 8)
 
-	if uint16(charnum) > s.symbolLen {
+	if charnum > s.symbolLen {
 		return nil, errors.New("internal error: charnum > s.symbolLen")
 	}
 	return out[:outP], nil
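
The reworked writeCount above grows the capacity of out before reslicing, so the later header writes can index past the current length without reallocating mid-write. A small sketch of that grow-then-truncate idiom under the same assumptions; growCap is a hypothetical helper name, not part of the package:

package main

import "fmt"

// growCap ensures buf has capacity for at least n more bytes
// without changing its length.
func growCap(buf []byte, n int) []byte {
	if cap(buf) < len(buf)+n {
		// Over-allocate, then cut back to the original length.
		buf = append(buf, make([]byte, n*3)...)
		buf = buf[:len(buf)-n*3]
	}
	return buf
}

func main() {
	out := make([]byte, 4, 4)
	out = growCap(out, 16)
	fmt.Println(len(out), cap(out) >= 20) // 4 true
}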
diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go
index a048818..356956b 100644
--- a/vendor/github.com/klauspost/compress/zstd/snappy.go
+++ b/vendor/github.com/klauspost/compress/zstd/snappy.go
@@ -111,7 +111,7 @@
 			// Add empty last block
 			r.block.reset(nil)
 			r.block.last = true
-			err := r.block.encodeLits()
+			err := r.block.encodeLits(false)
 			if err != nil {
 				return written, err
 			}
@@ -178,7 +178,7 @@
 				r.err = ErrSnappyCorrupt
 				return written, r.err
 			}
-			err = r.block.encode()
+			err = r.block.encode(false)
 			switch err {
 			case errIncompressible:
 				r.block.popOffsets()
@@ -188,7 +188,7 @@
 					println("snappy.Decode:", err)
 					return written, err
 				}
-				err = r.block.encodeLits()
+				err = r.block.encodeLits(false)
 				if err != nil {
 					return written, err
 				}
@@ -235,7 +235,7 @@
 				r.err = ErrSnappyCorrupt
 				return written, r.err
 			}
-			err := r.block.encodeLits()
+			err := r.block.encodeLits(false)
 			if err != nil {
 				return written, err
 			}
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index b975954..57a8a2f 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -11,6 +11,7 @@
 
 const debug = false
 const debugSequences = false
+const debugMatches = false
 
 // force encoder to use predefined tables.
 const forcePreDef = false