[VOL-4290] Voltha go library updates for gRPC migration

Change-Id: I1aa2774beb6b7ed7419bc45aeb53fcae8a8ecda0
diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore
index 6e362e4..2c9adc2 100644
--- a/vendor/github.com/Shopify/sarama/.gitignore
+++ b/vendor/github.com/Shopify/sarama/.gitignore
@@ -25,3 +25,5 @@
 
 coverage.txt
 profile.out
+
+simplest-uncommitted-msg-0.1-jar-with-dependencies.jar
diff --git a/vendor/github.com/Shopify/sarama/.golangci.yml b/vendor/github.com/Shopify/sarama/.golangci.yml
new file mode 100644
index 0000000..09e5c46
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/.golangci.yml
@@ -0,0 +1,74 @@
+run:
+  timeout: 5m
+  deadline: 10m
+
+linters-settings:
+  govet:
+    check-shadowing: false
+  golint:
+    min-confidence: 0
+  gocyclo:
+    min-complexity: 99
+  maligned:
+    suggest-new: true
+  dupl:
+    threshold: 100
+  goconst:
+    min-len: 2
+    min-occurrences: 3
+  misspell:
+    locale: US
+  goimports:
+    local-prefixes: github.com/Shopify/sarama
+  gocritic:
+    enabled-tags:
+      - diagnostic
+      - experimental
+      - opinionated
+      - performance
+      - style
+    disabled-checks:
+      - wrapperFunc
+      - ifElseChain
+  funlen:
+    lines: 300
+    statements: 300
+
+linters:
+  disable-all: true
+  enable:
+    - bodyclose
+    - deadcode
+    - depguard
+    - dogsled
+    # - dupl
+    - errcheck
+    - funlen
+    - gochecknoinits
+    # - goconst
+    # - gocritic
+    - gocyclo
+    - gofmt
+    - goimports
+    # - golint
+    - gosec
+    # - gosimple
+    - govet
+    # - ineffassign
+    # - misspell
+    # - nakedret
+    # - scopelint
+    - staticcheck
+    - structcheck
+    # - stylecheck
+    - typecheck
+    - unconvert
+    - unused
+    - varcheck
+    - whitespace
+
+issues:
+  exclude:
+    - "G404: Use of weak random number generator"
+  # maximum count of issues with the same text. set to 0 for unlimited. default is 3.
+  max-same-issues: 0
diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml
deleted file mode 100644
index 4331fa1..0000000
--- a/vendor/github.com/Shopify/sarama/.travis.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-dist: xenial
-language: go
-go:
-- 1.11.x
-- 1.12.x
-
-env:
-  global:
-  - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095
-  - TOXIPROXY_ADDR=http://localhost:8474
-  - KAFKA_INSTALL_ROOT=/home/travis/kafka
-  - KAFKA_HOSTNAME=localhost
-  - DEBUG=true
-  matrix:
-  - KAFKA_VERSION=2.1.1 KAFKA_SCALA_VERSION=2.12
-  - KAFKA_VERSION=2.2.1 KAFKA_SCALA_VERSION=2.12
-  - KAFKA_VERSION=2.3.0 KAFKA_SCALA_VERSION=2.12
-
-before_install:
-- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
-- vagrant/install_cluster.sh
-- vagrant/boot_cluster.sh
-- vagrant/create_topics.sh
-- vagrant/run_java_producer.sh
-
-install: make install_dependencies
-
-script:
-- make test
-- make vet
-- make errcheck
-- if [[ "$TRAVIS_GO_VERSION" == 1.12* ]]; then make fmt; fi
-
-after_success:
-- bash <(curl -s https://codecov.io/bash)
-
-after_script: vagrant/halt_cluster.sh
diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md
index 02bd0ff..59ccd1d 100644
--- a/vendor/github.com/Shopify/sarama/CHANGELOG.md
+++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md
@@ -1,5 +1,237 @@
 # Changelog
 
+#### Unreleased
+
+#### Version 1.28.0 (2021-02-15)
+
+**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. See #1788 for details.**
+
+- #1870 - @kvch - Update Kerberos library to latest major
+- #1876 - @bai - Update docs, reference pkg.go.dev
+- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close
+- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages
+- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies
+- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy
+- #1862 - @bai - Fix CI setenv permissions issues
+- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev
+- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica
+
+#### Version 1.27.2 (2020-10-21)
+
+# Improvements
+
+#1750 - @krantideep95 Adds missing mock responses for mocking consumer group
+
+# Fixes
+
+#1817 - reverts #1785 - Add private method to Client interface to prevent implementation
+
+#### Version 1.27.1 (2020-10-07)
+
+# Improvements
+
+#1775 - @d1egoaz - Adds a Producer Interceptor example
+#1781 - @justin-chen - Refresh brokers given list of seed brokers
+#1784 - @justin-chen - Add randomize seed broker method
+#1790 - @d1egoaz - remove example binary
+#1798 - @bai - Test against Go 1.15
+#1785 - @justin-chen - Add private method to Client interface to prevent implementation
+#1802 - @uvw - Support Go 1.13 error unwrapping
+
+# Fixes
+
+#1791 - @stanislavkozlovski - bump default version to 1.0.0
+
+#### Version 1.27.0 (2020-08-11)
+
+# Improvements
+
+#1466 - @rubenvp8510  - Expose kerberos fast negotiation configuration
+#1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests
+#1699 - @wclaeys  - Consumer group support for manually committing offsets
+#1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0
+#1726 - @d1egoaz - Include zstd on the functional tests
+#1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors
+#1738 - @varun06 - fixed variable names that are named same as some std lib package names
+#1741 - @varun06 - updated zstd dependency to latest v1.10.10
+#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base
+#1763 - @alrs - remove deprecated tls options from test
+#1769 - @bai - Add support for Kafka 2.6.0
+
+# Fixes
+
+#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication
+#1744 - @alrs  - Fix isBalanced Function Signature
+
+#### Version 1.26.4 (2020-05-19)
+
+# Fixes
+
+- #1701 - @d1egoaz - Set server name only for the current broker
+- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka
+
+#### Version 1.26.3 (2020-05-07)
+
+# Fixes
+
+- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config
+
+#### Version 1.26.2 (2020-05-06)
+
+# ⚠️ Known Issues
+
+This release has been marked as not ready for production and may be unstable, please use v1.26.4.
+
+# Improvements
+
+- #1560 - @iyacontrol - add sync pool for gzip 1-9
+- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID
+- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignements APIs
+- #1632 - @bai - Add support for Go 1.14
+- #1640 - @random-dwi - Feature/fix list partition reassignments
+- #1646 - @mimaison - Add DescribeLogDirs to admin client
+- #1667 - @bai - Add support for kafka 2.5.0
+
+# Fixes
+
+- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0
+- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine
+- #1602 - @d1egoaz - adds a note about consumer groups Consume method
+- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly
+- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented
+- #1614 - @alrs - produce_response.go: Remove Unused Functions
+- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables
+- #1639 - @agriffaut - Handle errors with no message but error code
+- #1643 - @kzinglzy - fix `config.net.keepalive`
+- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs
+- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata
+- #1650 - @lavoiesl - Return the response error in heartbeatLoop
+- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die
+- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy.
+
+#### Version 1.26.1 (2020-02-04)
+
+Improvements:
+- Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539))
+- Fix misleading example for cluster admin ([1595](https://github.com/Shopify/sarama/pull/1595))
+- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/Shopify/sarama/pull/1573))
+- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/Shopify/sarama/pull/1592))
+
+Bug Fixes:
+- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/Shopify/sarama/pull/1590))
+- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589))
+
+#### Version 1.26.0 (2020-01-24)
+
+New Features:
+- Enable zstd compression
+  ([1574](https://github.com/Shopify/sarama/pull/1574),
+  [1582](https://github.com/Shopify/sarama/pull/1582))
+- Support headers in tools kafka-console-producer
+  ([1549](https://github.com/Shopify/sarama/pull/1549))
+
+Improvements:
+- Add SASL AuthIdentity to SASL frames (authzid)
+  ([1585](https://github.com/Shopify/sarama/pull/1585)).
+
+Bug Fixes:
+- Sending messages with ZStd compression enabled fails in multiple ways
+  ([1252](https://github.com/Shopify/sarama/issues/1252)).
+- Use the broker for any admin on BrokerConfig
+  ([1571](https://github.com/Shopify/sarama/pull/1571)).
+- Set DescribeConfigRequest Version field
+  ([1576](https://github.com/Shopify/sarama/pull/1576)).
+- ConsumerGroup flooding logs with client/metadata update req
+  ([1578](https://github.com/Shopify/sarama/pull/1578)).
+- MetadataRequest version in DescribeCluster
+  ([1580](https://github.com/Shopify/sarama/pull/1580)).
+- Fix deadlock in consumer group handleError
+  ([1581](https://github.com/Shopify/sarama/pull/1581))
+- Fill in the Fetch{Request,Response} protocol
+  ([1582](https://github.com/Shopify/sarama/pull/1582)).
+- Retry topic request on ControllerNotAvailable
+  ([1586](https://github.com/Shopify/sarama/pull/1586)).
+
+#### Version 1.25.0 (2020-01-13)
+
+New Features:
+- Support TLS protocol in kafka-producer-performance
+  ([1538](https://github.com/Shopify/sarama/pull/1538)).
+- Add support for kafka 2.4.0
+  ([1552](https://github.com/Shopify/sarama/pull/1552)).
+
+Improvements:
+- Allow the Consumer to disable auto-commit offsets
+  ([1164](https://github.com/Shopify/sarama/pull/1164)).
+- Produce records with consistent timestamps
+  ([1455](https://github.com/Shopify/sarama/pull/1455)).
+
+Bug Fixes:
+- Fix incorrect SetTopicMetadata name mentions
+  ([1534](https://github.com/Shopify/sarama/pull/1534)).
+- Fix client.tryRefreshMetadata Println
+  ([1535](https://github.com/Shopify/sarama/pull/1535)).
+- Fix panic on calling updateMetadata on closed client
+  ([1531](https://github.com/Shopify/sarama/pull/1531)).
+- Fix possible faulty metrics in TestFuncProducing
+  ([1545](https://github.com/Shopify/sarama/pull/1545)).
+
+#### Version 1.24.1 (2019-10-31)
+
+New Features:
+- Add DescribeLogDirs Request/Response pair
+  ([1520](https://github.com/Shopify/sarama/pull/1520)).
+
+Bug Fixes:
+- Fix ClusterAdmin returning invalid controller ID on DescribeCluster
+  ([1518](https://github.com/Shopify/sarama/pull/1518)).
+- Fix issue with consumergroup not rebalancing when new partition is added
+  ([1525](https://github.com/Shopify/sarama/pull/1525)).
+- Ensure consistent use of read/write deadlines
+  ([1529](https://github.com/Shopify/sarama/pull/1529)).
+
+#### Version 1.24.0 (2019-10-09)
+
+New Features:
+- Add sticky partition assignor
+  ([1416](https://github.com/Shopify/sarama/pull/1416)).
+- Switch from cgo zstd package to pure Go implementation
+  ([1477](https://github.com/Shopify/sarama/pull/1477)).
+
+Improvements:
+- Allow creating ClusterAdmin from client
+  ([1415](https://github.com/Shopify/sarama/pull/1415)).
+- Set KafkaVersion in ListAcls method
+  ([1452](https://github.com/Shopify/sarama/pull/1452)).
+- Set request version in CreateACL ClusterAdmin method
+  ([1458](https://github.com/Shopify/sarama/pull/1458)).
+- Set request version in DeleteACL ClusterAdmin method
+  ([1461](https://github.com/Shopify/sarama/pull/1461)).
+- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest
+  ([1464](https://github.com/Shopify/sarama/pull/1464)).
+- Remove direct usage of gofork
+  ([1465](https://github.com/Shopify/sarama/pull/1465)).
+- Add support for Go 1.13
+  ([1478](https://github.com/Shopify/sarama/pull/1478)).
+- Improve behavior of NewMockListAclsResponse
+  ([1481](https://github.com/Shopify/sarama/pull/1481)).
+
+Bug Fixes:
+- Fix race condition in consumergroup example
+  ([1434](https://github.com/Shopify/sarama/pull/1434)).
+- Fix brokerProducer goroutine leak
+  ([1442](https://github.com/Shopify/sarama/pull/1442)).
+- Use released version of lz4 library
+  ([1469](https://github.com/Shopify/sarama/pull/1469)).
+- Set correct version in MockDeleteTopicsResponse
+  ([1484](https://github.com/Shopify/sarama/pull/1484)).
+- Fix CLI help message typo
+  ([1494](https://github.com/Shopify/sarama/pull/1494)).
+
+Known Issues:
+- Please **don't** use Zstd, as it doesn't work right now.
+  See https://github.com/Shopify/sarama/issues/1252
+
 #### Version 1.23.1 (2019-07-22)
 
 Bug Fixes:
diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile
index 360b220..4714d77 100644
--- a/vendor/github.com/Shopify/sarama/Makefile
+++ b/vendor/github.com/Shopify/sarama/Makefile
@@ -1,52 +1,31 @@
-export GO111MODULE=on
+default: fmt get update test lint
 
-default: fmt vet errcheck test lint
+GO       := go
+GOBUILD  := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG)
+GOTEST   := $(GO) test -gcflags='-l' -p 3 -v -race -timeout 6m -coverprofile=profile.out -covermode=atomic
 
-# Taken from https://github.com/codecov/example-go#caveat-multiple-files
-.PHONY: test
-test:
-	echo "" > coverage.txt
-	for d in `go list ./...`; do \
-		go test -p 1 -v -timeout 240s -race -coverprofile=profile.out -covermode=atomic $$d || exit 1; \
-		if [ -f profile.out ]; then \
-			cat profile.out >> coverage.txt; \
-			rm profile.out; \
-		fi \
-	done
+FILES    := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go')
+TESTS    := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go')
 
-GOLINT := $(shell command -v golint)
-
-.PHONY: lint
-lint:
-ifndef GOLINT
-	go get golang.org/x/lint/golint
-endif
-	go list ./... | xargs golint
-
-.PHONY: vet
-vet:
-	go vet ./...
-
-ERRCHECK := $(shell command -v errcheck)
-# See https://github.com/kisielk/errcheck/pull/141 for details on ignorepkg
-.PHONY: errcheck
-errcheck:
-ifndef ERRCHECK
-	go get github.com/kisielk/errcheck
-endif
-	errcheck -ignorepkg fmt github.com/Shopify/sarama/...
-
-.PHONY: fmt
-fmt:
-	@if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi
-
-.PHONY : install_dependencies
-install_dependencies: get
-
-.PHONY: get
 get:
-	go get -t -v ./...
+	$(GO) get ./...
+	$(GO) mod verify
+	$(GO) mod tidy
 
-.PHONY: clean
-clean:
-	go clean ./...
+update:
+	$(GO) get -u -v ./...
+	$(GO) mod verify
+	$(GO) mod tidy
+
+fmt:
+	gofmt -s -l -w $(FILES) $(TESTS)
+
+lint:
+	GOFLAGS="-tags=functional" golangci-lint run
+
+test:
+	$(GOTEST) ./...
+
+.PHONY: test_functional
+test_functional:
+	$(GOTEST) -tags=functional ./...
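The new `test_functional` target (and the `GOFLAGS="-tags=functional"` lint invocation) gates functional tests behind a Go build tag. As a hedged sketch only, not part of this patch, a test file that is compiled solely when `-tags=functional` is passed could look like the following; the file, package, and test names are illustrative assumptions.

```go
//go:build functional
// +build functional

// Compiled only when `go test -tags=functional` is run, which is what the
// Makefile's test_functional target does. Names here are illustrative.
package sarama_test

import "testing"

func TestFunctionalExample(t *testing.T) {
	t.Log("runs only with -tags=functional")
}
```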
diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md
index 4cd736b..f2beb73 100644
--- a/vendor/github.com/Shopify/sarama/README.md
+++ b/vendor/github.com/Shopify/sarama/README.md
@@ -1,39 +1,36 @@
-sarama
-======
+# sarama
 
-[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.svg)](https://godoc.org/github.com/Shopify/sarama)
+[![Go Reference](https://pkg.go.dev/badge/github.com/Shopify/sarama.svg)](https://pkg.go.dev/github.com/Shopify/sarama)
 [![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
 [![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama)
 
 Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
 
-### Getting started
+## Getting started
 
-- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama).
+- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/Shopify/sarama).
 - Mocks for testing are available in the [mocks](./mocks) subpackage.
 - The [examples](./examples) directory contains more elaborate example applications.
 - The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
 
 You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
 
-### Compatibility and API stability
+## Compatibility and API stability
 
 Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
 the two latest stable releases of Kafka and Go, and we provide a two month
 grace period for older releases. This means we currently officially support
-Go 1.11 through 1.12, and Kafka 2.0 through 2.3, although older releases are
+Go 1.15 through 1.16, and Kafka 2.6 through 2.8, although older releases are
 still likely to work.
 
 Sarama follows semantic versioning and provides API stability via the gopkg.in service.
 You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
 A changelog is available [here](CHANGELOG.md).
 
-### Contributing
+## Contributing
 
-* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
-* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more
-  technical and design details.
-* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
-  contains a wealth of useful information.
-* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
-* If you have any questions, just ask!
+- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
+- Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details.
+- The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information.
+- For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
+- If you have any questions, just ask!
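As a quick orientation for the README's getting-started pointers above, here is a minimal sketch (not part of this patch) of producing one message with sarama's SyncProducer; the broker address `localhost:9092`, the topic name, and the error handling are illustrative assumptions.

```go
// Minimal sketch: send a single message with a SyncProducer.
// Broker address and topic are assumptions for illustration only.
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // required by SyncProducer

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatalf("failed to start producer: %v", err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello from sarama"),
	})
	if err != nil {
		log.Fatalf("failed to send message: %v", err)
	}
	log.Printf("message stored in partition %d at offset %d", partition, offset)
}
```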
diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile
index f4b848a..07d7ffb 100644
--- a/vendor/github.com/Shopify/sarama/Vagrantfile
+++ b/vendor/github.com/Shopify/sarama/Vagrantfile
@@ -1,14 +1,8 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
 # We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
 MEMORY = 3072
 
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
-  config.vm.box = "ubuntu/trusty64"
+Vagrant.configure("2") do |config|
+  config.vm.box = "ubuntu/bionic64"
 
   config.vm.provision :shell, path: "vagrant/provision.sh"
 
diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/Shopify/sarama/acl_bindings.go
index 50b689d..13440be 100644
--- a/vendor/github.com/Shopify/sarama/acl_bindings.go
+++ b/vendor/github.com/Shopify/sarama/acl_bindings.go
@@ -1,6 +1,6 @@
 package sarama
 
-//Resource holds information about acl resource type
+// Resource holds information about acl resource type
 type Resource struct {
 	ResourceType        AclResourceType
 	ResourceName        string
@@ -46,7 +46,7 @@
 	return nil
 }
 
-//Acl holds information about acl type
+// Acl holds information about acl type
 type Acl struct {
 	Principal      string
 	Host           string
@@ -93,7 +93,7 @@
 	return nil
 }
 
-//ResourceAcls is an acl resource type
+// ResourceAcls is an acl resource type
 type ResourceAcls struct {
 	Resource
 	Acls []*Acl
diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go
index da1cdef..449102f 100644
--- a/vendor/github.com/Shopify/sarama/acl_create_request.go
+++ b/vendor/github.com/Shopify/sarama/acl_create_request.go
@@ -1,6 +1,6 @@
 package sarama
 
-//CreateAclsRequest is an acl creation request
+// CreateAclsRequest is an acl creation request
 type CreateAclsRequest struct {
 	Version      int16
 	AclCreations []*AclCreation
@@ -47,6 +47,10 @@
 	return c.Version
 }
 
+func (c *CreateAclsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (c *CreateAclsRequest) requiredVersion() KafkaVersion {
 	switch c.Version {
 	case 1:
@@ -56,7 +60,7 @@
 	}
 }
 
-//AclCreation is a wrapper around Resource and Acl type
+// AclCreation is a wrapper around Resource and Acl type
 type AclCreation struct {
 	Resource
 	Acl
diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go
index f5a5e9a..21d6c34 100644
--- a/vendor/github.com/Shopify/sarama/acl_create_response.go
+++ b/vendor/github.com/Shopify/sarama/acl_create_response.go
@@ -2,7 +2,7 @@
 
 import "time"
 
-//CreateAclsResponse is a an acl reponse creation type
+// CreateAclsResponse is an acl creation response type
 type CreateAclsResponse struct {
 	ThrottleTime         time.Duration
 	AclCreationResponses []*AclCreationResponse
@@ -55,11 +55,15 @@
 	return 0
 }
 
+func (c *CreateAclsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (c *CreateAclsResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
 
-//AclCreationResponse is an acl creation response type
+// AclCreationResponse is an acl creation response type
 type AclCreationResponse struct {
 	Err    KError
 	ErrMsg *string
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go
index 15908ea..5e5c03b 100644
--- a/vendor/github.com/Shopify/sarama/acl_delete_request.go
+++ b/vendor/github.com/Shopify/sarama/acl_delete_request.go
@@ -1,6 +1,6 @@
 package sarama
 
-//DeleteAclsRequest is a delete acl request
+// DeleteAclsRequest is a delete acl request
 type DeleteAclsRequest struct {
 	Version int
 	Filters []*AclFilter
@@ -48,6 +48,10 @@
 	return int16(d.Version)
 }
 
+func (d *DeleteAclsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
 	switch d.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go
index 6529565..cd33749 100644
--- a/vendor/github.com/Shopify/sarama/acl_delete_response.go
+++ b/vendor/github.com/Shopify/sarama/acl_delete_response.go
@@ -2,7 +2,7 @@
 
 import "time"
 
-//DeleteAclsResponse is a delete acl response
+// DeleteAclsResponse is a delete acl response
 type DeleteAclsResponse struct {
 	Version         int16
 	ThrottleTime    time.Duration
@@ -53,14 +53,18 @@
 }
 
 func (d *DeleteAclsResponse) version() int16 {
-	return int16(d.Version)
+	return d.Version
+}
+
+func (d *DeleteAclsResponse) headerVersion() int16 {
+	return 0
 }
 
 func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
 
-//FilterResponse is a filter response type
+// FilterResponse is a filter response type
 type FilterResponse struct {
 	Err          KError
 	ErrMsg       *string
@@ -111,7 +115,7 @@
 	return nil
 }
 
-//MatchingAcl is a matching acl type
+// MatchingAcl is a matching acl type
 type MatchingAcl struct {
 	Err    KError
 	ErrMsg *string
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go
index 5222d46..e0fe902 100644
--- a/vendor/github.com/Shopify/sarama/acl_describe_request.go
+++ b/vendor/github.com/Shopify/sarama/acl_describe_request.go
@@ -1,6 +1,6 @@
 package sarama
 
-//DescribeAclsRequest is a secribe acl request type
+// DescribeAclsRequest is a describe acl request type
 type DescribeAclsRequest struct {
 	Version int
 	AclFilter
@@ -25,6 +25,10 @@
 	return int16(d.Version)
 }
 
+func (d *DescribeAclsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
 	switch d.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go
index 12126e5..3255fd4 100644
--- a/vendor/github.com/Shopify/sarama/acl_describe_response.go
+++ b/vendor/github.com/Shopify/sarama/acl_describe_response.go
@@ -2,7 +2,7 @@
 
 import "time"
 
-//DescribeAclsResponse is a describe acl response type
+// DescribeAclsResponse is a describe acl response type
 type DescribeAclsResponse struct {
 	Version      int16
 	ThrottleTime time.Duration
@@ -74,7 +74,11 @@
 }
 
 func (d *DescribeAclsResponse) version() int16 {
-	return int16(d.Version)
+	return d.Version
+}
+
+func (d *DescribeAclsResponse) headerVersion() int16 {
+	return 0
 }
 
 func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/Shopify/sarama/acl_filter.go
index fad5558..b380161 100644
--- a/vendor/github.com/Shopify/sarama/acl_filter.go
+++ b/vendor/github.com/Shopify/sarama/acl_filter.go
@@ -46,7 +46,6 @@
 
 	if a.Version == 1 {
 		pattern, err := pd.getInt8()
-
 		if err != nil {
 			return err
 		}
diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go
index c10ad7b..c3ba8dd 100644
--- a/vendor/github.com/Shopify/sarama/acl_types.go
+++ b/vendor/github.com/Shopify/sarama/acl_types.go
@@ -1,5 +1,10 @@
 package sarama
 
+import (
+	"fmt"
+	"strings"
+)
+
 type (
 	AclOperation int
 
@@ -27,6 +32,61 @@
 	AclOperationIdempotentWrite
 )
 
+func (a *AclOperation) String() string {
+	mapping := map[AclOperation]string{
+		AclOperationUnknown:         "Unknown",
+		AclOperationAny:             "Any",
+		AclOperationAll:             "All",
+		AclOperationRead:            "Read",
+		AclOperationWrite:           "Write",
+		AclOperationCreate:          "Create",
+		AclOperationDelete:          "Delete",
+		AclOperationAlter:           "Alter",
+		AclOperationDescribe:        "Describe",
+		AclOperationClusterAction:   "ClusterAction",
+		AclOperationDescribeConfigs: "DescribeConfigs",
+		AclOperationAlterConfigs:    "AlterConfigs",
+		AclOperationIdempotentWrite: "IdempotentWrite",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclOperationUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclOperation (name without prefix)
+func (a *AclOperation) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the operation and converts it to an AclOperation
+func (a *AclOperation) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclOperation{
+		"unknown":         AclOperationUnknown,
+		"any":             AclOperationAny,
+		"all":             AclOperationAll,
+		"read":            AclOperationRead,
+		"write":           AclOperationWrite,
+		"create":          AclOperationCreate,
+		"delete":          AclOperationDelete,
+		"alter":           AclOperationAlter,
+		"describe":        AclOperationDescribe,
+		"clusteraction":   AclOperationClusterAction,
+		"describeconfigs": AclOperationDescribeConfigs,
+		"alterconfigs":    AclOperationAlterConfigs,
+		"idempotentwrite": AclOperationIdempotentWrite,
+	}
+	ao, ok := mapping[normalized]
+	if !ok {
+		*a = AclOperationUnknown
+		return fmt.Errorf("no acl operation with name %s", normalized)
+	}
+	*a = ao
+	return nil
+}
+
 // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
 const (
 	AclPermissionUnknown AclPermissionType = iota
@@ -35,6 +95,44 @@
 	AclPermissionAllow
 )
 
+func (a *AclPermissionType) String() string {
+	mapping := map[AclPermissionType]string{
+		AclPermissionUnknown: "Unknown",
+		AclPermissionAny:     "Any",
+		AclPermissionDeny:    "Deny",
+		AclPermissionAllow:   "Allow",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclPermissionUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclPermissionType (name without prefix)
+func (a *AclPermissionType) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the permission type and converts it to an AclPermissionType
+func (a *AclPermissionType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclPermissionType{
+		"unknown": AclPermissionUnknown,
+		"any":     AclPermissionAny,
+		"deny":    AclPermissionDeny,
+		"allow":   AclPermissionAllow,
+	}
+
+	apt, ok := mapping[normalized]
+	if !ok {
+		*a = AclPermissionUnknown
+		return fmt.Errorf("no acl permission with name %s", normalized)
+	}
+	*a = apt
+	return nil
+}
+
 // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
 const (
 	AclResourceUnknown AclResourceType = iota
@@ -43,8 +141,53 @@
 	AclResourceGroup
 	AclResourceCluster
 	AclResourceTransactionalID
+	AclResourceDelegationToken
 )
 
+func (a *AclResourceType) String() string {
+	mapping := map[AclResourceType]string{
+		AclResourceUnknown:         "Unknown",
+		AclResourceAny:             "Any",
+		AclResourceTopic:           "Topic",
+		AclResourceGroup:           "Group",
+		AclResourceCluster:         "Cluster",
+		AclResourceTransactionalID: "TransactionalID",
+		AclResourceDelegationToken: "DelegationToken",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclResourceUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclResourceType (name without prefix)
+func (a *AclResourceType) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the resource type and converts it to an AclResourceType
+func (a *AclResourceType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclResourceType{
+		"unknown":         AclResourceUnknown,
+		"any":             AclResourceAny,
+		"topic":           AclResourceTopic,
+		"group":           AclResourceGroup,
+		"cluster":         AclResourceCluster,
+		"transactionalid": AclResourceTransactionalID,
+		"delegationtoken": AclResourceDelegationToken,
+	}
+
+	art, ok := mapping[normalized]
+	if !ok {
+		*a = AclResourceUnknown
+		return fmt.Errorf("no acl resource with name %s", normalized)
+	}
+	*a = art
+	return nil
+}
+
 // ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java
 const (
 	AclPatternUnknown AclResourcePatternType = iota
@@ -53,3 +196,43 @@
 	AclPatternLiteral
 	AclPatternPrefixed
 )
+
+func (a *AclResourcePatternType) String() string {
+	mapping := map[AclResourcePatternType]string{
+		AclPatternUnknown:  "Unknown",
+		AclPatternAny:      "Any",
+		AclPatternMatch:    "Match",
+		AclPatternLiteral:  "Literal",
+		AclPatternPrefixed: "Prefixed",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclPatternUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclResourcePatternType (name without prefix)
+func (a *AclResourcePatternType) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the resource pattern type and converts it to an AclResourcePatternType
+func (a *AclResourcePatternType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclResourcePatternType{
+		"unknown":  AclPatternUnknown,
+		"any":      AclPatternAny,
+		"match":    AclPatternMatch,
+		"literal":  AclPatternLiteral,
+		"prefixed": AclPatternPrefixed,
+	}
+
+	arpt, ok := mapping[normalized]
+	if !ok {
+		*a = AclPatternUnknown
+		return fmt.Errorf("no acl resource pattern with name %s", normalized)
+	}
+	*a = arpt
+	return nil
+}
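The String/MarshalText/UnmarshalText methods added above make the ACL enum types round-trippable as text (for example in JSON or YAML configuration). A small sketch, using only the methods introduced in this hunk, of how a caller might convert between the two forms; the literal "read" is just an example value.

```go
// Sketch only: round-trip an AclOperation through its text form using the
// MarshalText/UnmarshalText methods added above.
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	var op sarama.AclOperation
	if err := op.UnmarshalText([]byte("read")); err != nil {
		log.Fatalf("unknown operation: %v", err)
	}
	fmt.Println(op.String()) // prints "Read"

	text, err := op.MarshalText()
	if err != nil {
		log.Fatalf("marshal failed: %v", err)
	}
	fmt.Println(string(text)) // prints "Read"
}
```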
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
index fc227ab..a96af93 100644
--- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
+++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
@@ -1,6 +1,6 @@
 package sarama
 
-//AddOffsetsToTxnRequest adds offsets to a transaction request
+// AddOffsetsToTxnRequest adds offsets to a transaction request
 type AddOffsetsToTxnRequest struct {
 	TransactionalID string
 	ProducerID      int64
@@ -48,6 +48,10 @@
 	return 0
 }
 
+func (a *AddOffsetsToTxnRequest) headerVersion() int16 {
+	return 1
+}
+
 func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
index c88c1f8..bb61973 100644
--- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
+++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
@@ -4,7 +4,7 @@
 	"time"
 )
 
-//AddOffsetsToTxnResponse is a response type for adding offsets to txns
+// AddOffsetsToTxnResponse is a response type for adding offsets to txns
 type AddOffsetsToTxnResponse struct {
 	ThrottleTime time.Duration
 	Err          KError
@@ -40,6 +40,10 @@
 	return 0
 }
 
+func (a *AddOffsetsToTxnResponse) headerVersion() int16 {
+	return 0
+}
+
 func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
index 8d4b42e..57ecf64 100644
--- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
+++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
@@ -1,6 +1,6 @@
 package sarama
 
-//AddPartitionsToTxnRequest is a add paartition request
+// AddPartitionsToTxnRequest is an add partition request
 type AddPartitionsToTxnRequest struct {
 	TransactionalID string
 	ProducerID      int64
@@ -72,6 +72,10 @@
 	return 0
 }
 
+func (a *AddPartitionsToTxnRequest) headerVersion() int16 {
+	return 1
+}
+
 func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
index eb4f23e..0989565 100644
--- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
+++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
@@ -4,7 +4,7 @@
 	"time"
 )
 
-//AddPartitionsToTxnResponse is a partition errors to transaction type
+// AddPartitionsToTxnResponse is a response type for adding partitions to txns
 type AddPartitionsToTxnResponse struct {
 	ThrottleTime time.Duration
 	Errors       map[string][]*PartitionError
@@ -79,11 +79,15 @@
 	return 0
 }
 
+func (a *AddPartitionsToTxnResponse) headerVersion() int16 {
+	return 0
+}
+
 func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
 
-//PartitionError is a partition error type
+// PartitionError is a partition error type
 type PartitionError struct {
 	Partition int32
 	Err       KError
diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/Shopify/sarama/admin.go
index 1db6a0e..abe18b1 100644
--- a/vendor/github.com/Shopify/sarama/admin.go
+++ b/vendor/github.com/Shopify/sarama/admin.go
@@ -2,8 +2,11 @@
 
 import (
 	"errors"
+	"fmt"
 	"math/rand"
+	"strconv"
 	"sync"
+	"time"
 )
 
 // ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics,
@@ -39,6 +42,14 @@
 	// new partitions. This operation is supported by brokers with version 1.0.0 or higher.
 	CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error
 
+	// Alter the replica assignment for partitions.
+	// This operation is supported by brokers with version 2.4.0.0 or higher.
+	AlterPartitionReassignments(topic string, assignment [][]int32) error
+
+	// Provides info on ongoing partitions replica reassignments.
+	// This operation is supported by brokers with version 2.4.0.0 or higher.
+	ListPartitionReassignments(topics string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error)
+
 	// Delete records whose offset is smaller than the given offset of the corresponding partition.
 	// This operation is supported by brokers with version 0.11.0.0 or higher.
 	DeleteRecords(topic string, partitionOffsets map[int32]int64) error
@@ -90,6 +101,18 @@
 	// Get information about the nodes in the cluster
 	DescribeCluster() (brokers []*Broker, controllerID int32, err error)
 
+	// Get information about all log directories on the given set of brokers
+	DescribeLogDirs(brokers []int32) (map[int32][]DescribeLogDirsResponseDirMetadata, error)
+
+	// Get information about SCRAM users
+	DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error)
+
+	// Delete SCRAM users
+	DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error)
+
+	// Upsert SCRAM users
+	UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error)
+
 	// Close shuts down the admin and closes underlying client.
 	Close() error
 }
@@ -105,9 +128,14 @@
 	if err != nil {
 		return nil, err
 	}
+	return NewClusterAdminFromClient(client)
+}
 
-	//make sure we can retrieve the controller
-	_, err = client.Controller()
+// NewClusterAdminFromClient creates a new ClusterAdmin using the given client.
+// Note that underlying client will also be closed on admin's Close() call.
+func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) {
+	// make sure we can retrieve the controller
+	_, err := client.Controller()
 	if err != nil {
 		return nil, err
 	}
@@ -127,8 +155,45 @@
 	return ca.client.Controller()
 }
 
-func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
+func (ca *clusterAdmin) refreshController() (*Broker, error) {
+	return ca.client.RefreshController()
+}
 
+// isErrNoController returns `true` if the given error type unwraps to an
+// `ErrNotController` response from Kafka
+func isErrNoController(err error) bool {
+	switch e := err.(type) {
+	case *TopicError:
+		return e.Err == ErrNotController
+	case *TopicPartitionError:
+		return e.Err == ErrNotController
+	case KError:
+		return e == ErrNotController
+	}
+	return false
+}
+
+// retryOnError will repeatedly call the given (error-returning) func in the
+// case that its response is non-nil and retryable (as determined by the
+// provided retryable func) up to the maximum number of tries permitted by
+// the admin client configuration
+func (ca *clusterAdmin) retryOnError(retryable func(error) bool, fn func() error) error {
+	var err error
+	for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ {
+		err = fn()
+		if err == nil || !retryable(err) {
+			return err
+		}
+		Logger.Printf(
+			"admin/request retrying after %dms... (%d attempts remaining)\n",
+			ca.conf.Admin.Retry.Backoff/time.Millisecond, ca.conf.Admin.Retry.Max-attempt)
+		time.Sleep(ca.conf.Admin.Retry.Backoff)
+		continue
+	}
+	return err
+}
+
+func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
 	if topic == "" {
 		return ErrInvalidTopic
 	}
@@ -153,26 +218,31 @@
 		request.Version = 2
 	}
 
-	b, err := ca.Controller()
-	if err != nil {
-		return err
-	}
+	return ca.retryOnError(isErrNoController, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
 
-	rsp, err := b.CreateTopics(request)
-	if err != nil {
-		return err
-	}
+		rsp, err := b.CreateTopics(request)
+		if err != nil {
+			return err
+		}
 
-	topicErr, ok := rsp.TopicErrors[topic]
-	if !ok {
-		return ErrIncompleteResponse
-	}
+		topicErr, ok := rsp.TopicErrors[topic]
+		if !ok {
+			return ErrIncompleteResponse
+		}
 
-	if topicErr.Err != ErrNoError {
-		return topicErr
-	}
+		if topicErr.Err != ErrNoError {
+			if topicErr.Err == ErrNotController {
+				_, _ = ca.refreshController()
+			}
+			return topicErr
+		}
 
-	return nil
+		return nil
+	})
 }
 
 func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) {
@@ -209,6 +279,10 @@
 		Topics: []string{},
 	}
 
+	if ca.conf.Version.IsAtLeast(V0_10_0_0) {
+		request.Version = 1
+	}
+
 	response, err := controller.GetMetadata(request)
 	if err != nil {
 		return nil, int32(0), err
@@ -217,6 +291,16 @@
 	return response.Brokers, response.ControllerID, nil
 }
 
+func (ca *clusterAdmin) findBroker(id int32) (*Broker, error) {
+	brokers := ca.client.Brokers()
+	for _, b := range brokers {
+		if b.ID() == id {
+			return b, nil
+		}
+	}
+	return nil, fmt.Errorf("could not find broker id %d", id)
+}
+
 func (ca *clusterAdmin) findAnyBroker() (*Broker, error) {
 	brokers := ca.client.Brokers()
 	if len(brokers) > 0 {
@@ -274,6 +358,15 @@
 	describeConfigsReq := &DescribeConfigsRequest{
 		Resources: describeConfigsResources,
 	}
+
+	if ca.conf.Version.IsAtLeast(V1_1_0_0) {
+		describeConfigsReq.Version = 1
+	}
+
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		describeConfigsReq.Version = 2
+	}
+
 	describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq)
 	if err != nil {
 		return nil, err
@@ -299,7 +392,6 @@
 }
 
 func (ca *clusterAdmin) DeleteTopic(topic string) error {
-
 	if topic == "" {
 		return ErrInvalidTopic
 	}
@@ -313,25 +405,31 @@
 		request.Version = 1
 	}
 
-	b, err := ca.Controller()
-	if err != nil {
-		return err
-	}
+	return ca.retryOnError(isErrNoController, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
 
-	rsp, err := b.DeleteTopics(request)
-	if err != nil {
-		return err
-	}
+		rsp, err := b.DeleteTopics(request)
+		if err != nil {
+			return err
+		}
 
-	topicErr, ok := rsp.TopicErrorCodes[topic]
-	if !ok {
-		return ErrIncompleteResponse
-	}
+		topicErr, ok := rsp.TopicErrorCodes[topic]
+		if !ok {
+			return ErrIncompleteResponse
+		}
 
-	if topicErr != ErrNoError {
-		return topicErr
-	}
-	return nil
+		if topicErr != ErrNoError {
+			if topicErr == ErrNotController {
+				_, _ = ca.refreshController()
+			}
+			return topicErr
+		}
+
+		return nil
+	})
 }
 
 func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error {
@@ -347,30 +445,110 @@
 		Timeout:         ca.conf.Admin.Timeout,
 	}
 
+	return ca.retryOnError(isErrNoController, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+
+		rsp, err := b.CreatePartitions(request)
+		if err != nil {
+			return err
+		}
+
+		topicErr, ok := rsp.TopicPartitionErrors[topic]
+		if !ok {
+			return ErrIncompleteResponse
+		}
+
+		if topicErr.Err != ErrNoError {
+			if topicErr.Err == ErrNotController {
+				_, _ = ca.refreshController()
+			}
+			return topicErr
+		}
+
+		return nil
+	})
+}
+
+func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][]int32) error {
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+
+	request := &AlterPartitionReassignmentsRequest{
+		TimeoutMs: int32(60000),
+		Version:   int16(0),
+	}
+
+	for i := 0; i < len(assignment); i++ {
+		request.AddBlock(topic, int32(i), assignment[i])
+	}
+
+	return ca.retryOnError(isErrNoController, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+
+		errs := make([]error, 0)
+
+		rsp, err := b.AlterPartitionReassignments(request)
+
+		if err != nil {
+			errs = append(errs, err)
+		} else {
+			if rsp.ErrorCode > 0 {
+				errs = append(errs, errors.New(rsp.ErrorCode.Error()))
+			}
+
+			for topic, topicErrors := range rsp.Errors {
+				for partition, partitionError := range topicErrors {
+					if partitionError.errorCode != ErrNoError {
+						errStr := fmt.Sprintf("[%s-%d]: %s", topic, partition, partitionError.errorCode.Error())
+						errs = append(errs, errors.New(errStr))
+					}
+				}
+			}
+		}
+
+		if len(errs) > 0 {
+			return ErrReassignPartitions{MultiError{&errs}}
+		}
+
+		return nil
+	})
+}
+
+func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) {
+	if topic == "" {
+		return nil, ErrInvalidTopic
+	}
+
+	request := &ListPartitionReassignmentsRequest{
+		TimeoutMs: int32(60000),
+		Version:   int16(0),
+	}
+
+	request.AddBlock(topic, partitions)
+
 	b, err := ca.Controller()
 	if err != nil {
-		return err
+		return nil, err
 	}
+	_ = b.Open(ca.client.Config())
 
-	rsp, err := b.CreatePartitions(request)
-	if err != nil {
-		return err
+	rsp, err := b.ListPartitionReassignments(request)
+
+	if err == nil && rsp != nil {
+		return rsp.TopicStatus, nil
+	} else {
+		return nil, err
 	}
-
-	topicErr, ok := rsp.TopicPartitionErrors[topic]
-	if !ok {
-		return ErrIncompleteResponse
-	}
-
-	if topicErr.Err != ErrNoError {
-		return topicErr
-	}
-
-	return nil
 }
 
 func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error {
-
 	if topic == "" {
 		return ErrInvalidTopic
 	}
@@ -380,11 +558,7 @@
 		if err != nil {
 			return err
 		}
-		if _, ok := partitionPerBroker[broker]; ok {
-			partitionPerBroker[broker] = append(partitionPerBroker[broker], partition)
-		} else {
-			partitionPerBroker[broker] = []int32{partition}
-		}
+		partitionPerBroker[broker] = append(partitionPerBroker[broker], partition)
 	}
 	errs := make([]error, 0)
 	for broker, partitions := range partitionPerBroker {
@@ -418,13 +592,19 @@
 	if len(errs) > 0 {
 		return ErrDeleteRecords{MultiError{&errs}}
 	}
-	//todo since we are dealing with couple of partitions it would be good if we return slice of errors
-	//for each partition instead of one error
+	// todo: since we are dealing with a couple of partitions it would be good to return a slice of errors,
+	// one per partition, instead of a single error
 	return nil
 }
 
-func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
+// Returns a bool indicating whether the resource request needs to go to a
+// specific broker
+func dependsOnSpecificNode(resource ConfigResource) bool {
+	return (resource.Type == BrokerResource && resource.Name != "") ||
+		resource.Type == BrokerLoggerResource
+}
 
+func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
 	var entries []ConfigEntry
 	var resources []*ConfigResource
 	resources = append(resources, &resource)
@@ -433,11 +613,35 @@
 		Resources: resources,
 	}
 
-	b, err := ca.Controller()
+	if ca.conf.Version.IsAtLeast(V1_1_0_0) {
+		request.Version = 1
+	}
+
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 2
+	}
+
+	var (
+		b   *Broker
+		err error
+	)
+
+	// DescribeConfig of broker/broker logger must be sent to the broker in question
+	if dependsOnSpecificNode(resource) {
+		var id int64
+		id, err = strconv.ParseInt(resource.Name, 10, 32)
+		if err != nil {
+			return nil, err
+		}
+		b, err = ca.findBroker(int32(id))
+	} else {
+		b, err = ca.findAnyBroker()
+	}
 	if err != nil {
 		return nil, err
 	}
 
+	_ = b.Open(ca.client.Config())
 	rsp, err := b.DescribeConfigs(request)
 	if err != nil {
 		return nil, err
@@ -448,6 +652,9 @@
 			if rspResource.ErrorMsg != "" {
 				return nil, errors.New(rspResource.ErrorMsg)
 			}
+			if rspResource.ErrorCode != 0 {
+				return nil, KError(rspResource.ErrorCode)
+			}
 			for _, cfgEntry := range rspResource.Configs {
 				entries = append(entries, *cfgEntry)
 			}
@@ -457,7 +664,6 @@
 }
 
 func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error {
-
 	var resources []*AlterConfigsResource
 	resources = append(resources, &AlterConfigsResource{
 		Type:          resourceType,
@@ -470,11 +676,27 @@
 		ValidateOnly: validateOnly,
 	}
 
-	b, err := ca.Controller()
+	var (
+		b   *Broker
+		err error
+	)
+
+	// AlterConfig of broker/broker logger must be sent to the broker in question
+	if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) {
+		var id int64
+		id, err = strconv.ParseInt(name, 10, 32)
+		if err != nil {
+			return err
+		}
+		b, err = ca.findBroker(int32(id))
+	} else {
+		b, err = ca.findAnyBroker()
+	}
 	if err != nil {
 		return err
 	}
 
+	_ = b.Open(ca.client.Config())
 	rsp, err := b.AlterConfigs(request)
 	if err != nil {
 		return err
@@ -485,6 +707,9 @@
 			if rspResource.ErrorMsg != "" {
 				return errors.New(rspResource.ErrorMsg)
 			}
+			if rspResource.ErrorCode != 0 {
+				return KError(rspResource.ErrorCode)
+			}
 		}
 	}
 	return nil
@@ -495,6 +720,10 @@
 	acls = append(acls, &AclCreation{resource, acl})
 	request := &CreateAclsRequest{AclCreations: acls}
 
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
 	b, err := ca.Controller()
 	if err != nil {
 		return err
@@ -505,9 +734,12 @@
 }
 
 func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) {
-
 	request := &DescribeAclsRequest{AclFilter: filter}
 
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
 	b, err := ca.Controller()
 	if err != nil {
 		return nil, err
@@ -530,6 +762,10 @@
 	filters = append(filters, &filter)
 	request := &DeleteAclsRequest{Filters: filters}
 
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
 	b, err := ca.Controller()
 	if err != nil {
 		return nil, err
@@ -545,7 +781,6 @@
 		for _, mACL := range fr.MatchingAcls {
 			mAcls = append(mAcls, *mACL)
 		}
-
 	}
 	return mAcls, nil
 }
@@ -559,7 +794,6 @@
 			return nil, err
 		}
 		groupsPerBroker[controller] = append(groupsPerBroker[controller], group)
-
 	}
 
 	for broker, brokerGroups := range groupsPerBroker {
@@ -581,7 +815,7 @@
 	// Query brokers in parallel, since we have to query *all* brokers
 	brokers := ca.client.Brokers()
 	groupMaps := make(chan map[string]string, len(brokers))
-	errors := make(chan error, len(brokers))
+	errChan := make(chan error, len(brokers))
 	wg := sync.WaitGroup{}
 
 	for _, b := range brokers {
@@ -592,7 +826,7 @@
 
 			response, err := b.ListGroups(&ListGroupsRequest{})
 			if err != nil {
-				errors <- err
+				errChan <- err
 				return
 			}
 
@@ -602,13 +836,12 @@
 			}
 
 			groupMaps <- groups
-
 		}(b, ca.conf)
 	}
 
 	wg.Wait()
 	close(groupMaps)
-	close(errors)
+	close(errChan)
 
 	for groupMap := range groupMaps {
 		for group, protocolType := range groupMap {
@@ -617,7 +850,7 @@
 	}
 
 	// Intentionally return only the first error for simplicity
-	err = <-errors
+	err = <-errChan
 	return
 }
 
@@ -667,3 +900,106 @@
 
 	return nil
 }
+
+func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) {
+	allLogDirs = make(map[int32][]DescribeLogDirsResponseDirMetadata)
+
+	// Query brokers in parallel, since we may have to query multiple brokers
+	logDirsMaps := make(chan map[int32][]DescribeLogDirsResponseDirMetadata, len(brokerIds))
+	errChan := make(chan error, len(brokerIds))
+	wg := sync.WaitGroup{}
+
+	for _, b := range brokerIds {
+		wg.Add(1)
+		broker, err := ca.findBroker(b)
+		if err != nil {
+			Logger.Printf("Unable to find broker with ID = %v\n", b)
+			continue
+		}
+		go func(b *Broker, conf *Config) {
+			defer wg.Done()
+			_ = b.Open(conf) // Ensure that broker is opened
+
+			response, err := b.DescribeLogDirs(&DescribeLogDirsRequest{})
+			if err != nil {
+				errChan <- err
+				return
+			}
+			logDirs := make(map[int32][]DescribeLogDirsResponseDirMetadata)
+			logDirs[b.ID()] = response.LogDirs
+			logDirsMaps <- logDirs
+		}(broker, ca.conf)
+	}
+
+	wg.Wait()
+	close(logDirsMaps)
+	close(errChan)
+
+	for logDirsMap := range logDirsMaps {
+		for id, logDirs := range logDirsMap {
+			allLogDirs[id] = logDirs
+		}
+	}
+
+	// Intentionally return only the first error for simplicity
+	err = <-errChan
+	return
+}
+
+func (ca *clusterAdmin) DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error) {
+	req := &DescribeUserScramCredentialsRequest{}
+	for _, u := range users {
+		req.DescribeUsers = append(req.DescribeUsers, DescribeUserScramCredentialsRequestUser{
+			Name: u,
+		})
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.DescribeUserScramCredentials(req)
+	if err != nil {
+		return nil, err
+	}
+
+	return rsp.Results, nil
+}
+
+func (ca *clusterAdmin) UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error) {
+	res, err := ca.AlterUserScramCredentials(upsert, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+func (ca *clusterAdmin) DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) {
+	res, err := ca.AlterUserScramCredentials(nil, delete)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+func (ca *clusterAdmin) AlterUserScramCredentials(u []AlterUserScramCredentialsUpsert, d []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) {
+	req := &AlterUserScramCredentialsRequest{
+		Deletions:  d,
+		Upsertions: u,
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.AlterUserScramCredentials(req)
+	if err != nil {
+		return nil, err
+	}
+
+	return rsp.Results, nil
+}
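The admin.go changes above add controller-retry handling plus new ClusterAdmin methods for partition reassignments, log-directory inspection, and SCRAM credentials. A hedged sketch of driving two of the new methods follows; the broker address, topic name, broker IDs, and Kafka version are assumptions for illustration, not values taken from this patch.

```go
// Sketch only: exercise the new ClusterAdmin methods added above.
// Broker address, topic name, and broker IDs are illustrative assumptions.
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_4_0_0 // reassignment APIs need brokers >= 2.4.0

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatalf("failed to create cluster admin: %v", err)
	}
	defer admin.Close()

	// List any in-flight replica reassignments for one topic's partitions.
	status, err := admin.ListPartitionReassignments("example-topic", []int32{0, 1})
	if err != nil {
		log.Fatalf("list reassignments failed: %v", err)
	}
	log.Printf("reassignment status: %+v", status)

	// Fetch log-dir metadata from a couple of brokers by ID.
	logDirs, err := admin.DescribeLogDirs([]int32{1, 2})
	if err != nil {
		log.Fatalf("describe log dirs failed: %v", err)
	}
	for brokerID, dirs := range logDirs {
		log.Printf("broker %d has %d log dirs", brokerID, len(dirs))
	}
}
```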
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go
index 26c275b..8b94b1f 100644
--- a/vendor/github.com/Shopify/sarama/alter_configs_request.go
+++ b/vendor/github.com/Shopify/sarama/alter_configs_request.go
@@ -1,12 +1,12 @@
 package sarama
 
-//AlterConfigsRequest is an alter config request type
+// AlterConfigsRequest is an alter config request type
 type AlterConfigsRequest struct {
 	Resources    []*AlterConfigsResource
 	ValidateOnly bool
 }
 
-//AlterConfigsResource is an alter config resource type
+// AlterConfigsResource is an alter config resource type
 type AlterConfigsResource struct {
 	Type          ConfigResourceType
 	Name          string
@@ -117,6 +117,10 @@
 	return 0
 }
 
+func (a *AlterConfigsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (a *AlterConfigsRequest) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go
index 3893663..cfb6369 100644
--- a/vendor/github.com/Shopify/sarama/alter_configs_response.go
+++ b/vendor/github.com/Shopify/sarama/alter_configs_response.go
@@ -2,13 +2,13 @@
 
 import "time"
 
-//AlterConfigsResponse is a reponse type for alter config
+// AlterConfigsResponse is a response type for alter config
 type AlterConfigsResponse struct {
 	ThrottleTime time.Duration
 	Resources    []*AlterConfigsResourceResponse
 }
 
-//AlterConfigsResourceResponse is a reponse type for alter config resource
+// AlterConfigsResourceResponse is a response type for alter config resource
 type AlterConfigsResourceResponse struct {
 	ErrorCode int16
 	ErrorMsg  string
@@ -23,16 +23,9 @@
 		return err
 	}
 
-	for i := range a.Resources {
-		pe.putInt16(a.Resources[i].ErrorCode)
-		err := pe.putString(a.Resources[i].ErrorMsg)
-		if err != nil {
-			return nil
-		}
-		pe.putInt8(int8(a.Resources[i].Type))
-		err = pe.putString(a.Resources[i].Name)
-		if err != nil {
-			return nil
+	for _, v := range a.Resources {
+		if err := v.encode(pe); err != nil {
+			return err
 		}
 	}
 
@@ -56,34 +49,56 @@
 	for i := range a.Resources {
 		a.Resources[i] = new(AlterConfigsResourceResponse)
 
-		errCode, err := pd.getInt16()
-		if err != nil {
+		if err := a.Resources[i].decode(pd, version); err != nil {
 			return err
 		}
-		a.Resources[i].ErrorCode = errCode
-
-		e, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		a.Resources[i].ErrorMsg = e
-
-		t, err := pd.getInt8()
-		if err != nil {
-			return err
-		}
-		a.Resources[i].Type = ConfigResourceType(t)
-
-		name, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		a.Resources[i].Name = name
 	}
 
 	return nil
 }
 
+func (a *AlterConfigsResourceResponse) encode(pe packetEncoder) error {
+	pe.putInt16(a.ErrorCode)
+	err := pe.putString(a.ErrorMsg)
+	if err != nil {
+		return err
+	}
+	pe.putInt8(int8(a.Type))
+	err = pe.putString(a.Name)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (a *AlterConfigsResourceResponse) decode(pd packetDecoder, version int16) error {
+	errCode, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	a.ErrorCode = errCode
+
+	e, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	a.ErrorMsg = e
+
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	a.Name = name
+
+	return nil
+}
+
 func (a *AlterConfigsResponse) key() int16 {
 	return 32
 }
@@ -92,6 +107,10 @@
 	return 0
 }
 
+func (a *AlterConfigsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (a *AlterConfigsResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go
new file mode 100644
index 0000000..f0a2f9d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go
@@ -0,0 +1,130 @@
+package sarama
+
+type alterPartitionReassignmentsBlock struct {
+	replicas []int32
+}
+
+func (b *alterPartitionReassignmentsBlock) encode(pe packetEncoder) error {
+	if err := pe.putNullableCompactInt32Array(b.replicas); err != nil {
+		return err
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (b *alterPartitionReassignmentsBlock) decode(pd packetDecoder) (err error) {
+	if b.replicas, err = pd.getCompactInt32Array(); err != nil {
+		return err
+	}
+	return nil
+}
+
+type AlterPartitionReassignmentsRequest struct {
+	TimeoutMs int32
+	blocks    map[string]map[int32]*alterPartitionReassignmentsBlock
+	Version   int16
+}
+
+func (r *AlterPartitionReassignmentsRequest) encode(pe packetEncoder) error {
+	pe.putInt32(r.TimeoutMs)
+
+	pe.putCompactArrayLength(len(r.blocks))
+
+	for topic, partitions := range r.blocks {
+		if err := pe.putCompactString(topic); err != nil {
+			return err
+		}
+		pe.putCompactArrayLength(len(partitions))
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err := block.encode(pe); err != nil {
+				return err
+			}
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (r *AlterPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.TimeoutMs, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	topicCount, err := pd.getCompactArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount > 0 {
+		r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
+		for i := 0; i < topicCount; i++ {
+			topic, err := pd.getCompactString()
+			if err != nil {
+				return err
+			}
+			partitionCount, err := pd.getCompactArrayLength()
+			if err != nil {
+				return err
+			}
+			r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
+			for j := 0; j < partitionCount; j++ {
+				partition, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+				block := &alterPartitionReassignmentsBlock{}
+				if err := block.decode(pd); err != nil {
+					return err
+				}
+				r.blocks[topic][partition] = block
+
+				if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+					return err
+				}
+			}
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+
+	return
+}
+
+func (r *AlterPartitionReassignmentsRequest) key() int16 {
+	return 45
+}
+
+func (r *AlterPartitionReassignmentsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 {
+	return 2
+}
+
+func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
+
+func (r *AlterPartitionReassignmentsRequest) AddBlock(topic string, partitionID int32, replicas []int32) {
+	if r.blocks == nil {
+		r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
+	}
+
+	r.blocks[topic][partitionID] = &alterPartitionReassignmentsBlock{replicas}
+}
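
A small illustration of how AddBlock populates the request before it is sent. The topic name, partition numbers and replica ids are made up, and (per the Kafka AlterPartitionReassignments protocol) a nil replica list asks the controller to cancel an in-progress reassignment:

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.AlterPartitionReassignmentsRequest{TimeoutMs: 10000}

	// Move partition 0 of a hypothetical topic onto replicas 1, 2 and 3.
	req.AddBlock("my-topic", 0, []int32{1, 2, 3})

	// A nil replica list cancels any ongoing reassignment for the partition.
	req.AddBlock("my-topic", 1, nil)

	fmt.Println("reassignment request prepared for my-topic")
}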
diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go
new file mode 100644
index 0000000..b3f9a15
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go
@@ -0,0 +1,157 @@
+package sarama
+
+type alterPartitionReassignmentsErrorBlock struct {
+	errorCode    KError
+	errorMessage *string
+}
+
+func (b *alterPartitionReassignmentsErrorBlock) encode(pe packetEncoder) error {
+	pe.putInt16(int16(b.errorCode))
+	if err := pe.putNullableCompactString(b.errorMessage); err != nil {
+		return err
+	}
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (b *alterPartitionReassignmentsErrorBlock) decode(pd packetDecoder) (err error) {
+	errorCode, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.errorCode = KError(errorCode)
+	b.errorMessage, err = pd.getCompactNullableString()
+
+	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+	return err
+}
+
+type AlterPartitionReassignmentsResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	ErrorCode      KError
+	ErrorMessage   *string
+	Errors         map[string]map[int32]*alterPartitionReassignmentsErrorBlock
+}
+
+func (r *AlterPartitionReassignmentsResponse) AddError(topic string, partition int32, kerror KError, message *string) {
+	if r.Errors == nil {
+		r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock)
+	}
+	partitions := r.Errors[topic]
+	if partitions == nil {
+		partitions = make(map[int32]*alterPartitionReassignmentsErrorBlock)
+		r.Errors[topic] = partitions
+	}
+
+	partitions[partition] = &alterPartitionReassignmentsErrorBlock{errorCode: kerror, errorMessage: message}
+}
+
+func (r *AlterPartitionReassignmentsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(r.ThrottleTimeMs)
+	pe.putInt16(int16(r.ErrorCode))
+	if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
+		return err
+	}
+
+	pe.putCompactArrayLength(len(r.Errors))
+	for topic, partitions := range r.Errors {
+		if err := pe.putCompactString(topic); err != nil {
+			return err
+		}
+		pe.putCompactArrayLength(len(partitions))
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+
+			if err := block.encode(pe); err != nil {
+				return err
+			}
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *AlterPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.ErrorCode = KError(kerr)
+
+	if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
+		return err
+	}
+
+	numTopics, err := pd.getCompactArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if numTopics > 0 {
+		r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock, numTopics)
+		for i := 0; i < numTopics; i++ {
+			topic, err := pd.getCompactString()
+			if err != nil {
+				return err
+			}
+
+			ongoingPartitionReassignments, err := pd.getCompactArrayLength()
+			if err != nil {
+				return err
+			}
+
+			r.Errors[topic] = make(map[int32]*alterPartitionReassignmentsErrorBlock, ongoingPartitionReassignments)
+
+			for j := 0; j < ongoingPartitionReassignments; j++ {
+				partition, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+				block := &alterPartitionReassignmentsErrorBlock{}
+				if err := block.decode(pd); err != nil {
+					return err
+				}
+
+				r.Errors[topic][partition] = block
+			}
+			if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *AlterPartitionReassignmentsResponse) key() int16 {
+	return 45
+}
+
+func (r *AlterPartitionReassignmentsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 {
+	return 1
+}
+
+func (r *AlterPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go
new file mode 100644
index 0000000..0530d89
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go
@@ -0,0 +1,142 @@
+package sarama
+
+type AlterUserScramCredentialsRequest struct {
+	Version int16
+
+	// Deletions represent a list of SCRAM credentials to remove
+	Deletions []AlterUserScramCredentialsDelete
+
+	// Upsertions represent a list of SCRAM credentials to update or insert
+	Upsertions []AlterUserScramCredentialsUpsert
+}
+
+type AlterUserScramCredentialsDelete struct {
+	Name      string
+	Mechanism ScramMechanismType
+}
+
+type AlterUserScramCredentialsUpsert struct {
+	Name           string
+	Mechanism      ScramMechanismType
+	Iterations     int32
+	Salt           []byte
+	saltedPassword []byte
+
+	// This field is never transmitted over the wire
+	// @see: https://tools.ietf.org/html/rfc5802
+	Password []byte
+}
+
+func (r *AlterUserScramCredentialsRequest) encode(pe packetEncoder) error {
+	pe.putCompactArrayLength(len(r.Deletions))
+	for _, d := range r.Deletions {
+		if err := pe.putCompactString(d.Name); err != nil {
+			return err
+		}
+		pe.putInt8(int8(d.Mechanism))
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putCompactArrayLength(len(r.Upsertions))
+	for _, u := range r.Upsertions {
+		if err := pe.putCompactString(u.Name); err != nil {
+			return err
+		}
+		pe.putInt8(int8(u.Mechanism))
+		pe.putInt32(u.Iterations)
+
+		if err := pe.putCompactBytes(u.Salt); err != nil {
+			return err
+		}
+
+		// do not transmit the password over the wire
+		formatter := scramFormatter{mechanism: u.Mechanism}
+		salted, err := formatter.saltedPassword(u.Password, u.Salt, int(u.Iterations))
+		if err != nil {
+			return err
+		}
+
+		if err := pe.putCompactBytes(salted); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *AlterUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error {
+	numDeletions, err := pd.getCompactArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Deletions = make([]AlterUserScramCredentialsDelete, numDeletions)
+	for i := 0; i < numDeletions; i++ {
+		r.Deletions[i] = AlterUserScramCredentialsDelete{}
+		if r.Deletions[i].Name, err = pd.getCompactString(); err != nil {
+			return err
+		}
+		mechanism, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Deletions[i].Mechanism = ScramMechanismType(mechanism)
+		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	numUpsertions, err := pd.getCompactArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Upsertions = make([]AlterUserScramCredentialsUpsert, numUpsertions)
+	for i := 0; i < numUpsertions; i++ {
+		r.Upsertions[i] = AlterUserScramCredentialsUpsert{}
+		if r.Upsertions[i].Name, err = pd.getCompactString(); err != nil {
+			return err
+		}
+		mechanism, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+
+		r.Upsertions[i].Mechanism = ScramMechanismType(mechanism)
+		if r.Upsertions[i].Iterations, err = pd.getInt32(); err != nil {
+			return err
+		}
+		if r.Upsertions[i].Salt, err = pd.getCompactBytes(); err != nil {
+			return err
+		}
+		if r.Upsertions[i].saltedPassword, err = pd.getCompactBytes(); err != nil {
+			return err
+		}
+		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (r *AlterUserScramCredentialsRequest) key() int16 {
+	return 51
+}
+
+func (r *AlterUserScramCredentialsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *AlterUserScramCredentialsRequest) headerVersion() int16 {
+	return 2
+}
+
+func (r *AlterUserScramCredentialsRequest) requiredVersion() KafkaVersion {
+	return V2_7_0_0
+}
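
As the field comments above note, the plaintext Password is never written to the wire; encode derives the salted password itself. A short sketch of building a combined delete-and-upsert request (names, salt and password are placeholders, the SCRAM_MECHANISM_* constant is assumed from this package, and these structs are normally handed to ClusterAdmin rather than encoded by hand):

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.AlterUserScramCredentialsRequest{
		Deletions: []sarama.AlterUserScramCredentialsDelete{
			{Name: "old-user", Mechanism: sarama.SCRAM_MECHANISM_SHA_512},
		},
		Upsertions: []sarama.AlterUserScramCredentialsUpsert{{
			Name:       "new-user",
			Mechanism:  sarama.SCRAM_MECHANISM_SHA_512,
			Iterations: 8192,
			Salt:       []byte("random-salt"),
			Password:   []byte("secret"), // salted by encode, never sent as-is
		}},
	}
	fmt.Printf("%d deletion(s), %d upsertion(s)\n", len(req.Deletions), len(req.Upsertions))
}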
diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go
new file mode 100644
index 0000000..31e167b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go
@@ -0,0 +1,94 @@
+package sarama
+
+import "time"
+
+type AlterUserScramCredentialsResponse struct {
+	Version int16
+
+	ThrottleTime time.Duration
+
+	Results []*AlterUserScramCredentialsResult
+}
+
+type AlterUserScramCredentialsResult struct {
+	User string
+
+	ErrorCode    KError
+	ErrorMessage *string
+}
+
+func (r *AlterUserScramCredentialsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+	pe.putCompactArrayLength(len(r.Results))
+
+	for _, u := range r.Results {
+		if err := pe.putCompactString(u.User); err != nil {
+			return err
+		}
+		pe.putInt16(int16(u.ErrorCode))
+		if err := pe.putNullableCompactString(u.ErrorMessage); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *AlterUserScramCredentialsResponse) decode(pd packetDecoder, version int16) error {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	numResults, err := pd.getCompactArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if numResults > 0 {
+		r.Results = make([]*AlterUserScramCredentialsResult, numResults)
+		for i := 0; i < numResults; i++ {
+			r.Results[i] = &AlterUserScramCredentialsResult{}
+			if r.Results[i].User, err = pd.getCompactString(); err != nil {
+				return err
+			}
+
+			kerr, err := pd.getInt16()
+			if err != nil {
+				return err
+			}
+
+			r.Results[i].ErrorCode = KError(kerr)
+			if r.Results[i].ErrorMessage, err = pd.getCompactNullableString(); err != nil {
+				return err
+			}
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (r *AlterUserScramCredentialsResponse) key() int16 {
+	return 51
+}
+
+func (r *AlterUserScramCredentialsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *AlterUserScramCredentialsResponse) headerVersion() int16 {
+	return 2
+}
+
+func (r *AlterUserScramCredentialsResponse) requiredVersion() KafkaVersion {
+	return V2_7_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go
index b33167c..bee92c0 100644
--- a/vendor/github.com/Shopify/sarama/api_versions_request.go
+++ b/vendor/github.com/Shopify/sarama/api_versions_request.go
@@ -1,8 +1,7 @@
 package sarama
 
-//ApiVersionsRequest ...
-type ApiVersionsRequest struct {
-}
+// ApiVersionsRequest ...
+type ApiVersionsRequest struct{}
 
 func (a *ApiVersionsRequest) encode(pe packetEncoder) error {
 	return nil
@@ -20,6 +19,10 @@
 	return 0
 }
 
+func (a *ApiVersionsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (a *ApiVersionsRequest) requiredVersion() KafkaVersion {
 	return V0_10_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go
index bb1f0b3..0e72e39 100644
--- a/vendor/github.com/Shopify/sarama/api_versions_response.go
+++ b/vendor/github.com/Shopify/sarama/api_versions_response.go
@@ -1,6 +1,6 @@
 package sarama
 
-//ApiVersionsResponseBlock is an api version reponse block type
+// ApiVersionsResponseBlock is an api version response block type
 type ApiVersionsResponseBlock struct {
 	ApiKey     int16
 	MinVersion int16
@@ -32,7 +32,7 @@
 	return nil
 }
 
-//ApiVersionsResponse is an api version response type
+// ApiVersionsResponse is an api version response type
 type ApiVersionsResponse struct {
 	Err         KError
 	ApiVersions []*ApiVersionsResponseBlock
@@ -84,6 +84,10 @@
 	return 0
 }
 
+func (a *ApiVersionsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
 	return V0_10_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go
index 11e0849..5911f7b 100644
--- a/vendor/github.com/Shopify/sarama/async_producer.go
+++ b/vendor/github.com/Shopify/sarama/async_producer.go
@@ -60,13 +60,28 @@
 	noProducerEpoch = -1
 )
 
-func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) int32 {
+func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) {
 	key := fmt.Sprintf("%s-%d", topic, partition)
 	t.mutex.Lock()
 	defer t.mutex.Unlock()
 	sequence := t.sequenceNumbers[key]
 	t.sequenceNumbers[key] = sequence + 1
-	return sequence
+	return sequence, t.producerEpoch
+}
+
+func (t *transactionManager) bumpEpoch() {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.producerEpoch++
+	for k := range t.sequenceNumbers {
+		t.sequenceNumbers[k] = 0
+	}
+}
+
+func (t *transactionManager) getProducerID() (int64, int16) {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	return t.producerID, t.producerEpoch
 }
 
 func newTransactionManager(conf *Config, client Client) (*transactionManager, error) {
@@ -208,6 +223,8 @@
 	flags          flagSet
 	expectation    chan *ProducerError
 	sequenceNumber int32
+	producerEpoch  int16
+	hasSequence    bool
 }
 
 const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
@@ -234,6 +251,9 @@
 func (m *ProducerMessage) clear() {
 	m.flags = 0
 	m.retries = 0
+	m.sequenceNumber = 0
+	m.producerEpoch = 0
+	m.hasSequence = false
 }
 
 // ProducerError is the type of error generated when the producer fails to deliver a message.
@@ -247,6 +267,10 @@
 	return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
 }
 
+func (pe ProducerError) Unwrap() error {
+	return pe.Err
+}
+
 // ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
 // It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
 // when closing a producer.
@@ -328,6 +352,10 @@
 			p.inFlight.Add(1)
 		}
 
+		for _, interceptor := range p.conf.Producer.Interceptors {
+			msg.safelyApplyInterceptor(interceptor)
+		}
+
 		version := 1
 		if p.conf.Version.IsAtLeast(V0_11_0_0) {
 			version = 2
@@ -388,10 +416,6 @@
 				continue
 			}
 		}
-		// All messages being retried (sent or not) have already had their retry count updated
-		if tp.parent.conf.Producer.Idempotent && msg.retries == 0 {
-			msg.sequenceNumber = tp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition)
-		}
 
 		handler := tp.handlers[msg.Partition]
 		if handler == nil {
@@ -411,7 +435,7 @@
 	var partitions []int32
 
 	err := tp.breaker.Run(func() (err error) {
-		var requiresConsistency = false
+		requiresConsistency := false
 		if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok {
 			requiresConsistency = ep.MessageRequiresConsistency(msg)
 		} else {
@@ -425,7 +449,6 @@
 		}
 		return
 	})
-
 	if err != nil {
 		return err
 	}
@@ -520,7 +543,6 @@
 	}()
 
 	for msg := range pp.input {
-
 		if pp.brokerProducer != nil && pp.brokerProducer.abandoned != nil {
 			select {
 			case <-pp.brokerProducer.abandoned:
@@ -571,6 +593,15 @@
 			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
 		}
 
+		// Now that we know we have a broker to actually try and send this message to, generate the sequence
+		// number for it.
+		// All messages being retried (sent or not) have already had their retry count updated
+		// Also, ignore "special" syn/fin messages used to sync the brokerProducer and the topicProducer.
+		if pp.parent.conf.Producer.Idempotent && msg.retries == 0 && msg.flags == 0 {
+			msg.sequenceNumber, msg.producerEpoch = pp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition)
+			msg.hasSequence = true
+		}
+
 		pp.brokerProducer.input <- msg
 	}
 }
@@ -652,6 +683,7 @@
 		input:          input,
 		output:         bridge,
 		responses:      responses,
+		stopchan:       make(chan struct{}),
 		buffer:         newProduceSet(p),
 		currentRetries: make(map[string]map[int32]error),
 	}
@@ -696,6 +728,7 @@
 	output    chan<- *produceSet
 	responses <-chan *brokerProducerResponse
 	abandoned chan struct{}
+	stopchan  chan struct{}
 
 	buffer     *produceSet
 	timer      <-chan time.Time
@@ -711,12 +744,17 @@
 
 	for {
 		select {
-		case msg := <-bp.input:
-			if msg == nil {
+		case msg, ok := <-bp.input:
+			if !ok {
+				Logger.Printf("producer/broker/%d input chan closed\n", bp.broker.ID())
 				bp.shutdown()
 				return
 			}
 
+			if msg == nil {
+				continue
+			}
+
 			if msg.flags&syn == syn {
 				Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
 					bp.broker.ID(), msg.Topic, msg.Partition)
@@ -742,12 +780,21 @@
 			}
 
 			if bp.buffer.wouldOverflow(msg) {
-				if err := bp.waitForSpace(msg); err != nil {
+				Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
+				if err := bp.waitForSpace(msg, false); err != nil {
 					bp.parent.retryMessage(msg, err)
 					continue
 				}
 			}
 
+			if bp.parent.txnmgr.producerID != noProducerID && bp.buffer.producerEpoch != msg.producerEpoch {
+				// The epoch was reset, need to roll the buffer over
+				Logger.Printf("producer/broker/%d detected epoch rollover, waiting for new buffer\n", bp.broker.ID())
+				if err := bp.waitForSpace(msg, true); err != nil {
+					bp.parent.retryMessage(msg, err)
+					continue
+				}
+			}
 			if err := bp.buffer.add(msg); err != nil {
 				bp.parent.returnError(msg, err)
 				continue
@@ -760,8 +807,14 @@
 			bp.timerFired = true
 		case output <- bp.buffer:
 			bp.rollOver()
-		case response := <-bp.responses:
-			bp.handleResponse(response)
+		case response, ok := <-bp.responses:
+			if ok {
+				bp.handleResponse(response)
+			}
+		case <-bp.stopchan:
+			Logger.Printf(
+				"producer/broker/%d run loop asked to stop\n", bp.broker.ID())
+			return
 		}
 
 		if bp.timerFired || bp.buffer.readyToFlush() {
@@ -785,7 +838,7 @@
 	for response := range bp.responses {
 		bp.handleResponse(response)
 	}
-
+	close(bp.stopchan)
 	Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
 }
 
@@ -797,9 +850,7 @@
 	return bp.currentRetries[msg.Topic][msg.Partition]
 }
 
-func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
-	Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
-
+func (bp *brokerProducer) waitForSpace(msg *ProducerMessage, forceRollover bool) error {
 	for {
 		select {
 		case response := <-bp.responses:
@@ -807,7 +858,7 @@
 			// handling a response can change our state, so re-check some things
 			if reason := bp.needsRetry(msg); reason != nil {
 				return reason
-			} else if !bp.buffer.wouldOverflow(msg) {
+			} else if !bp.buffer.wouldOverflow(msg) && !forceRollover {
 				return nil
 			}
 		case bp.output <- bp.buffer:
@@ -1018,6 +1069,12 @@
 }
 
 func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
+	// We need to reset the producer ID epoch if we set a sequence number on it, because the broker
+	// will never see a message with this number, so we can never continue the sequence.
+	if msg.hasSequence {
+		Logger.Printf("producer/txnmanager rolling over epoch due to publish failure on %s/%d", msg.Topic, msg.Partition)
+		p.txnmgr.bumpEpoch()
+	}
 	msg.clear()
 	pErr := &ProducerError{Msg: msg, Err: err}
 	if p.conf.Producer.Return.Errors {
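
The sequence-number and epoch-rollover changes above only apply to an idempotent producer. A minimal config sketch (the broker address and topic are placeholders) under sarama's usual idempotence constraints:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_11_0_0 // idempotence needs the 0.11+ record format
	cfg.Producer.Idempotent = true // producer ids, epochs and per-message sequence numbers
	cfg.Producer.RequiredAcks = sarama.WaitForAll
	cfg.Producer.Retry.Max = 5
	cfg.Net.MaxOpenRequests = 1 // required by sarama when Idempotent is set
	cfg.Producer.Return.Errors = true

	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}

	producer.Input() <- &sarama.ProducerMessage{Topic: "example", Value: sarama.StringEncoder("hello")}
	producer.AsyncClose()

	// On a delivery failure the producer now rolls the epoch and resets sequence
	// numbers (see returnError above); failures are reported here.
	for perr := range producer.Errors() {
		log.Printf("produce failed: %v", perr)
	}
}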
diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/Shopify/sarama/balance_strategy.go
index 2fce17f..9855bf4 100644
--- a/vendor/github.com/Shopify/sarama/balance_strategy.go
+++ b/vendor/github.com/Shopify/sarama/balance_strategy.go
@@ -1,8 +1,25 @@
 package sarama
 
 import (
+	"container/heap"
+	"errors"
+	"fmt"
 	"math"
 	"sort"
+	"strings"
+)
+
+const (
+	// RangeBalanceStrategyName identifies strategies that use the range partition assignment strategy
+	RangeBalanceStrategyName = "range"
+
+	// RoundRobinBalanceStrategyName identifies strategies that use the round-robin partition assignment strategy
+	RoundRobinBalanceStrategyName = "roundrobin"
+
+	// StickyBalanceStrategyName identifies strategies that use the sticky-partition assignment strategy
+	StickyBalanceStrategyName = "sticky"
+
+	defaultGeneration = -1
 )
 
 // BalanceStrategyPlan is the results of any BalanceStrategy.Plan attempt.
@@ -32,6 +49,10 @@
 	// Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions`
 	// and returns a distribution plan.
 	Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error)
+
+	// AssignmentData returns the serialized assignment data for the specified
+	// memberID
+	AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error)
 }
 
 // --------------------------------------------------------------------
@@ -41,7 +62,7 @@
 //   M1: {T: [0, 1, 2]}
 //   M2: {T: [3, 4, 5]}
 var BalanceStrategyRange = &balanceStrategy{
-	name: "range",
+	name: RangeBalanceStrategyName,
 	coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
 		step := float64(len(partitions)) / float64(len(memberIDs))
 
@@ -54,19 +75,18 @@
 	},
 }
 
-// BalanceStrategyRoundRobin assigns partitions to members in alternating order.
+// BalanceStrategySticky assigns partitions to members with an attempt to preserve earlier assignments
+// while maintaining a balanced partition distribution.
 // Example with topic T with six partitions (0..5) and two members (M1, M2):
 //   M1: {T: [0, 2, 4]}
 //   M2: {T: [1, 3, 5]}
-var BalanceStrategyRoundRobin = &balanceStrategy{
-	name: "roundrobin",
-	coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
-		for i, part := range partitions {
-			memberID := memberIDs[i%len(memberIDs)]
-			plan.Add(memberID, topic, part)
-		}
-	},
-}
+//
+// On reassignment with an additional consumer, you might get an assignment plan like:
+//   M1: {T: [0, 2]}
+//   M2: {T: [1, 3]}
+//   M3: {T: [4, 5]}
+//
+var BalanceStrategySticky = &stickyBalanceStrategy{}
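
Selecting the sticky assignor from application code is a single config setting. A minimal sketch (broker address and group id are placeholders; a real consumer would go on to call Consume with a ConsumerGroupHandler):

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_4_0_0
	cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky

	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer group.Close()
	// group.Consume(ctx, []string{"my-topic"}, handler) would follow here.
}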
 
 // --------------------------------------------------------------------
 
@@ -104,6 +124,11 @@
 	return plan, nil
 }
 
+// AssignmentData returns nil; the simple strategies do not require any shared assignment data.
+func (s *balanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
+	return nil, nil
+}
+
 type balanceStrategySortable struct {
 	topic     string
 	memberIDs []string
@@ -113,6 +138,7 @@
 func (p balanceStrategySortable) Swap(i, j int) {
 	p.memberIDs[i], p.memberIDs[j] = p.memberIDs[j], p.memberIDs[i]
 }
+
 func (p balanceStrategySortable) Less(i, j int) bool {
 	return balanceStrategyHashValue(p.topic, p.memberIDs[i]) < balanceStrategyHashValue(p.topic, p.memberIDs[j])
 }
@@ -127,3 +153,984 @@
 	}
 	return h
 }
+
+type stickyBalanceStrategy struct {
+	movements partitionMovements
+}
+
+// Name implements BalanceStrategy.
+func (s *stickyBalanceStrategy) Name() string { return StickyBalanceStrategyName }
+
+// Plan implements BalanceStrategy.
+func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+	// track partition movements during generation of the partition assignment plan
+	s.movements = partitionMovements{
+		Movements:                 make(map[topicPartitionAssignment]consumerPair),
+		PartitionMovementsByTopic: make(map[string]map[consumerPair]map[topicPartitionAssignment]bool),
+	}
+
+	// prepopulate the current assignment state from userdata on the consumer group members
+	currentAssignment, prevAssignment, err := prepopulateCurrentAssignments(members)
+	if err != nil {
+		return nil, err
+	}
+
+	// determine if we're dealing with a completely fresh assignment, or if there's existing assignment state
+	isFreshAssignment := false
+	if len(currentAssignment) == 0 {
+		isFreshAssignment = true
+	}
+
+	// create a mapping of all current topic partitions and the consumers that can be assigned to them
+	partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string)
+	for topic, partitions := range topics {
+		for _, partition := range partitions {
+			partition2AllPotentialConsumers[topicPartitionAssignment{Topic: topic, Partition: partition}] = []string{}
+		}
+	}
+
+	// create a mapping of all consumers to all potential topic partitions that can be assigned to them
+	// also, populate the mapping of partitions to potential consumers
+	consumer2AllPotentialPartitions := make(map[string][]topicPartitionAssignment, len(members))
+	for memberID, meta := range members {
+		consumer2AllPotentialPartitions[memberID] = make([]topicPartitionAssignment, 0)
+		for _, topicSubscription := range meta.Topics {
+			// only evaluate topic subscriptions that are present in the supplied topics map
+			if _, found := topics[topicSubscription]; found {
+				for _, partition := range topics[topicSubscription] {
+					topicPartition := topicPartitionAssignment{Topic: topicSubscription, Partition: partition}
+					consumer2AllPotentialPartitions[memberID] = append(consumer2AllPotentialPartitions[memberID], topicPartition)
+					partition2AllPotentialConsumers[topicPartition] = append(partition2AllPotentialConsumers[topicPartition], memberID)
+				}
+			}
+		}
+
+		// add this consumer to currentAssignment (with an empty topic partition assignment) if it does not already exist
+		if _, exists := currentAssignment[memberID]; !exists {
+			currentAssignment[memberID] = make([]topicPartitionAssignment, 0)
+		}
+	}
+
+	// create a mapping of each partition to its current consumer, where possible
+	currentPartitionConsumers := make(map[topicPartitionAssignment]string, len(currentAssignment))
+	unvisitedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
+	for partition := range partition2AllPotentialConsumers {
+		unvisitedPartitions[partition] = true
+	}
+	var unassignedPartitions []topicPartitionAssignment
+	for memberID, partitions := range currentAssignment {
+		var keepPartitions []topicPartitionAssignment
+		for _, partition := range partitions {
+			// If this partition no longer exists at all, likely due to the
+			// topic being deleted, we remove the partition from the member.
+			if _, exists := partition2AllPotentialConsumers[partition]; !exists {
+				continue
+			}
+			delete(unvisitedPartitions, partition)
+			currentPartitionConsumers[partition] = memberID
+
+			if !strsContains(members[memberID].Topics, partition.Topic) {
+				unassignedPartitions = append(unassignedPartitions, partition)
+				continue
+			}
+			keepPartitions = append(keepPartitions, partition)
+		}
+		currentAssignment[memberID] = keepPartitions
+	}
+	for unvisited := range unvisitedPartitions {
+		unassignedPartitions = append(unassignedPartitions, unvisited)
+	}
+
+	// sort the topic partitions in order of priority for reassignment
+	sortedPartitions := sortPartitions(currentAssignment, prevAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions)
+
+	// at this point we have preserved all valid topic partition to consumer assignments and removed
+	// all invalid topic partitions and invalid consumers. Now we need to assign unassignedPartitions
+	// to consumers so that the topic partition assignments are as balanced as possible.
+
+	// an ascending sorted set of consumers based on how many topic partitions are already assigned to them
+	sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
+	s.balance(currentAssignment, prevAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumers)
+
+	// Assemble plan
+	plan := make(BalanceStrategyPlan, len(currentAssignment))
+	for memberID, assignments := range currentAssignment {
+		if len(assignments) == 0 {
+			plan[memberID] = make(map[string][]int32)
+		} else {
+			for _, assignment := range assignments {
+				plan.Add(memberID, assignment.Topic, assignment.Partition)
+			}
+		}
+	}
+	return plan, nil
+}
+
+// AssignmentData serializes the set of topics currently assigned to the
+// specified member as part of the supplied balance plan
+func (s *stickyBalanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
+	return encode(&StickyAssignorUserDataV1{
+		Topics:     topics,
+		Generation: generationID,
+	}, nil)
+}
+
+func strsContains(s []string, value string) bool {
+	for _, entry := range s {
+		if entry == value {
+			return true
+		}
+	}
+	return false
+}
+
+// Balance assignments across consumers for maximum fairness and stickiness.
+func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) {
+	initializing := false
+	if len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 {
+		initializing = true
+	}
+
+	// assign all unassigned partitions
+	for _, partition := range unassignedPartitions {
+		// skip if there is no potential consumer for the partition
+		if len(partition2AllPotentialConsumers[partition]) == 0 {
+			continue
+		}
+		sortedCurrentSubscriptions = assignPartition(partition, sortedCurrentSubscriptions, currentAssignment, consumer2AllPotentialPartitions, currentPartitionConsumer)
+	}
+
+	// narrow down the reassignment scope to only those partitions that can actually be reassigned
+	for partition := range partition2AllPotentialConsumers {
+		if !canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
+			sortedPartitions = removeTopicPartitionFromMemberAssignments(sortedPartitions, partition)
+		}
+	}
+
+	// narrow down the reassignment scope to only those consumers that are subject to reassignment
+	fixedAssignments := make(map[string][]topicPartitionAssignment)
+	for memberID := range consumer2AllPotentialPartitions {
+		if !canConsumerParticipateInReassignment(memberID, currentAssignment, consumer2AllPotentialPartitions, partition2AllPotentialConsumers) {
+			fixedAssignments[memberID] = currentAssignment[memberID]
+			delete(currentAssignment, memberID)
+			sortedCurrentSubscriptions = sortMemberIDsByPartitionAssignments(currentAssignment)
+		}
+	}
+
+	// create a deep copy of the current assignment so we can revert to it if we do not get a more balanced assignment later
+	preBalanceAssignment := deepCopyAssignment(currentAssignment)
+	preBalancePartitionConsumers := make(map[topicPartitionAssignment]string, len(currentPartitionConsumer))
+	for k, v := range currentPartitionConsumer {
+		preBalancePartitionConsumers[k] = v
+	}
+
+	reassignmentPerformed := s.performReassignments(sortedPartitions, currentAssignment, prevAssignment, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer)
+
+	// if we are not preserving existing assignments and we have made changes to the current assignment
+	// make sure we are getting a more balanced assignment; otherwise, revert to previous assignment
+	if !initializing && reassignmentPerformed && getBalanceScore(currentAssignment) >= getBalanceScore(preBalanceAssignment) {
+		currentAssignment = deepCopyAssignment(preBalanceAssignment)
+		currentPartitionConsumer = make(map[topicPartitionAssignment]string, len(preBalancePartitionConsumers))
+		for k, v := range preBalancePartitionConsumers {
+			currentPartitionConsumer[k] = v
+		}
+	}
+
+	// add the fixed assignments (those that could not change) back
+	for consumer, assignments := range fixedAssignments {
+		currentAssignment[consumer] = assignments
+	}
+}
+
+// BalanceStrategyRoundRobin assigns partitions to members in alternating order.
+// For example, there are two topics (t0, t1) and two consumer (m0, m1), and each topic has three partitions (p0, p1, p2):
+// M0: [t0p0, t0p2, t1p1]
+// M1: [t0p1, t1p0, t1p2]
+var BalanceStrategyRoundRobin = new(roundRobinBalancer)
+
+type roundRobinBalancer struct{}
+
+func (b *roundRobinBalancer) Name() string {
+	return RoundRobinBalanceStrategyName
+}
+
+func (b *roundRobinBalancer) Plan(memberAndMetadata map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+	if len(memberAndMetadata) == 0 || len(topics) == 0 {
+		return nil, errors.New("members and topics are not provided")
+	}
+	// sort partitions
+	var topicPartitions []topicAndPartition
+	for topic, partitions := range topics {
+		for _, partition := range partitions {
+			topicPartitions = append(topicPartitions, topicAndPartition{topic: topic, partition: partition})
+		}
+	}
+	sort.SliceStable(topicPartitions, func(i, j int) bool {
+		pi := topicPartitions[i]
+		pj := topicPartitions[j]
+		return pi.comparedValue() < pj.comparedValue()
+	})
+
+	// sort members
+	var members []memberAndTopic
+	for memberID, meta := range memberAndMetadata {
+		m := memberAndTopic{
+			memberID: memberID,
+			topics:   make(map[string]struct{}),
+		}
+		for _, t := range meta.Topics {
+			m.topics[t] = struct{}{}
+		}
+		members = append(members, m)
+	}
+	sort.SliceStable(members, func(i, j int) bool {
+		mi := members[i]
+		mj := members[j]
+		return mi.memberID < mj.memberID
+	})
+
+	// assign partitions
+	plan := make(BalanceStrategyPlan, len(members))
+	i := 0
+	n := len(members)
+	for _, tp := range topicPartitions {
+		m := members[i%n]
+		for !m.hasTopic(tp.topic) {
+			i++
+			m = members[i%n]
+		}
+		plan.Add(m.memberID, tp.topic, tp.partition)
+		i++
+	}
+	return plan, nil
+}
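
A quick check of the Plan behaviour documented above, using made-up member and topic names:

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Two members subscribed to the same two topics with three partitions each,
	// mirroring the BalanceStrategyRoundRobin doc comment.
	members := map[string]sarama.ConsumerGroupMemberMetadata{
		"m0": {Topics: []string{"t0", "t1"}},
		"m1": {Topics: []string{"t0", "t1"}},
	}
	topics := map[string][]int32{"t0": {0, 1, 2}, "t1": {0, 1, 2}}

	plan, err := sarama.BalanceStrategyRoundRobin.Plan(members, topics)
	if err != nil {
		log.Fatal(err)
	}
	// Expected shape: m0 -> t0:[0 2] t1:[1], m1 -> t0:[1] t1:[0 2].
	fmt.Println(plan)
}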
+
+func (b *roundRobinBalancer) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
+	return nil, nil // do nothing for now
+}
+
+type topicAndPartition struct {
+	topic     string
+	partition int32
+}
+
+func (tp *topicAndPartition) comparedValue() string {
+	return fmt.Sprintf("%s-%d", tp.topic, tp.partition)
+}
+
+type memberAndTopic struct {
+	memberID string
+	topics   map[string]struct{}
+}
+
+func (m *memberAndTopic) hasTopic(topic string) bool {
+	_, isExist := m.topics[topic]
+	return isExist
+}
+
+// Calculate the balance score of the given assignment, as the sum of the differences in the number of assigned partitions across all consumer pairs.
+// A perfectly balanced assignment (with all consumers getting the same number of partitions) has a balance score of 0.
+// Lower balance score indicates a more balanced assignment.
+func getBalanceScore(assignment map[string][]topicPartitionAssignment) int {
+	consumer2AssignmentSize := make(map[string]int, len(assignment))
+	for memberID, partitions := range assignment {
+		consumer2AssignmentSize[memberID] = len(partitions)
+	}
+
+	var score float64
+	for memberID, consumerAssignmentSize := range consumer2AssignmentSize {
+		delete(consumer2AssignmentSize, memberID)
+		for _, otherConsumerAssignmentSize := range consumer2AssignmentSize {
+			score += math.Abs(float64(consumerAssignmentSize - otherConsumerAssignmentSize))
+		}
+	}
+	return int(score)
+}
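
For example, with three consumers holding 3, 1 and 1 partitions the pairwise differences sum to |3-1| + |3-1| + |1-1| = 4, while the more even 2, 2, 1 split scores |2-2| + |2-1| + |2-1| = 2; the lower score is what balance() uses to decide whether a candidate reassignment actually improved things.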
+
+// Determine whether the current assignment plan is balanced.
+func isBalanced(currentAssignment map[string][]topicPartitionAssignment, allSubscriptions map[string][]topicPartitionAssignment) bool {
+	sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
+	min := len(currentAssignment[sortedCurrentSubscriptions[0]])
+	max := len(currentAssignment[sortedCurrentSubscriptions[len(sortedCurrentSubscriptions)-1]])
+	if min >= max-1 {
+		// if the minimum and maximum numbers of partitions assigned to consumers differ by at most one, return true
+		return true
+	}
+
+	// create a mapping from partitions to the consumer assigned to them
+	allPartitions := make(map[topicPartitionAssignment]string)
+	for memberID, partitions := range currentAssignment {
+		for _, partition := range partitions {
+			if _, exists := allPartitions[partition]; exists {
+				Logger.Printf("Topic %s Partition %d is assigned more than one consumer", partition.Topic, partition.Partition)
+			}
+			allPartitions[partition] = memberID
+		}
+	}
+
+	// for each consumer that does not have all the topic partitions it can get make sure none of the topic partitions it
+	// could but did not get cannot be moved to it (because that would break the balance)
+	for _, memberID := range sortedCurrentSubscriptions {
+		consumerPartitions := currentAssignment[memberID]
+		consumerPartitionCount := len(consumerPartitions)
+
+		// skip if this consumer already has all the topic partitions it can get
+		if consumerPartitionCount == len(allSubscriptions[memberID]) {
+			continue
+		}
+
+		// otherwise make sure it cannot get any more
+		potentialTopicPartitions := allSubscriptions[memberID]
+		for _, partition := range potentialTopicPartitions {
+			if !memberAssignmentsIncludeTopicPartition(currentAssignment[memberID], partition) {
+				otherConsumer := allPartitions[partition]
+				otherConsumerPartitionCount := len(currentAssignment[otherConsumer])
+				if consumerPartitionCount < otherConsumerPartitionCount {
+					return false
+				}
+			}
+		}
+	}
+	return true
+}
+
+// Reassign all topic partitions that need reassignment until balanced.
+func (s *stickyBalanceStrategy) performReassignments(reassignablePartitions []topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) bool {
+	reassignmentPerformed := false
+	modified := false
+
+	// repeat reassignment until no partition can be moved to improve the balance
+	for {
+		modified = false
+		// reassign all reassignable partitions (starting from the partition with the fewest potential consumers)
+		// until the full list is processed or a balance is achieved
+		for _, partition := range reassignablePartitions {
+			if isBalanced(currentAssignment, consumer2AllPotentialPartitions) {
+				break
+			}
+
+			// the partition must have at least two consumers
+			if len(partition2AllPotentialConsumers[partition]) <= 1 {
+				Logger.Printf("Expected more than one potential consumer for topic %s partition %d", partition.Topic, partition.Partition)
+			}
+
+			// the partition must have a consumer
+			consumer := currentPartitionConsumer[partition]
+			if consumer == "" {
+				Logger.Printf("Expected topic %s partition %d to be assigned to a consumer", partition.Topic, partition.Partition)
+			}
+
+			if _, exists := prevAssignment[partition]; exists {
+				if len(currentAssignment[consumer]) > (len(currentAssignment[prevAssignment[partition].MemberID]) + 1) {
+					sortedCurrentSubscriptions = s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, prevAssignment[partition].MemberID)
+					reassignmentPerformed = true
+					modified = true
+					continue
+				}
+			}
+
+			// check if a better-suited consumer exists for the partition; if so, reassign it
+			for _, otherConsumer := range partition2AllPotentialConsumers[partition] {
+				if len(currentAssignment[consumer]) > (len(currentAssignment[otherConsumer]) + 1) {
+					sortedCurrentSubscriptions = s.reassignPartitionToNewConsumer(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, consumer2AllPotentialPartitions)
+					reassignmentPerformed = true
+					modified = true
+					break
+				}
+			}
+		}
+		if !modified {
+			return reassignmentPerformed
+		}
+	}
+}
+
+// Identify a new consumer for a topic partition and reassign it.
+func (s *stickyBalanceStrategy) reassignPartitionToNewConsumer(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []string {
+	for _, anotherConsumer := range sortedCurrentSubscriptions {
+		if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[anotherConsumer], partition) {
+			return s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, anotherConsumer)
+		}
+	}
+	return sortedCurrentSubscriptions
+}
+
+// Reassign a specific partition to a new consumer
+func (s *stickyBalanceStrategy) reassignPartition(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, newConsumer string) []string {
+	consumer := currentPartitionConsumer[partition]
+	// find the correct partition movement considering the stickiness requirement
+	partitionToBeMoved := s.movements.getTheActualPartitionToBeMoved(partition, consumer, newConsumer)
+	return s.processPartitionMovement(partitionToBeMoved, newConsumer, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer)
+}
+
+// Track the movement of a topic partition after assignment
+func (s *stickyBalanceStrategy) processPartitionMovement(partition topicPartitionAssignment, newConsumer string, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
+	oldConsumer := currentPartitionConsumer[partition]
+	s.movements.movePartition(partition, oldConsumer, newConsumer)
+
+	currentAssignment[oldConsumer] = removeTopicPartitionFromMemberAssignments(currentAssignment[oldConsumer], partition)
+	currentAssignment[newConsumer] = append(currentAssignment[newConsumer], partition)
+	currentPartitionConsumer[partition] = newConsumer
+	return sortMemberIDsByPartitionAssignments(currentAssignment)
+}
+
+// Determine whether a specific consumer should be considered for topic partition assignment.
+func canConsumerParticipateInReassignment(memberID string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
+	currentPartitions := currentAssignment[memberID]
+	currentAssignmentSize := len(currentPartitions)
+	maxAssignmentSize := len(consumer2AllPotentialPartitions[memberID])
+	if currentAssignmentSize > maxAssignmentSize {
+		Logger.Printf("The consumer %s is assigned more partitions than the maximum possible", memberID)
+	}
+	if currentAssignmentSize < maxAssignmentSize {
+		// if a consumer is not assigned all its potential partitions it is subject to reassignment
+		return true
+	}
+	for _, partition := range currentPartitions {
+		if canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
+			return true
+		}
+	}
+	return false
+}
+
+// Only consider reassigning those topic partitions that have two or more potential consumers.
+func canTopicPartitionParticipateInReassignment(partition topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
+	return len(partition2AllPotentialConsumers[partition]) >= 2
+}
+
+// The assignment should improve the overall balance of the partition assignments to consumers.
+func assignPartition(partition topicPartitionAssignment, sortedCurrentSubscriptions []string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
+	for _, memberID := range sortedCurrentSubscriptions {
+		if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[memberID], partition) {
+			currentAssignment[memberID] = append(currentAssignment[memberID], partition)
+			currentPartitionConsumer[partition] = memberID
+			break
+		}
+	}
+	return sortMemberIDsByPartitionAssignments(currentAssignment)
+}
+
+// Deserialize topic partition assignment data to aid with creation of a sticky assignment.
+func deserializeTopicPartitionAssignment(userDataBytes []byte) (StickyAssignorUserData, error) {
+	userDataV1 := &StickyAssignorUserDataV1{}
+	if err := decode(userDataBytes, userDataV1); err != nil {
+		userDataV0 := &StickyAssignorUserDataV0{}
+		if err := decode(userDataBytes, userDataV0); err != nil {
+			return nil, err
+		}
+		return userDataV0, nil
+	}
+	return userDataV1, nil
+}
+
+// filterAssignedPartitions returns a map of consumer group members to their list of previously-assigned topic partitions, limited
+// to those topic partitions currently reported by the Kafka cluster.
+func filterAssignedPartitions(currentAssignment map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) map[string][]topicPartitionAssignment {
+	assignments := deepCopyAssignment(currentAssignment)
+	for memberID, partitions := range assignments {
+		// perform in-place filtering
+		i := 0
+		for _, partition := range partitions {
+			if _, exists := partition2AllPotentialConsumers[partition]; exists {
+				partitions[i] = partition
+				i++
+			}
+		}
+		assignments[memberID] = partitions[:i]
+	}
+	return assignments
+}
+
+func removeTopicPartitionFromMemberAssignments(assignments []topicPartitionAssignment, topic topicPartitionAssignment) []topicPartitionAssignment {
+	for i, assignment := range assignments {
+		if assignment == topic {
+			return append(assignments[:i], assignments[i+1:]...)
+		}
+	}
+	return assignments
+}
+
+func memberAssignmentsIncludeTopicPartition(assignments []topicPartitionAssignment, topic topicPartitionAssignment) bool {
+	for _, assignment := range assignments {
+		if assignment == topic {
+			return true
+		}
+	}
+	return false
+}
+
+func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, partitionsWithADifferentPreviousAssignment map[topicPartitionAssignment]consumerGenerationPair, isFreshAssignment bool, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []topicPartitionAssignment {
+	unassignedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
+	for partition := range partition2AllPotentialConsumers {
+		unassignedPartitions[partition] = true
+	}
+
+	sortedPartitions := make([]topicPartitionAssignment, 0)
+	if !isFreshAssignment && areSubscriptionsIdentical(partition2AllPotentialConsumers, consumer2AllPotentialPartitions) {
+		// if this is a reassignment and the subscriptions are identical (all consumers can consume from all topics)
+		// then we simply list partitions in a round-robin fashion (from consumers with the
+		// most assigned partitions to those with the least)
+		assignments := filterAssignedPartitions(currentAssignment, partition2AllPotentialConsumers)
+
+		// use priority-queue to evaluate consumer group members in descending-order based on
+		// the number of topic partition assignments (i.e. consumers with most assignments first)
+		pq := make(assignmentPriorityQueue, len(assignments))
+		i := 0
+		for consumerID, consumerAssignments := range assignments {
+			pq[i] = &consumerGroupMember{
+				id:          consumerID,
+				assignments: consumerAssignments,
+			}
+			i++
+		}
+		heap.Init(&pq)
+
+		for {
+			// loop until no consumer-group members remain
+			if pq.Len() == 0 {
+				break
+			}
+			member := pq[0]
+
+			// partitions that were assigned to a different consumer last time
+			var prevPartitionIndex int
+			for i, partition := range member.assignments {
+				if _, exists := partitionsWithADifferentPreviousAssignment[partition]; exists {
+					prevPartitionIndex = i
+					break
+				}
+			}
+
+			if len(member.assignments) > 0 {
+				partition := member.assignments[prevPartitionIndex]
+				sortedPartitions = append(sortedPartitions, partition)
+				delete(unassignedPartitions, partition)
+				if prevPartitionIndex == 0 {
+					member.assignments = member.assignments[1:]
+				} else {
+					member.assignments = append(member.assignments[:prevPartitionIndex], member.assignments[prevPartitionIndex+1:]...)
+				}
+				heap.Fix(&pq, 0)
+			} else {
+				heap.Pop(&pq)
+			}
+		}
+
+		for partition := range unassignedPartitions {
+			sortedPartitions = append(sortedPartitions, partition)
+		}
+	} else {
+		// an ascending sorted set of topic partitions based on how many consumers can potentially use them
+		sortedPartitions = sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers)
+	}
+	return sortedPartitions
+}
+
+func sortMemberIDsByPartitionAssignments(assignments map[string][]topicPartitionAssignment) []string {
+	// sort the members by the number of partition assignments in ascending order
+	sortedMemberIDs := make([]string, 0, len(assignments))
+	for memberID := range assignments {
+		sortedMemberIDs = append(sortedMemberIDs, memberID)
+	}
+	sort.SliceStable(sortedMemberIDs, func(i, j int) bool {
+		ret := len(assignments[sortedMemberIDs[i]]) - len(assignments[sortedMemberIDs[j]])
+		if ret == 0 {
+			return sortedMemberIDs[i] < sortedMemberIDs[j]
+		}
+		return len(assignments[sortedMemberIDs[i]]) < len(assignments[sortedMemberIDs[j]])
+	})
+	return sortedMemberIDs
+}
+
+func sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers map[topicPartitionAssignment][]string) []topicPartitionAssignment {
+	// sort the partitions by the number of potential consumers in ascending order
+	sortedPartionIDs := make([]topicPartitionAssignment, len(partition2AllPotentialConsumers))
+	i := 0
+	for partition := range partition2AllPotentialConsumers {
+		sortedPartionIDs[i] = partition
+		i++
+	}
+	sort.Slice(sortedPartionIDs, func(i, j int) bool {
+		if len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) == len(partition2AllPotentialConsumers[sortedPartionIDs[j]]) {
+			ret := strings.Compare(sortedPartionIDs[i].Topic, sortedPartionIDs[j].Topic)
+			if ret == 0 {
+				return sortedPartionIDs[i].Partition < sortedPartionIDs[j].Partition
+			}
+			return ret < 0
+		}
+		return len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) < len(partition2AllPotentialConsumers[sortedPartionIDs[j]])
+	})
+	return sortedPartionIDs
+}
+
+func deepCopyAssignment(assignment map[string][]topicPartitionAssignment) map[string][]topicPartitionAssignment {
+	m := make(map[string][]topicPartitionAssignment, len(assignment))
+	for memberID, subscriptions := range assignment {
+		m[memberID] = append(subscriptions[:0:0], subscriptions...)
+	}
+	return m
+}
+
+func areSubscriptionsIdentical(partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) bool {
+	curMembers := make(map[string]int)
+	for _, cur := range partition2AllPotentialConsumers {
+		if len(curMembers) == 0 {
+			for _, curMembersElem := range cur {
+				curMembers[curMembersElem]++
+			}
+			continue
+		}
+
+		if len(curMembers) != len(cur) {
+			return false
+		}
+
+		yMap := make(map[string]int)
+		for _, yElem := range cur {
+			yMap[yElem]++
+		}
+
+		for curMembersMapKey, curMembersMapVal := range curMembers {
+			if yMap[curMembersMapKey] != curMembersMapVal {
+				return false
+			}
+		}
+	}
+
+	curPartitions := make(map[topicPartitionAssignment]int)
+	for _, cur := range consumer2AllPotentialPartitions {
+		if len(curPartitions) == 0 {
+			for _, curPartitionElem := range cur {
+				curPartitions[curPartitionElem]++
+			}
+			continue
+		}
+
+		if len(curPartitions) != len(cur) {
+			return false
+		}
+
+		yMap := make(map[topicPartitionAssignment]int)
+		for _, yElem := range cur {
+			yMap[yElem]++
+		}
+
+		for curMembersMapKey, curMembersMapVal := range curPartitions {
+			if yMap[curMembersMapKey] != curMembersMapVal {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// We need to process subscriptions' user data with each consumer's reported generation in mind;
+// higher generations overwrite lower generations in case of a conflict.
+// Note that a conflict could exist only if user data is for different generations.
+func prepopulateCurrentAssignments(members map[string]ConsumerGroupMemberMetadata) (map[string][]topicPartitionAssignment, map[topicPartitionAssignment]consumerGenerationPair, error) {
+	currentAssignment := make(map[string][]topicPartitionAssignment)
+	prevAssignment := make(map[topicPartitionAssignment]consumerGenerationPair)
+
+	// for each partition we create a sorted map of its consumers by generation
+	sortedPartitionConsumersByGeneration := make(map[topicPartitionAssignment]map[int]string)
+	for memberID, meta := range members {
+		consumerUserData, err := deserializeTopicPartitionAssignment(meta.UserData)
+		if err != nil {
+			return nil, nil, err
+		}
+		for _, partition := range consumerUserData.partitions() {
+			if consumers, exists := sortedPartitionConsumersByGeneration[partition]; exists {
+				if consumerUserData.hasGeneration() {
+					if _, generationExists := consumers[consumerUserData.generation()]; generationExists {
+						// same partition is assigned to two consumers during the same rebalance.
+						// log a warning and skip this record
+						Logger.Printf("Topic %s Partition %d is assigned to multiple consumers following sticky assignment generation %d", partition.Topic, partition.Partition, consumerUserData.generation())
+						continue
+					} else {
+						consumers[consumerUserData.generation()] = memberID
+					}
+				} else {
+					consumers[defaultGeneration] = memberID
+				}
+			} else {
+				generation := defaultGeneration
+				if consumerUserData.hasGeneration() {
+					generation = consumerUserData.generation()
+				}
+				sortedPartitionConsumersByGeneration[partition] = map[int]string{generation: memberID}
+			}
+		}
+	}
+
+	// prevAssignment holds the prior ConsumerGenerationPair (before current) of each partition
+	// current and previous consumers are the last two consumers of each partition in the above sorted map
+	for partition, consumers := range sortedPartitionConsumersByGeneration {
+		// sort consumers by generation in decreasing order
+		var generations []int
+		for generation := range consumers {
+			generations = append(generations, generation)
+		}
+		sort.Sort(sort.Reverse(sort.IntSlice(generations)))
+
+		consumer := consumers[generations[0]]
+		if _, exists := currentAssignment[consumer]; !exists {
+			currentAssignment[consumer] = []topicPartitionAssignment{partition}
+		} else {
+			currentAssignment[consumer] = append(currentAssignment[consumer], partition)
+		}
+
+		// check for previous assignment, if any
+		if len(generations) > 1 {
+			prevAssignment[partition] = consumerGenerationPair{
+				MemberID:   consumers[generations[1]],
+				Generation: generations[1],
+			}
+		}
+	}
+	return currentAssignment, prevAssignment, nil
+}
+
+type consumerGenerationPair struct {
+	MemberID   string
+	Generation int
+}
+
+// consumerPair represents a pair of Kafka consumer ids involved in a partition reassignment.
+type consumerPair struct {
+	SrcMemberID string
+	DstMemberID string
+}
+
+// partitionMovements maintains some data structures to simplify lookup of partition movements among consumers.
+type partitionMovements struct {
+	PartitionMovementsByTopic map[string]map[consumerPair]map[topicPartitionAssignment]bool
+	Movements                 map[topicPartitionAssignment]consumerPair
+}
+
+func (p *partitionMovements) removeMovementRecordOfPartition(partition topicPartitionAssignment) consumerPair {
+	pair := p.Movements[partition]
+	delete(p.Movements, partition)
+
+	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+	delete(partitionMovementsForThisTopic[pair], partition)
+	if len(partitionMovementsForThisTopic[pair]) == 0 {
+		delete(partitionMovementsForThisTopic, pair)
+	}
+	if len(p.PartitionMovementsByTopic[partition.Topic]) == 0 {
+		delete(p.PartitionMovementsByTopic, partition.Topic)
+	}
+	return pair
+}
+
+func (p *partitionMovements) addPartitionMovementRecord(partition topicPartitionAssignment, pair consumerPair) {
+	p.Movements[partition] = pair
+	if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
+		p.PartitionMovementsByTopic[partition.Topic] = make(map[consumerPair]map[topicPartitionAssignment]bool)
+	}
+	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+	if _, exists := partitionMovementsForThisTopic[pair]; !exists {
+		partitionMovementsForThisTopic[pair] = make(map[topicPartitionAssignment]bool)
+	}
+	partitionMovementsForThisTopic[pair][partition] = true
+}
+
+func (p *partitionMovements) movePartition(partition topicPartitionAssignment, oldConsumer, newConsumer string) {
+	pair := consumerPair{
+		SrcMemberID: oldConsumer,
+		DstMemberID: newConsumer,
+	}
+	if _, exists := p.Movements[partition]; exists {
+		// this partition has previously moved
+		existingPair := p.removeMovementRecordOfPartition(partition)
+		if existingPair.DstMemberID != oldConsumer {
+			Logger.Printf("Existing pair DstMemberID %s was not equal to the oldConsumer ID %s", existingPair.DstMemberID, oldConsumer)
+		}
+		if existingPair.SrcMemberID != newConsumer {
+			// the partition is not moving back to its previous consumer
+			p.addPartitionMovementRecord(partition, consumerPair{
+				SrcMemberID: existingPair.SrcMemberID,
+				DstMemberID: newConsumer,
+			})
+		}
+	} else {
+		p.addPartitionMovementRecord(partition, pair)
+	}
+}
+
+func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicPartitionAssignment, oldConsumer, newConsumer string) topicPartitionAssignment {
+	if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
+		return partition
+	}
+	if _, exists := p.Movements[partition]; exists {
+		// this partition has previously moved
+		if oldConsumer != p.Movements[partition].DstMemberID {
+			Logger.Printf("Partition movement DstMemberID %s was not equal to the oldConsumer ID %s", p.Movements[partition].DstMemberID, oldConsumer)
+		}
+		oldConsumer = p.Movements[partition].SrcMemberID
+	}
+
+	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+	reversePair := consumerPair{
+		SrcMemberID: newConsumer,
+		DstMemberID: oldConsumer,
+	}
+	if _, exists := partitionMovementsForThisTopic[reversePair]; !exists {
+		return partition
+	}
+	var reversePairPartition topicPartitionAssignment
+	for otherPartition := range partitionMovementsForThisTopic[reversePair] {
+		reversePairPartition = otherPartition
+	}
+	return reversePairPartition
+}
+
+func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) {
+	if src == dst {
+		return currentPath, false
+	}
+	if len(pairs) == 0 {
+		return currentPath, false
+	}
+	for _, pair := range pairs {
+		if src == pair.SrcMemberID && dst == pair.DstMemberID {
+			currentPath = append(currentPath, src, dst)
+			return currentPath, true
+		}
+	}
+
+	for _, pair := range pairs {
+		if pair.SrcMemberID == src {
+			// create a deep copy of the pairs, excluding the current pair
+			reducedSet := make([]consumerPair, len(pairs)-1)
+			i := 0
+			for _, p := range pairs {
+				if p != pair {
+					reducedSet[i] = p
+					i++
+				}
+			}
+
+			currentPath = append(currentPath, pair.SrcMemberID)
+			return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath)
+		}
+	}
+	return currentPath, false
+}
+
+func (p *partitionMovements) in(cycle []string, cycles [][]string) bool {
+	superCycle := make([]string, len(cycle)-1)
+	for i := 0; i < len(cycle)-1; i++ {
+		superCycle[i] = cycle[i]
+	}
+	superCycle = append(superCycle, cycle...)
+	for _, foundCycle := range cycles {
+		if len(foundCycle) == len(cycle) && indexOfSubList(superCycle, foundCycle) != -1 {
+			return true
+		}
+	}
+	return false
+}
+
+func (p *partitionMovements) hasCycles(pairs []consumerPair) bool {
+	cycles := make([][]string, 0)
+	for _, pair := range pairs {
+		// create a deep copy of the pairs, excluding the current pair
+		reducedPairs := make([]consumerPair, len(pairs)-1)
+		i := 0
+		for _, p := range pairs {
+			if p != pair {
+				reducedPairs[i] = p
+				i++
+			}
+		}
+		if path, linked := p.isLinked(pair.DstMemberID, pair.SrcMemberID, reducedPairs, []string{pair.SrcMemberID}); linked {
+			if !p.in(path, cycles) {
+				cycles = append(cycles, path)
+				Logger.Printf("A cycle of length %d was found: %v", len(path)-1, path)
+			}
+		}
+	}
+
+	// For now we want to make sure there are no partition movements of the same topic between a pair of consumers.
+	// The odds of finding a cycle among more than two consumers seem to be so low (according to various randomized
+	// tests with the given sticky algorithm) that it is not worth the added complexity of handling those cases.
+	for _, cycle := range cycles {
+		if len(cycle) == 3 {
+			return true
+		}
+	}
+	return false
+}
+
+func (p *partitionMovements) isSticky() bool {
+	for topic, movements := range p.PartitionMovementsByTopic {
+		movementPairs := make([]consumerPair, len(movements))
+		i := 0
+		for pair := range movements {
+			movementPairs[i] = pair
+			i++
+		}
+		if p.hasCycles(movementPairs) {
+			Logger.Printf("Stickiness is violated for topic %s", topic)
+			Logger.Printf("Partition movements for this topic occurred among the following consumer pairs: %v", movements)
+			return false
+		}
+	}
+	return true
+}
+
+func indexOfSubList(source []string, target []string) int {
+	targetSize := len(target)
+	maxCandidate := len(source) - targetSize
+nextCand:
+	for candidate := 0; candidate <= maxCandidate; candidate++ {
+		j := candidate
+		for i := 0; i < targetSize; i++ {
+			if target[i] != source[j] {
+				// Element mismatch, try the next candidate
+				continue nextCand
+			}
+			j++
+		}
+		// All elements of candidate matched target
+		return candidate
+	}
+	return -1
+}
+
+type consumerGroupMember struct {
+	id          string
+	assignments []topicPartitionAssignment
+}
+
+// assignmentPriorityQueue is a priority-queue of consumer group members that is sorted
+// in descending order (most assignments to least assignments).
+type assignmentPriorityQueue []*consumerGroupMember
+
+func (pq assignmentPriorityQueue) Len() int { return len(pq) }
+
+func (pq assignmentPriorityQueue) Less(i, j int) bool {
+	// order the assignment priority queue in descending order using assignment-count/member-id
+	if len(pq[i].assignments) == len(pq[j].assignments) {
+		return strings.Compare(pq[i].id, pq[j].id) > 0
+	}
+	return len(pq[i].assignments) > len(pq[j].assignments)
+}
+
+func (pq assignmentPriorityQueue) Swap(i, j int) {
+	pq[i], pq[j] = pq[j], pq[i]
+}
+
+func (pq *assignmentPriorityQueue) Push(x interface{}) {
+	member := x.(*consumerGroupMember)
+	*pq = append(*pq, member)
+}
+
+func (pq *assignmentPriorityQueue) Pop() interface{} {
+	old := *pq
+	n := len(old)
+	member := old[n-1]
+	*pq = old[0 : n-1]
+	return member
+}
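
For reference, here is a minimal, self-contained sketch of the container/heap pattern the assignmentPriorityQueue above is built for: the member with the most prior assignments surfaces at the root, ties break on member id, and the balancing loop either trims one assignment and calls heap.Fix or pops an exhausted member. The member type and partition strings below are illustrative stand-ins for sarama's unexported types, not part of this change.

package main

import (
	"container/heap"
	"fmt"
)

type member struct {
	id          string
	assignments []string // stand-in for []topicPartitionAssignment
}

type pq []*member

func (q pq) Len() int { return len(q) }
func (q pq) Less(i, j int) bool {
	// descending by assignment count, ties broken by id (as in assignmentPriorityQueue)
	if len(q[i].assignments) == len(q[j].assignments) {
		return q[i].id > q[j].id
	}
	return len(q[i].assignments) > len(q[j].assignments)
}
func (q pq) Swap(i, j int)       { q[i], q[j] = q[j], q[i] }
func (q *pq) Push(x interface{}) { *q = append(*q, x.(*member)) }
func (q *pq) Pop() interface{} {
	old := *q
	n := len(old)
	m := old[n-1]
	*q = old[:n-1]
	return m
}

func main() {
	q := pq{
		{id: "c1", assignments: []string{"t0/0"}},
		{id: "c2", assignments: []string{"t0/1", "t0/2"}},
	}
	heap.Init(&q)

	top := q[0]                           // member with the most previous assignments ("c2")
	top.assignments = top.assignments[1:] // hand one partition back
	heap.Fix(&q, 0)                       // restore heap order after mutating the root
	fmt.Println(q[0].id)                  // counts now tie, so ordering falls back to id: prints "c2"
}
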
diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go
index 9c3e5a0..dd01e4e 100644
--- a/vendor/github.com/Shopify/sarama/broker.go
+++ b/vendor/github.com/Shopify/sarama/broker.go
@@ -13,7 +13,7 @@
 	"sync/atomic"
 	"time"
 
-	metrics "github.com/rcrowley/go-metrics"
+	"github.com/rcrowley/go-metrics"
 )
 
 // Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
@@ -40,6 +40,7 @@
 	outgoingByteRate       metrics.Meter
 	responseRate           metrics.Meter
 	responseSize           metrics.Histogram
+	requestsInFlight       metrics.Counter
 	brokerIncomingByteRate metrics.Meter
 	brokerRequestRate      metrics.Meter
 	brokerRequestSize      metrics.Histogram
@@ -47,6 +48,7 @@
 	brokerOutgoingByteRate metrics.Meter
 	brokerResponseRate     metrics.Meter
 	brokerResponseSize     metrics.Histogram
+	brokerRequestsInFlight metrics.Counter
 
 	kerberosAuthenticator GSSAPIKerberosAuth
 }
@@ -71,7 +73,7 @@
 	// server negotiate SASL by wrapping tokens with Kafka protocol headers.
 	SASLHandshakeV1 = int16(1)
 	// SASLExtKeyAuth is the reserved extension key name sent as part of the
-	// SASL/OAUTHBEARER intial client response
+	// SASL/OAUTHBEARER initial client response
 	SASLExtKeyAuth = "auth"
 )
 
@@ -117,6 +119,7 @@
 type responsePromise struct {
 	requestTime   time.Time
 	correlationID int32
+	headerVersion int16
 	packets       chan []byte
 	errors        chan error
 }
@@ -151,27 +154,19 @@
 	go withRecover(func() {
 		defer b.lock.Unlock()
 
-		dialer := net.Dialer{
-			Timeout:   conf.Net.DialTimeout,
-			KeepAlive: conf.Net.KeepAlive,
-			LocalAddr: conf.Net.LocalAddr,
-		}
-
-		if conf.Net.TLS.Enable {
-			b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
-		} else if conf.Net.Proxy.Enable {
-			b.conn, b.connErr = conf.Net.Proxy.Dialer.Dial("tcp", b.addr)
-		} else {
-			b.conn, b.connErr = dialer.Dial("tcp", b.addr)
-		}
+		dialer := conf.getDialer()
+		b.conn, b.connErr = dialer.Dial("tcp", b.addr)
 		if b.connErr != nil {
 			Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
 			b.conn = nil
 			atomic.StoreInt32(&b.opened, 0)
 			return
 		}
-		b.conn = newBufConn(b.conn)
+		if conf.Net.TLS.Enable {
+			b.conn = tls.Client(b.conn, validServerNameTLS(b.addr, conf.Net.TLS.Config))
+		}
 
+		b.conn = newBufConn(b.conn)
 		b.conf = conf
 
 		// Create or reuse the global metrics shared between brokers
@@ -182,6 +177,7 @@
 		b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
 		b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
 		b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
+		b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", conf.MetricRegistry)
 		// Do not gather metrics for seeded broker (only used during bootstrap) because they share
 		// the same id (-1) and are already exposed through the global metrics above
 		if b.id >= 0 {
@@ -189,7 +185,6 @@
 		}
 
 		if conf.Net.SASL.Enable {
-
 			b.connErr = b.authenticateViaSASL()
 
 			if b.connErr != nil {
@@ -228,7 +223,7 @@
 	return b.conn != nil, b.connErr
 }
 
-//Close closes the broker resources
+// Close closes the broker resources
 func (b *Broker) Close() error {
 	b.lock.Lock()
 	defer b.lock.Unlock()
@@ -281,12 +276,11 @@
 	return *b.rack
 }
 
-//GetMetadata send a metadata request and returns a metadata response or error
+// GetMetadata send a metadata request and returns a metadata response or error
 func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
 	response := new(MetadataResponse)
 
 	err := b.sendAndReceive(request, response)
-
 	if err != nil {
 		return nil, err
 	}
@@ -294,12 +288,11 @@
 	return response, nil
 }
 
-//GetConsumerMetadata send a consumer metadata request and returns a consumer metadata response or error
+// GetConsumerMetadata send a consumer metadata request and returns a consumer metadata response or error
 func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
 	response := new(ConsumerMetadataResponse)
 
 	err := b.sendAndReceive(request, response)
-
 	if err != nil {
 		return nil, err
 	}
@@ -307,12 +300,11 @@
 	return response, nil
 }
 
-//FindCoordinator sends a find coordinate request and returns a response or error
+// FindCoordinator sends a find coordinator request and returns a response or error
 func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
 	response := new(FindCoordinatorResponse)
 
 	err := b.sendAndReceive(request, response)
-
 	if err != nil {
 		return nil, err
 	}
@@ -320,12 +312,11 @@
 	return response, nil
 }
 
-//GetAvailableOffsets return an offset response or error
+// GetAvailableOffsets return an offset response or error
 func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
 	response := new(OffsetResponse)
 
 	err := b.sendAndReceive(request, response)
-
 	if err != nil {
 		return nil, err
 	}
@@ -333,7 +324,7 @@
 	return response, nil
 }
 
-//Produce returns a produce response or error
+// Produce returns a produce response or error
 func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
 	var (
 		response *ProduceResponse
@@ -354,7 +345,7 @@
 	return response, nil
 }
 
-//Fetch returns a FetchResponse or error
+// Fetch returns a FetchResponse or error
 func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
 	response := new(FetchResponse)
 
@@ -366,7 +357,7 @@
 	return response, nil
 }
 
-//CommitOffset return an Offset commit reponse or error
+// CommitOffset return an Offset commit response or error
 func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
 	response := new(OffsetCommitResponse)
 
@@ -378,9 +369,10 @@
 	return response, nil
 }
 
-//FetchOffset returns an offset fetch response or error
+// FetchOffset returns an offset fetch response or error
 func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
 	response := new(OffsetFetchResponse)
+	response.Version = request.Version // needed to handle the two header versions
 
 	err := b.sendAndReceive(request, response)
 	if err != nil {
@@ -390,7 +382,7 @@
 	return response, nil
 }
 
-//JoinGroup returns a join group response or error
+// JoinGroup returns a join group response or error
 func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
 	response := new(JoinGroupResponse)
 
@@ -402,7 +394,7 @@
 	return response, nil
 }
 
-//SyncGroup returns a sync group response or error
+// SyncGroup returns a sync group response or error
 func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
 	response := new(SyncGroupResponse)
 
@@ -414,7 +406,7 @@
 	return response, nil
 }
 
-//LeaveGroup return a leave group response or error
+// LeaveGroup return a leave group response or error
 func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
 	response := new(LeaveGroupResponse)
 
@@ -426,7 +418,7 @@
 	return response, nil
 }
 
-//Heartbeat returns a heartbeat response or error
+// Heartbeat returns a heartbeat response or error
 func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
 	response := new(HeartbeatResponse)
 
@@ -438,7 +430,7 @@
 	return response, nil
 }
 
-//ListGroups return a list group response or error
+// ListGroups return a list group response or error
 func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
 	response := new(ListGroupsResponse)
 
@@ -450,7 +442,7 @@
 	return response, nil
 }
 
-//DescribeGroups return describe group response or error
+// DescribeGroups return describe group response or error
 func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
 	response := new(DescribeGroupsResponse)
 
@@ -462,7 +454,7 @@
 	return response, nil
 }
 
-//ApiVersions return api version response or error
+// ApiVersions return api version response or error
 func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
 	response := new(ApiVersionsResponse)
 
@@ -474,7 +466,7 @@
 	return response, nil
 }
 
-//CreateTopics send a create topic request and returns create topic response
+// CreateTopics send a create topic request and returns create topic response
 func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
 	response := new(CreateTopicsResponse)
 
@@ -486,7 +478,7 @@
 	return response, nil
 }
 
-//DeleteTopics sends a delete topic request and returns delete topic response
+// DeleteTopics sends a delete topic request and returns delete topic response
 func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
 	response := new(DeleteTopicsResponse)
 
@@ -498,8 +490,8 @@
 	return response, nil
 }
 
-//CreatePartitions sends a create partition request and returns create
-//partitions response or error
+// CreatePartitions sends a create partition request and returns create
+// partitions response or error
 func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
 	response := new(CreatePartitionsResponse)
 
@@ -511,8 +503,34 @@
 	return response, nil
 }
 
-//DeleteRecords send a request to delete records and return delete record
-//response or error
+// AlterPartitionReassignments sends an alter partition reassignments request and
+// returns an alter partition reassignments response
+func (b *Broker) AlterPartitionReassignments(request *AlterPartitionReassignmentsRequest) (*AlterPartitionReassignmentsResponse, error) {
+	response := new(AlterPartitionReassignmentsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// ListPartitionReassignments sends a list partition reassignments request and
+// returns a list partition reassignments response
+func (b *Broker) ListPartitionReassignments(request *ListPartitionReassignmentsRequest) (*ListPartitionReassignmentsResponse, error) {
+	response := new(ListPartitionReassignmentsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DeleteRecords send a request to delete records and return delete record
+// response or error
 func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) {
 	response := new(DeleteRecordsResponse)
 
@@ -524,7 +542,7 @@
 	return response, nil
 }
 
-//DescribeAcls sends a describe acl request and returns a response or error
+// DescribeAcls sends a describe acl request and returns a response or error
 func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
 	response := new(DescribeAclsResponse)
 
@@ -536,7 +554,7 @@
 	return response, nil
 }
 
-//CreateAcls sends a create acl request and returns a response or error
+// CreateAcls sends a create acl request and returns a response or error
 func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
 	response := new(CreateAclsResponse)
 
@@ -548,7 +566,7 @@
 	return response, nil
 }
 
-//DeleteAcls sends a delete acl request and returns a response or error
+// DeleteAcls sends a delete acl request and returns a response or error
 func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
 	response := new(DeleteAclsResponse)
 
@@ -560,7 +578,7 @@
 	return response, nil
 }
 
-//InitProducerID sends an init producer request and returns a response or error
+// InitProducerID sends an init producer request and returns a response or error
 func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
 	response := new(InitProducerIDResponse)
 
@@ -572,8 +590,8 @@
 	return response, nil
 }
 
-//AddPartitionsToTxn send a request to add partition to txn and returns
-//a response or error
+// AddPartitionsToTxn send a request to add partition to txn and returns
+// a response or error
 func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
 	response := new(AddPartitionsToTxnResponse)
 
@@ -585,8 +603,8 @@
 	return response, nil
 }
 
-//AddOffsetsToTxn sends a request to add offsets to txn and returns a response
-//or error
+// AddOffsetsToTxn sends a request to add offsets to txn and returns a response
+// or error
 func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
 	response := new(AddOffsetsToTxnResponse)
 
@@ -598,7 +616,7 @@
 	return response, nil
 }
 
-//EndTxn sends a request to end txn and returns a response or error
+// EndTxn sends a request to end txn and returns a response or error
 func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
 	response := new(EndTxnResponse)
 
@@ -610,8 +628,8 @@
 	return response, nil
 }
 
-//TxnOffsetCommit sends a request to commit transaction offsets and returns
-//a response or error
+// TxnOffsetCommit sends a request to commit transaction offsets and returns
+// a response or error
 func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
 	response := new(TxnOffsetCommitResponse)
 
@@ -623,8 +641,8 @@
 	return response, nil
 }
 
-//DescribeConfigs sends a request to describe config and returns a response or
-//error
+// DescribeConfigs sends a request to describe config and returns a response or
+// error
 func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
 	response := new(DescribeConfigsResponse)
 
@@ -636,7 +654,7 @@
 	return response, nil
 }
 
-//AlterConfigs sends a request to alter config and return a response or error
+// AlterConfigs sends a request to alter config and return a response or error
 func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
 	response := new(AlterConfigsResponse)
 
@@ -648,7 +666,19 @@
 	return response, nil
 }
 
-//DeleteGroups sends a request to delete groups and returns a response or error
+// IncrementalAlterConfigs sends a request to incremental alter config and return a response or error
+func (b *Broker) IncrementalAlterConfigs(request *IncrementalAlterConfigsRequest) (*IncrementalAlterConfigsResponse, error) {
+	response := new(IncrementalAlterConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DeleteGroups sends a request to delete groups and returns a response or error
 func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) {
 	response := new(DeleteGroupsResponse)
 
@@ -659,7 +689,62 @@
 	return response, nil
 }
 
-func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
+// DescribeLogDirs sends a request to get the broker's log dir paths and sizes
+func (b *Broker) DescribeLogDirs(request *DescribeLogDirsRequest) (*DescribeLogDirsResponse, error) {
+	response := new(DescribeLogDirsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DescribeUserScramCredentials sends a request to get SCRAM users
+func (b *Broker) DescribeUserScramCredentials(req *DescribeUserScramCredentialsRequest) (*DescribeUserScramCredentialsResponse, error) {
+	res := new(DescribeUserScramCredentialsResponse)
+
+	err := b.sendAndReceive(req, res)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, err
+}
+
+func (b *Broker) AlterUserScramCredentials(req *AlterUserScramCredentialsRequest) (*AlterUserScramCredentialsResponse, error) {
+	res := new(AlterUserScramCredentialsResponse)
+
+	err := b.sendAndReceive(req, res)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+// readFull ensures the conn ReadDeadline has been set up before making a
+// call to io.ReadFull
+func (b *Broker) readFull(buf []byte) (n int, err error) {
+	if err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)); err != nil {
+		return 0, err
+	}
+
+	return io.ReadFull(b.conn, buf)
+}
+
+// write ensures the conn WriteDeadline has been set up before making a
+// call to conn.Write
+func (b *Broker) write(buf []byte) (n int, err error) {
+	if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil {
+		return 0, err
+	}
+
+	return b.conn.Write(buf)
+}
+
+func (b *Broker) send(rb protocolBody, promiseResponse bool, responseHeaderVersion int16) (*responsePromise, error) {
 	b.lock.Lock()
 	defer b.lock.Unlock()
 
@@ -680,33 +765,36 @@
 		return nil, err
 	}
 
-	err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
-	if err != nil {
-		return nil, err
-	}
-
 	requestTime := time.Now()
-	bytes, err := b.conn.Write(buf)
-	b.updateOutgoingCommunicationMetrics(bytes) //TODO: should it be after error check
+	// Will be decremented in responseReceiver (except error or request with NoResponse)
+	b.addRequestInFlightMetrics(1)
+	bytes, err := b.write(buf)
+	b.updateOutgoingCommunicationMetrics(bytes)
 	if err != nil {
+		b.addRequestInFlightMetrics(-1)
 		return nil, err
 	}
 	b.correlationID++
 
 	if !promiseResponse {
 		// Record request latency without the response
-		b.updateRequestLatencyMetrics(time.Since(requestTime))
+		b.updateRequestLatencyAndInFlightMetrics(time.Since(requestTime))
 		return nil, nil
 	}
 
-	promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)}
+	promise := responsePromise{requestTime, req.correlationID, responseHeaderVersion, make(chan []byte), make(chan error)}
 	b.responses <- promise
 
 	return &promise, nil
 }
 
-func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
-	promise, err := b.send(req, res != nil)
+func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error {
+	responseHeaderVersion := int16(-1)
+	if res != nil {
+		responseHeaderVersion = res.headerVersion()
+	}
+
+	promise, err := b.send(req, res != nil, responseHeaderVersion)
 	if err != nil {
 		return err
 	}
@@ -760,7 +848,7 @@
 		return err
 	}
 
-	port, err := strconv.Atoi(portstr)
+	port, err := strconv.ParseInt(portstr, 10, 32)
 	if err != nil {
 		return err
 	}
@@ -786,22 +874,20 @@
 
 func (b *Broker) responseReceiver() {
 	var dead error
-	header := make([]byte, 8)
 
 	for response := range b.responses {
 		if dead != nil {
+			// This was previously incremented in send() and
+			// we are not calling updateIncomingCommunicationMetrics()
+			b.addRequestInFlightMetrics(-1)
 			response.errors <- dead
 			continue
 		}
 
-		err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout))
-		if err != nil {
-			dead = err
-			response.errors <- err
-			continue
-		}
+		headerLength := getHeaderLength(response.headerVersion)
+		header := make([]byte, headerLength)
 
-		bytesReadHeader, err := io.ReadFull(b.conn, header)
+		bytesReadHeader, err := b.readFull(header)
 		requestLatency := time.Since(response.requestTime)
 		if err != nil {
 			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
@@ -811,7 +897,7 @@
 		}
 
 		decodedHeader := responseHeader{}
-		err = decode(header, &decodedHeader)
+		err = versionedDecode(header, &decodedHeader, response.headerVersion)
 		if err != nil {
 			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
 			dead = err
@@ -827,8 +913,8 @@
 			continue
 		}
 
-		buf := make([]byte, decodedHeader.length-4)
-		bytesReadBody, err := io.ReadFull(b.conn, buf)
+		buf := make([]byte, decodedHeader.length-int32(headerLength)+4)
+		bytesReadBody, err := b.readFull(buf)
 		b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
 		if err != nil {
 			dead = err
@@ -841,6 +927,15 @@
 	close(b.done)
 }
 
+func getHeaderLength(headerVersion int16) int8 {
+	if headerVersion < 1 {
+		return 8
+	} else {
+		// the header contains an additional tagged field length (0); we don't support actual tags yet.
+		return 9
+	}
+}
+
 func (b *Broker) authenticateViaSASL() error {
 	switch b.conf.Net.SASL.Mechanism {
 	case SASLTypeOAuth:
@@ -871,31 +966,31 @@
 		return err
 	}
 
-	err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
-	if err != nil {
-		return err
-	}
-
 	requestTime := time.Now()
-	bytes, err := b.conn.Write(buf)
+	// Will be decremented in updateIncomingCommunicationMetrics (except error)
+	b.addRequestInFlightMetrics(1)
+	bytes, err := b.write(buf)
 	b.updateOutgoingCommunicationMetrics(bytes)
 	if err != nil {
+		b.addRequestInFlightMetrics(-1)
 		Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
 		return err
 	}
 	b.correlationID++
-	//wait for the response
+
 	header := make([]byte, 8) // response header
-	_, err = io.ReadFull(b.conn, header)
+	_, err = b.readFull(header)
 	if err != nil {
+		b.addRequestInFlightMetrics(-1)
 		Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
 		return err
 	}
 
 	length := binary.BigEndian.Uint32(header[:4])
 	payload := make([]byte, length-4)
-	n, err := io.ReadFull(b.conn, payload)
+	n, err := b.readFull(payload)
 	if err != nil {
+		b.addRequestInFlightMetrics(-1)
 		Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
 		return err
 	}
@@ -943,10 +1038,9 @@
 // When credentials are invalid, Kafka replies with a SaslAuthenticate response
 // containing an error code and message detailing the authentication failure.
 func (b *Broker) sendAndReceiveSASLPlainAuth() error {
-	// default to V0 to allow for backward compatability when SASL is enabled
+	// default to V0 to allow for backward compatibility when SASL is enabled
 	// but not the handshake
 	if b.conf.Net.SASL.Handshake {
-
 		handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version)
 		if handshakeErr != nil {
 			Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
@@ -962,28 +1056,24 @@
 
 // sendAndReceiveV0SASLPlainAuth flows the v0 sasl auth NOT wrapped in the kafka protocol
 func (b *Broker) sendAndReceiveV0SASLPlainAuth() error {
-
-	length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
-	authBytes := make([]byte, length+4) //4 byte length header + auth data
+	length := len(b.conf.Net.SASL.AuthIdentity) + 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
+	authBytes := make([]byte, length+4) // 4 byte length header + auth data
 	binary.BigEndian.PutUint32(authBytes, uint32(length))
-	copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))
-
-	err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
-	if err != nil {
-		Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
-		return err
-	}
+	copy(authBytes[4:], b.conf.Net.SASL.AuthIdentity+"\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password)
 
 	requestTime := time.Now()
-	bytesWritten, err := b.conn.Write(authBytes)
+	// Will be decremented in updateIncomingCommunicationMetrics (except error)
+	b.addRequestInFlightMetrics(1)
+	bytesWritten, err := b.write(authBytes)
 	b.updateOutgoingCommunicationMetrics(bytesWritten)
 	if err != nil {
+		b.addRequestInFlightMetrics(-1)
 		Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
 		return err
 	}
 
 	header := make([]byte, 4)
-	n, err := io.ReadFull(b.conn, header)
+	n, err := b.readFull(header)
 	b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
 	// If the credentials are valid, we would get a 4 byte response filled with null characters.
 	// Otherwise, the broker closes the connection and we get an EOF
@@ -1002,11 +1092,13 @@
 
 	requestTime := time.Now()
 
+	// Will be decremented in updateIncomingCommunicationMetrics (except error)
+	b.addRequestInFlightMetrics(1)
 	bytesWritten, err := b.sendSASLPlainAuthClientResponse(correlationID)
-
 	b.updateOutgoingCommunicationMetrics(bytesWritten)
 
 	if err != nil {
+		b.addRequestInFlightMetrics(-1)
 		Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
 		return err
 	}
@@ -1059,16 +1151,18 @@
 // if the broker responds with a challenge, in which case the token is
 // rejected.
 func (b *Broker) sendClientMessage(message []byte) (bool, error) {
-
 	requestTime := time.Now()
+	// Will be decremented in updateIncomingCommunicationMetrics (except error)
+	b.addRequestInFlightMetrics(1)
 	correlationID := b.correlationID
 
 	bytesWritten, err := b.sendSASLOAuthBearerClientMessage(message, correlationID)
+	b.updateOutgoingCommunicationMetrics(bytesWritten)
 	if err != nil {
+		b.addRequestInFlightMetrics(-1)
 		return false, err
 	}
 
-	b.updateOutgoingCommunicationMetrics(bytesWritten)
 	b.correlationID++
 
 	res := &SaslAuthenticateResponse{}
@@ -1099,22 +1193,25 @@
 	msg, err := scramClient.Step("")
 	if err != nil {
 		return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error())
-
 	}
 
 	for !scramClient.Done() {
 		requestTime := time.Now()
+		// Will be decremented in updateIncomingCommunicationMetrics (except error)
+		b.addRequestInFlightMetrics(1)
 		correlationID := b.correlationID
 		bytesWritten, err := b.sendSaslAuthenticateRequest(correlationID, []byte(msg))
+		b.updateOutgoingCommunicationMetrics(bytesWritten)
 		if err != nil {
+			b.addRequestInFlightMetrics(-1)
 			Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
 			return err
 		}
 
-		b.updateOutgoingCommunicationMetrics(bytesWritten)
 		b.correlationID++
 		challenge, err := b.receiveSaslAuthenticateResponse(correlationID)
 		if err != nil {
+			b.addRequestInFlightMetrics(-1)
 			Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
 			return err
 		}
@@ -1139,22 +1236,18 @@
 		return 0, err
 	}
 
-	if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil {
-		return 0, err
-	}
-
-	return b.conn.Write(buf)
+	return b.write(buf)
 }
 
 func (b *Broker) receiveSaslAuthenticateResponse(correlationID int32) ([]byte, error) {
 	buf := make([]byte, responseLengthSize+correlationIDSize)
-	_, err := io.ReadFull(b.conn, buf)
+	_, err := b.readFull(buf)
 	if err != nil {
 		return nil, err
 	}
 
 	header := responseHeader{}
-	err = decode(buf, &header)
+	err = versionedDecode(buf, &header, 0)
 	if err != nil {
 		return nil, err
 	}
@@ -1164,7 +1257,7 @@
 	}
 
 	buf = make([]byte, header.length-correlationIDSize)
-	_, err = io.ReadFull(b.conn, buf)
+	_, err = b.readFull(buf)
 	if err != nil {
 		return nil, err
 	}
@@ -1211,7 +1304,7 @@
 }
 
 func (b *Broker) sendSASLPlainAuthClientResponse(correlationID int32) (int, error) {
-	authBytes := []byte("\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password)
+	authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password)
 	rb := &SaslAuthenticateRequest{authBytes}
 	req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
 	buf, err := encode(req, b.conf.MetricRegistry)
@@ -1219,16 +1312,10 @@
 		return 0, err
 	}
 
-	err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
-	if err != nil {
-		Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
-		return 0, err
-	}
-	return b.conn.Write(buf)
+	return b.write(buf)
 }
 
 func (b *Broker) sendSASLOAuthBearerClientMessage(initialResp []byte, correlationID int32) (int, error) {
-
 	rb := &SaslAuthenticateRequest{initialResp}
 
 	req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
@@ -1238,25 +1325,18 @@
 		return 0, err
 	}
 
-	if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil {
-		return 0, err
-	}
-
-	return b.conn.Write(buf)
+	return b.write(buf)
 }
 
 func (b *Broker) receiveSASLServerResponse(res *SaslAuthenticateResponse, correlationID int32) (int, error) {
-
 	buf := make([]byte, responseLengthSize+correlationIDSize)
-
-	bytesRead, err := io.ReadFull(b.conn, buf)
+	bytesRead, err := b.readFull(buf)
 	if err != nil {
 		return bytesRead, err
 	}
 
 	header := responseHeader{}
-
-	err = decode(buf, &header)
+	err = versionedDecode(buf, &header, 0)
 	if err != nil {
 		return bytesRead, err
 	}
@@ -1266,8 +1346,7 @@
 	}
 
 	buf = make([]byte, header.length-correlationIDSize)
-
-	c, err := io.ReadFull(b.conn, buf)
+	c, err := b.readFull(buf)
 	bytesRead += c
 	if err != nil {
 		return bytesRead, err
@@ -1285,7 +1364,7 @@
 }
 
 func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
-	b.updateRequestLatencyMetrics(requestLatency)
+	b.updateRequestLatencyAndInFlightMetrics(requestLatency)
 	b.responseRate.Mark(1)
 
 	if b.brokerResponseRate != nil {
@@ -1304,7 +1383,7 @@
 	}
 }
 
-func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
+func (b *Broker) updateRequestLatencyAndInFlightMetrics(requestLatency time.Duration) {
 	requestLatencyInMs := int64(requestLatency / time.Millisecond)
 	b.requestLatency.Update(requestLatencyInMs)
 
@@ -1312,6 +1391,14 @@
 		b.brokerRequestLatency.Update(requestLatencyInMs)
 	}
 
+	b.addRequestInFlightMetrics(-1)
+}
+
+func (b *Broker) addRequestInFlightMetrics(i int64) {
+	b.requestsInFlight.Inc(i)
+	if b.brokerRequestsInFlight != nil {
+		b.brokerRequestsInFlight.Inc(i)
+	}
 }
 
 func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
@@ -1330,7 +1417,6 @@
 	if b.brokerRequestSize != nil {
 		b.brokerRequestSize.Update(requestSize)
 	}
-
 }
 
 func (b *Broker) registerMetrics() {
@@ -1341,12 +1427,14 @@
 	b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate")
 	b.brokerResponseRate = b.registerMeter("response-rate")
 	b.brokerResponseSize = b.registerHistogram("response-size")
+	b.brokerRequestsInFlight = b.registerCounter("requests-in-flight")
 }
 
 func (b *Broker) unregisterMetrics() {
 	for _, name := range b.registeredMetrics {
 		b.conf.MetricRegistry.Unregister(name)
 	}
+	b.registeredMetrics = nil
 }
 
 func (b *Broker) registerMeter(name string) metrics.Meter {
@@ -1360,3 +1448,28 @@
 	b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
 	return getOrRegisterHistogram(nameForBroker, b.conf.MetricRegistry)
 }
+
+func (b *Broker) registerCounter(name string) metrics.Counter {
+	nameForBroker := getMetricNameForBroker(name, b)
+	b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
+	return metrics.GetOrRegisterCounter(nameForBroker, b.conf.MetricRegistry)
+}
+
+func validServerNameTLS(addr string, cfg *tls.Config) *tls.Config {
+	if cfg == nil {
+		cfg = &tls.Config{
+			MinVersion: tls.VersionTLS12,
+		}
+	}
+	if cfg.ServerName != "" {
+		return cfg
+	}
+
+	c := cfg.Clone()
+	sn, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		Logger.Println(fmt.Errorf("failed to get ServerName from addr %w", err))
+	}
+	c.ServerName = sn
+	return c
+}
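
A hedged usage sketch for the requests-in-flight counters registered above: the global "requests-in-flight" name comes from this change, while the per-broker variant reuses sarama's existing "<name>-for-broker-<id>" naming helper (stated here as an assumption, since getMetricNameForBroker is not shown in this hunk). The broker address is a placeholder.

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
	"github.com/rcrowley/go-metrics"
)

func main() {
	cfg := sarama.NewConfig()

	client, err := sarama.NewClient([]string{"localhost:9092"}, cfg) // placeholder address
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Global counter registered in Broker.Open: incremented in send() before the
	// request is written, decremented once the response (or an error) is accounted for.
	inFlight := metrics.GetOrRegisterCounter("requests-in-flight", cfg.MetricRegistry)
	fmt.Println("requests in flight:", inFlight.Count())
}
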
diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go
index c4c54b2..c0918ba 100644
--- a/vendor/github.com/Shopify/sarama/client.go
+++ b/vendor/github.com/Shopify/sarama/client.go
@@ -17,12 +17,21 @@
 	// altered after it has been created.
 	Config() *Config
 
-	// Controller returns the cluster controller broker. Requires Kafka 0.10 or higher.
+	// Controller returns the cluster controller broker. It will return a
+	// locally cached value if it's available. You can call RefreshController
+	// to update the cached value. Requires Kafka 0.10 or higher.
 	Controller() (*Broker, error)
 
+	// RefreshController retrieves the cluster controller from fresh metadata
+	// and stores it in the local cache. Requires Kafka 0.10 or higher.
+	RefreshController() (*Broker, error)
+
 	// Brokers returns the current set of active brokers as retrieved from cluster metadata.
 	Brokers() []*Broker
 
+	// Broker returns the active Broker if available for the broker ID.
+	Broker(brokerID int32) (*Broker, error)
+
 	// Topics returns the set of available topics as retrieved from cluster metadata.
 	Topics() ([]string, error)
 
@@ -50,6 +59,11 @@
 	// partition. Offline replicas are replicas which are offline
 	OfflineReplicas(topic string, partitionID int32) ([]int32, error)
 
+	// RefreshBrokers takes a list of addresses to be used as seed brokers.
+	// Existing broker connections are closed and the updated list of seed brokers
+	// will be used for the next metadata fetch.
+	RefreshBrokers(addrs []string) error
+
 	// RefreshMetadata takes a list of topics and queries the cluster to refresh the
 	// available metadata for those topics. If no topics are provided, it will refresh
 	// metadata for all topics.
@@ -149,10 +163,7 @@
 		coordinators:            make(map[string]int32),
 	}
 
-	random := rand.New(rand.NewSource(time.Now().UnixNano()))
-	for _, index := range random.Perm(len(addrs)) {
-		client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
-	}
+	client.randomizeSeedBrokers(addrs)
 
 	if conf.Metadata.Full {
 		// do an initial fetch of all cluster metadata by specifying an empty list of topics
@@ -190,10 +201,20 @@
 	return brokers
 }
 
+func (client *client) Broker(brokerID int32) (*Broker, error) {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+	broker, ok := client.brokers[brokerID]
+	if !ok {
+		return nil, ErrBrokerNotFound
+	}
+	_ = broker.Open(client.conf)
+	return broker, nil
+}
+
 func (client *client) InitProducerID() (*InitProducerIDResponse, error) {
 	var err error
 	for broker := client.any(); broker != nil; broker = client.any() {
-
 		req := &InitProducerIDRequest{}
 
 		response, err := broker.InitProducerID(req)
@@ -242,6 +263,9 @@
 }
 
 func (client *client) Closed() bool {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
 	return client.brokers == nil
 }
 
@@ -421,6 +445,27 @@
 	return leader, err
 }
 
+func (client *client) RefreshBrokers(addrs []string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	for _, broker := range client.brokers {
+		_ = broker.Close()
+		delete(client.brokers, broker.ID())
+	}
+
+	client.seedBrokers = nil
+	client.deadSeeds = nil
+
+	client.randomizeSeedBrokers(addrs)
+
+	return nil
+}
+
 func (client *client) RefreshMetadata(topics ...string) error {
 	if client.Closed() {
 		return ErrClosedClient
@@ -430,7 +475,7 @@
 	// error. This handles the case by returning an error instead of sending it
 	// off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
 	for _, topic := range topics {
-		if len(topic) == 0 {
+		if topic == "" {
 			return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
 		}
 	}
@@ -448,7 +493,6 @@
 	}
 
 	offset, err := client.getOffset(topic, partitionID, time)
-
 	if err != nil {
 		if err := client.RefreshMetadata(topic); err != nil {
 			return -1, err
@@ -484,6 +528,35 @@
 	return controller, nil
 }
 
+// deregisterController removes the cached controllerID
+func (client *client) deregisterController() {
+	client.lock.Lock()
+	defer client.lock.Unlock()
+	delete(client.brokers, client.controllerID)
+}
+
+// RefreshController retrieves the cluster controller from fresh metadata
+// and stores it in the local cache. Requires Kafka 0.10 or higher.
+func (client *client) RefreshController() (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	client.deregisterController()
+
+	if err := client.refreshMetadata(); err != nil {
+		return nil, err
+	}
+
+	controller := client.cachedController()
+	if controller == nil {
+		return nil, ErrControllerNotAvailable
+	}
+
+	_ = controller.Open(client.conf)
+	return controller, nil
+}
+
 func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
 	if client.Closed() {
 		return nil, ErrClosedClient
@@ -525,10 +598,46 @@
 
 // private broker management helpers
 
+func (client *client) randomizeSeedBrokers(addrs []string) {
+	random := rand.New(rand.NewSource(time.Now().UnixNano()))
+	for _, index := range random.Perm(len(addrs)) {
+		client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
+	}
+}
+
+func (client *client) updateBroker(brokers []*Broker) {
+	currentBroker := make(map[int32]*Broker, len(brokers))
+
+	for _, broker := range brokers {
+		currentBroker[broker.ID()] = broker
+		if client.brokers[broker.ID()] == nil { // add new broker
+			client.brokers[broker.ID()] = broker
+			Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+		} else if broker.Addr() != client.brokers[broker.ID()].Addr() { // replace broker with new address
+			safeAsyncClose(client.brokers[broker.ID()])
+			client.brokers[broker.ID()] = broker
+			Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+		}
+	}
+
+	for id, broker := range client.brokers {
+		if _, exist := currentBroker[id]; !exist { // remove old broker
+			safeAsyncClose(broker)
+			delete(client.brokers, id)
+			Logger.Printf("client/broker remove invalid broker #%d with %s", broker.ID(), broker.Addr())
+		}
+	}
+}
+
 // registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
 // in the brokers map. It returns the broker that is registered, which may be the provided broker,
 // or a previously registered Broker instance. You must hold the write lock before calling this function.
 func (client *client) registerBroker(broker *Broker) {
+	if client.brokers == nil {
+		Logger.Printf("cannot register broker #%d at %s, client already closed", broker.ID(), broker.Addr())
+		return
+	}
+
 	if client.brokers[broker.ID()] == nil {
 		client.brokers[broker.ID()] = broker
 		Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
@@ -722,7 +831,7 @@
 }
 
 func (client *client) refreshMetadata() error {
-	topics := []string{}
+	var topics []string
 
 	if !client.conf.Metadata.Full {
 		if specificTopics, err := client.MetadataTopics(); err != nil {
@@ -756,7 +865,7 @@
 				Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout")
 				return err
 			}
-			Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
+			Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
 			if backoff > 0 {
 				time.Sleep(backoff)
 			}
@@ -782,7 +891,7 @@
 			req.Version = 1
 		}
 		response, err := broker.GetMetadata(req)
-		switch err.(type) {
+		switch err := err.(type) {
 		case nil:
 			allKnownMetaData := len(topics) == 0
 			// valid response, use it
@@ -799,10 +908,15 @@
 
 		case KError:
 			// if SASL auth error return as this _should_ be a non retryable err for all brokers
-			if err.(KError) == ErrSASLAuthenticationFailed {
+			if err == ErrSASLAuthenticationFailed {
 				Logger.Println("client/metadata failed SASL authentication")
 				return err
 			}
+
+			if err == ErrTopicAuthorizationFailed {
+				Logger.Println("client is not authorized to access this topic. The topics were: ", topics)
+				return err
+			}
 			// else remove that broker and try again
 			Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
 			_ = broker.Close()
@@ -817,7 +931,7 @@
 	}
 
 	if broker != nil {
-		Logger.Println("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
+		Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
 		return retry(ErrOutOfBrokers)
 	}
 
@@ -828,16 +942,19 @@
 
 // if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
 func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
+	if client.Closed() {
+		return
+	}
+
 	client.lock.Lock()
 	defer client.lock.Unlock()
 
 	// For all the brokers we received:
 	// - if it is a new ID, save it
 	// - if it is an existing ID, but the address we have is stale, discard the old one and save it
+	// - if a previously known broker is no longer returned in it, remove the old broker
 	// - otherwise ignore it, replacing our existing one would just bounce the connection
-	for _, broker := range data.Brokers {
-		client.registerBroker(broker)
-	}
+	client.updateBroker(data.Brokers)
 
 	client.controllerID = data.ControllerID
 
@@ -935,7 +1052,6 @@
 		request.CoordinatorType = CoordinatorGroup
 
 		response, err := broker.FindCoordinator(request)
-
 		if err != nil {
 			Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
 
@@ -966,6 +1082,10 @@
 			}
 
 			return retry(ErrConsumerCoordinatorNotAvailable)
+		case ErrGroupAuthorizationFailed:
+			Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", consumerGroup)
+			return retry(ErrGroupAuthorizationFailed)
+
 		default:
 			return nil, response.Err
 		}
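
A hedged sketch of the new Client surface added above (RefreshController, Broker, RefreshBrokers); the broker addresses are placeholders and error handling is kept minimal for brevity.

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig()) // placeholder address
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// RefreshController drops the cached controller and re-reads cluster metadata.
	controller, err := client.RefreshController()
	if err != nil {
		panic(err)
	}
	fmt.Println("controller:", controller.ID(), controller.Addr())

	// Broker returns an already-registered broker by ID, opening it if needed.
	b, err := client.Broker(controller.ID())
	if err != nil {
		panic(err)
	}
	fmt.Println("broker:", b.Addr())

	// RefreshBrokers closes existing connections and installs a new seed list
	// that will be used on the next metadata fetch.
	if err := client.RefreshBrokers([]string{"localhost:9093"}); err != nil { // placeholder address
		panic(err)
	}
}
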
diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/Shopify/sarama/compress.go
index 94b716e..12cd7c3 100644
--- a/vendor/github.com/Shopify/sarama/compress.go
+++ b/vendor/github.com/Shopify/sarama/compress.go
@@ -6,7 +6,7 @@
 	"fmt"
 	"sync"
 
-	"github.com/eapache/go-xerial-snappy"
+	snappy "github.com/eapache/go-xerial-snappy"
 	"github.com/pierrec/lz4"
 )
 
@@ -22,6 +22,87 @@
 			return gzip.NewWriter(nil)
 		},
 	}
+	gzipWriterPoolForCompressionLevel1 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 1)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel2 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 2)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel3 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 3)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel4 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 4)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel5 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 5)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel6 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 6)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel7 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 7)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel8 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 8)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel9 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 9)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
 )
 
 func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) {
@@ -34,15 +115,53 @@
 			buf    bytes.Buffer
 			writer *gzip.Writer
 		)
-		if level != CompressionLevelDefault {
+
+		switch level {
+		case CompressionLevelDefault:
+			writer = gzipWriterPool.Get().(*gzip.Writer)
+			defer gzipWriterPool.Put(writer)
+			writer.Reset(&buf)
+		case 1:
+			writer = gzipWriterPoolForCompressionLevel1.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel1.Put(writer)
+			writer.Reset(&buf)
+		case 2:
+			writer = gzipWriterPoolForCompressionLevel2.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel2.Put(writer)
+			writer.Reset(&buf)
+		case 3:
+			writer = gzipWriterPoolForCompressionLevel3.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel3.Put(writer)
+			writer.Reset(&buf)
+		case 4:
+			writer = gzipWriterPoolForCompressionLevel4.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel4.Put(writer)
+			writer.Reset(&buf)
+		case 5:
+			writer = gzipWriterPoolForCompressionLevel5.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel5.Put(writer)
+			writer.Reset(&buf)
+		case 6:
+			writer = gzipWriterPoolForCompressionLevel6.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel6.Put(writer)
+			writer.Reset(&buf)
+		case 7:
+			writer = gzipWriterPoolForCompressionLevel7.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel7.Put(writer)
+			writer.Reset(&buf)
+		case 8:
+			writer = gzipWriterPoolForCompressionLevel8.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel8.Put(writer)
+			writer.Reset(&buf)
+		case 9:
+			writer = gzipWriterPoolForCompressionLevel9.Get().(*gzip.Writer)
+			defer gzipWriterPoolForCompressionLevel9.Put(writer)
+			writer.Reset(&buf)
+		default:
 			writer, err = gzip.NewWriterLevel(&buf, level)
 			if err != nil {
 				return nil, err
 			}
-		} else {
-			writer = gzipWriterPool.Get().(*gzip.Writer)
-			defer gzipWriterPool.Put(writer)
-			writer.Reset(&buf)
 		}
 		if _, err := writer.Write(data); err != nil {
 			return nil, err
@@ -68,7 +187,7 @@
 		}
 		return buf.Bytes(), nil
 	case CompressionZSTD:
-		return zstdCompressLevel(nil, data, level)
+		return zstdCompress(nil, data)
 	default:
 		return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)}
 	}
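
A minimal sketch (not part of the vendored code) of the producer configuration that exercises the pools above: CompressionLevelDefault and levels 1 through 9 reuse a pooled *gzip.Writer, while any other level still falls through to gzip.NewWriterLevel.

package main

import "github.com/Shopify/sarama"

func newGzipProducerConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Producer.Compression = sarama.CompressionGZIP
	cfg.Producer.CompressionLevel = 6 // any value in 1-9 (or the default) hits a dedicated sync.Pool
	return cfg
}
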
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
index e2e6513..43e739c 100644
--- a/vendor/github.com/Shopify/sarama/config.go
+++ b/vendor/github.com/Shopify/sarama/config.go
@@ -21,6 +21,13 @@
 type Config struct {
 	// Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client.
 	Admin struct {
+		Retry struct {
+			// The total number of times to retry sending (retriable) admin requests (default 5).
+			// Similar to the `retries` setting of the JVM AdminClientConfig.
+			Max int
+			// Backoff time between retries of a failed request (default 100ms)
+			Backoff time.Duration
+		}
 		// The maximum duration the administrative Kafka client will wait for ClusterAdmin operations,
 		// including topics, brokers, configurations and ACLs (defaults to 3 seconds).
 		Timeout time.Duration
@@ -65,8 +72,15 @@
 			// (defaults to true). You should only set this to false if you're using
 			// a non-Kafka SASL proxy.
 			Handshake bool
-			//username and password for SASL/PLAIN  or SASL/SCRAM authentication
-			User     string
+			// AuthIdentity is an (optional) authorization identity (authzid) to
+			// use for SASL/PLAIN authentication (if different from User) when
+			// an authenticated user is permitted to act as the presented
+			// alternative user. See RFC4616 for details.
+			AuthIdentity string
+			// User is the authentication identity (authcid) to present for
+			// SASL/PLAIN or SASL/SCRAM authentication
+			User string
+			// Password for SASL/PLAIN authentication
 			Password string
 			// authz id used for SASL/SCRAM authentication
 			SCRAMAuthzID string
@@ -82,8 +96,9 @@
 			GSSAPI GSSAPIConfig
 		}
 
-		// KeepAlive specifies the keep-alive period for an active network connection.
-		// If zero, keep-alives are disabled. (default is 0: disabled).
+		// KeepAlive specifies the keep-alive period for an active network connection (defaults to 0).
+		// If zero or positive, keep-alives are enabled.
+		// If negative, keep-alives are disabled.
 		KeepAlive time.Duration
 
 		// LocalAddr is the local address to use when dialing an
@@ -214,6 +229,14 @@
 			// `Backoff` if set.
 			BackoffFunc func(retries, maxRetries int) time.Duration
 		}
+
+		// Interceptors to be called when the producer dispatcher reads the
+		// message for the first time. Interceptors allow you to intercept and
+		// possibly mutate the message before it is published to the Kafka
+		// cluster. The *ProducerMessage modified by the first interceptor's
+		// OnSend() is passed to the second interceptor's OnSend(), and so on
+		// in the interceptor chain.
+		Interceptors []ProducerInterceptor
 	}
 
 	// Consumer is the namespace for configuration related to consuming messages,
@@ -312,7 +335,7 @@
 		// than this, that partition will stop fetching more messages until it
 		// can proceed again.
 		// Note that, since the Messages channel is buffered, the actual grace time is
-		// (MaxProcessingTime * ChanneBufferSize). Defaults to 100ms.
+		// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
 		// If a message is not written to the Messages channel between two ticks
 		// of the expiryTicker then a timeout is detected.
 		// Using a ticker instead of a timer to detect timeouts should typically
@@ -338,9 +361,21 @@
 		// offsets. This currently requires the manual use of an OffsetManager
 		// but will eventually be automated.
 		Offsets struct {
-			// How frequently to commit updated offsets. Defaults to 1s.
+			// Deprecated: CommitInterval exists for historical compatibility
+			// and should not be used. Please use Consumer.Offsets.AutoCommit
 			CommitInterval time.Duration
 
+			// AutoCommit specifies the configuration for automatically committing updated offsets.
+			AutoCommit struct {
+				// Whether or not to auto-commit updated offsets back to the broker.
+				// (default enabled).
+				Enable bool
+
+				// How frequently to commit updated offsets. Ineffective unless
+				// auto-commit is enabled (default 1s)
+				Interval time.Duration
+			}
+
 			// The initial offset to use if no offset was previously committed.
 			// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
 			Initial int64
@@ -364,12 +399,24 @@
 		// 	- use `ReadUncommitted` (default) to consume and return all messages in message channel
 		//	- use `ReadCommitted` to hide messages that are part of an aborted transaction
 		IsolationLevel IsolationLevel
+
+		// Interceptors to be called just before the record is sent to the
+		// messages channel. Interceptors allow you to intercept and possibly
+		// mutate the message before it is returned to the client. The
+		// *ConsumerMessage modified by the first interceptor's OnConsume() is
+		// passed to the second interceptor's OnConsume(), and so on in the
+		// interceptor chain.
+		Interceptors []ConsumerInterceptor
 	}
 
 	// A user-provided string sent with every request to the brokers for logging,
 	// debugging, and auditing purposes. Defaults to "sarama", but you should
 	// probably set it to something specific to your application.
 	ClientID string
+	// A rack identifier for this client. This can be any string value which
+	// indicates where this client is physically located.
+	// It corresponds with the broker config 'broker.rack'
+	RackID string
 	// The number of events to buffer in internal and external channels. This
 	// permits the producer and consumer to continue processing some messages
 	// in the background while user code is working, greatly improving throughput.
@@ -394,6 +441,8 @@
 func NewConfig() *Config {
 	c := &Config{}
 
+	c.Admin.Retry.Max = 5
+	c.Admin.Retry.Backoff = 100 * time.Millisecond
 	c.Admin.Timeout = 3 * time.Second
 
 	c.Net.MaxOpenRequests = 5
@@ -423,7 +472,8 @@
 	c.Consumer.MaxWaitTime = 250 * time.Millisecond
 	c.Consumer.MaxProcessingTime = 100 * time.Millisecond
 	c.Consumer.Return.Errors = false
-	c.Consumer.Offsets.CommitInterval = 1 * time.Second
+	c.Consumer.Offsets.AutoCommit.Enable = true
+	c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
 	c.Consumer.Offsets.Initial = OffsetNewest
 	c.Consumer.Offsets.Retry.Max = 3
 
@@ -436,7 +486,7 @@
 
 	c.ClientID = defaultClientID
 	c.ChannelBufferSize = 256
-	c.Version = MinVersion
+	c.Version = DefaultVersion
 	c.MetricRegistry = metrics.NewRegistry()
 
 	return c
@@ -504,8 +554,6 @@
 		return ConfigurationError("Net.ReadTimeout must be > 0")
 	case c.Net.WriteTimeout <= 0:
 		return ConfigurationError("Net.WriteTimeout must be > 0")
-	case c.Net.KeepAlive < 0:
-		return ConfigurationError("Net.KeepAlive must be >= 0")
 	case c.Net.SASL.Enable:
 		if c.Net.SASL.Mechanism == "" {
 			c.Net.SASL.Mechanism = SASLTypePlaintext
@@ -621,6 +669,10 @@
 		}
 	}
 
+	if c.Producer.Compression == CompressionZSTD && !c.Version.IsAtLeast(V2_1_0_0) {
+		return ConfigurationError("zstd compression requires Version >= V2_1_0_0")
+	}
+
 	if c.Producer.Idempotent {
 		if !c.Version.IsAtLeast(V0_11_0_0) {
 			return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0")
@@ -650,8 +702,8 @@
 		return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
 	case c.Consumer.Retry.Backoff < 0:
 		return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
-	case c.Consumer.Offsets.CommitInterval <= 0:
-		return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
+	case c.Consumer.Offsets.AutoCommit.Interval <= 0:
+		return ConfigurationError("Consumer.Offsets.AutoCommit.Interval must be > 0")
 	case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
 		return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
 	case c.Consumer.Offsets.Retry.Max < 0:
@@ -660,6 +712,11 @@
 		return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted")
 	}
 
+	if c.Consumer.Offsets.CommitInterval != 0 {
+		Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" +
+			" and should not be used. Please use Consumer.Offsets.AutoCommit, the current value will be ignored")
+	}
+
 	// validate IsolationLevel
 	if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) {
 		return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0")
@@ -693,3 +750,16 @@
 
 	return nil
 }
+
+func (c *Config) getDialer() proxy.Dialer {
+	if c.Net.Proxy.Enable {
+		Logger.Printf("using proxy %s", c.Net.Proxy.Dialer)
+		return c.Net.Proxy.Dialer
+	} else {
+		return &net.Dialer{
+			Timeout:   c.Net.DialTimeout,
+			KeepAlive: c.Net.KeepAlive,
+			LocalAddr: c.Net.LocalAddr,
+		}
+	}
+}
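
A minimal sketch (not part of the vendored code) showing the knobs introduced in this hunk wired into a config; the values are just the documented defaults made explicit, plus an illustrative rack ID.

package main

import (
	"time"

	"github.com/Shopify/sarama"
)

func newTunedConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Admin.Retry.Max = 5                        // retriable admin requests
	cfg.Admin.Retry.Backoff = 100 * time.Millisecond
	cfg.Consumer.Offsets.AutoCommit.Enable = true  // replaces the deprecated Consumer.Offsets.CommitInterval
	cfg.Consumer.Offsets.AutoCommit.Interval = time.Second
	cfg.RackID = "rack-1"                          // illustrative; should match the broker config 'broker.rack'
	cfg.Version = sarama.V2_3_0_0                  // RackID is only sent on FetchRequest v11 and newer
	return cfg
}
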
diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/Shopify/sarama/config_resource_type.go
index 5399d75..bef1053 100644
--- a/vendor/github.com/Shopify/sarama/config_resource_type.go
+++ b/vendor/github.com/Shopify/sarama/config_resource_type.go
@@ -1,22 +1,18 @@
 package sarama
 
-//ConfigResourceType is a type for config resource
+// ConfigResourceType is a type for resources that have configs.
 type ConfigResourceType int8
 
-// Taken from :
-// https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs#KIP-133:DescribeandAlterConfigsAdminAPIs-WireFormattypes
+// Taken from:
+// https://github.com/apache/kafka/blob/ed7c071e07f1f90e4c2895582f61ca090ced3c42/clients/src/main/java/org/apache/kafka/common/config/ConfigResource.java#L32-L55
 
 const (
-	//UnknownResource constant type
-	UnknownResource ConfigResourceType = iota
-	//AnyResource constant type
-	AnyResource
-	//TopicResource constant type
-	TopicResource
-	//GroupResource constant type
-	GroupResource
-	//ClusterResource constant type
-	ClusterResource
-	//BrokerResource constant type
-	BrokerResource
+	// UnknownResource constant type
+	UnknownResource ConfigResourceType = 0
+	// TopicResource constant type
+	TopicResource ConfigResourceType = 2
+	// BrokerResource constant type
+	BrokerResource ConfigResourceType = 4
+	// BrokerLoggerResource constant type
+	BrokerLoggerResource ConfigResourceType = 8
 )
diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go
index 72c4d7c..f9cd172 100644
--- a/vendor/github.com/Shopify/sarama/consumer.go
+++ b/vendor/github.com/Shopify/sarama/consumer.go
@@ -35,6 +35,10 @@
 	return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
 }
 
+func (ce ConsumerError) Unwrap() error {
+	return ce.Err
+}
+
 // ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
 // It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
 // when stopping.
@@ -299,6 +303,8 @@
 	errors   chan *ConsumerError
 	feeder   chan *FetchResponse
 
+	preferredReadReplica int32
+
 	trigger, dying chan none
 	closeOnce      sync.Once
 	topic          string
@@ -359,18 +365,29 @@
 	close(child.feeder)
 }
 
+func (child *partitionConsumer) preferredBroker() (*Broker, error) {
+	if child.preferredReadReplica >= 0 {
+		broker, err := child.consumer.client.Broker(child.preferredReadReplica)
+		if err == nil {
+			return broker, nil
+		}
+	}
+
+	// if the preferred replica cannot be found, fall back to the leader
+	return child.consumer.client.Leader(child.topic, child.partition)
+}
+
 func (child *partitionConsumer) dispatch() error {
 	if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
 		return err
 	}
 
-	var leader *Broker
-	var err error
-	if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
+	broker, err := child.preferredBroker()
+	if err != nil {
 		return err
 	}
 
-	child.broker = child.consumer.refBrokerConsumer(leader)
+	child.broker = child.consumer.refBrokerConsumer(broker)
 
 	child.broker.input <- child
 
@@ -422,13 +439,13 @@
 func (child *partitionConsumer) Close() error {
 	child.AsyncClose()
 
-	var errors ConsumerErrors
+	var consumerErrors ConsumerErrors
 	for err := range child.errors {
-		errors = append(errors, err)
+		consumerErrors = append(consumerErrors, err)
 	}
 
-	if len(errors) > 0 {
-		return errors
+	if len(consumerErrors) > 0 {
+		return consumerErrors
 	}
 	return nil
 }
@@ -451,6 +468,7 @@
 		}
 
 		for i, msg := range msgs {
+			child.interceptors(msg)
 		messageSelect:
 			select {
 			case <-child.dying:
@@ -464,6 +482,7 @@
 					child.broker.acks.Done()
 				remainingLoop:
 					for _, msg = range msgs[i:] {
+						child.interceptors(msg)
 						select {
 						case child.messages <- msg:
 						case <-child.dying:
@@ -586,6 +605,8 @@
 
 	consumerBatchSizeMetric.Update(int64(nRecs))
 
+	child.preferredReadReplica = block.PreferredReadReplica
+
 	if nRecs == 0 {
 		partialTrailingMessage, err := block.isPartial()
 		if err != nil {
@@ -623,7 +644,7 @@
 	abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions))
 	abortedTransactions := block.getAbortedTransactions()
 
-	messages := []*ConsumerMessage{}
+	var messages []*ConsumerMessage
 	for _, records := range block.RecordsSet {
 		switch records.recordsType {
 		case legacyRecords:
@@ -693,6 +714,12 @@
 	return messages, nil
 }
 
+func (child *partitionConsumer) interceptors(msg *ConsumerMessage) {
+	for _, interceptor := range child.conf.Consumer.Interceptors {
+		msg.safelyApplyInterceptor(interceptor)
+	}
+}
+
 type brokerConsumer struct {
 	consumer         *consumer
 	broker           *Broker
@@ -761,7 +788,7 @@
 	close(bc.newSubscriptions)
 }
 
-//subscriptionConsumer ensures we will get nil right away if no new subscriptions is available
+// subscriptionConsumer ensures we will get nil right away if no new subscriptions are available
 func (bc *brokerConsumer) subscriptionConsumer() {
 	<-bc.wait // wait for our first piece of work
 
@@ -776,7 +803,6 @@
 		}
 
 		response, err := bc.fetchNewMessages()
-
 		if err != nil {
 			Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
 			bc.abort(err)
@@ -810,15 +836,27 @@
 	}
 }
 
-//handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
+// handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
 func (bc *brokerConsumer) handleResponses() {
 	for child := range bc.subscriptions {
 		result := child.responseResult
 		child.responseResult = nil
 
+		if result == nil {
+			if preferredBroker, err := child.preferredBroker(); err == nil {
+				if bc.broker.ID() != preferredBroker.ID() {
+					// not an error but needs redispatching to consume from the preferred replica
+					child.trigger <- none{}
+					delete(bc.subscriptions, child)
+				}
+			}
+			continue
+		}
+
+		// Discard any replica preference.
+		child.preferredReadReplica = -1
+
 		switch result {
-		case nil:
-			// no-op
 		case errTimedOut:
 			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
 				bc.broker.ID(), child.topic, child.partition)
@@ -887,6 +925,21 @@
 		request.Version = 4
 		request.Isolation = bc.consumer.conf.Consumer.IsolationLevel
 	}
+	if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) {
+		request.Version = 7
+		// We do not currently implement KIP-227 FetchSessions. Setting the id to 0
+		// and the epoch to -1 tells the broker not to generate a session ID that
+		// we're just going to ignore anyway.
+		request.SessionID = 0
+		request.SessionEpoch = -1
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) {
+		request.Version = 10
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) {
+		request.Version = 11
+		request.RackID = bc.consumer.conf.RackID
+	}
 
 	for child := range bc.subscriptions {
 		request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
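
A minimal sketch of a consumer interceptor plugged into the new Consumer.Interceptors hook invoked by partitionConsumer.interceptors above; the loggingInterceptor type is illustrative.

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

// loggingInterceptor runs OnConsume just before each message is delivered on the Messages channel.
type loggingInterceptor struct{}

func (loggingInterceptor) OnConsume(msg *sarama.ConsumerMessage) {
	log.Printf("consumed %s/%d@%d", msg.Topic, msg.Partition, msg.Offset)
}

func newInterceptedConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Consumer.Interceptors = []sarama.ConsumerInterceptor{loggingInterceptor{}}
	return cfg
}
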
diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go
index 8de9513..2bf236a 100644
--- a/vendor/github.com/Shopify/sarama/consumer_group.go
+++ b/vendor/github.com/Shopify/sarama/consumer_group.go
@@ -38,6 +38,9 @@
 	// as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout
 	// is exceeded, the consumer will be removed from the group by Kafka, which will cause offset
 	// commit failures.
+	// This method should be called inside an infinite loop; when a
+	// server-side rebalance happens, the consumer session will need to be
+	// recreated to get the new claims.
 	Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error
 
 	// Errors returns a read channel of errors that occurred during the consumer life-cycle.
@@ -63,6 +66,8 @@
 	lock      sync.Mutex
 	closed    chan none
 	closeOnce sync.Once
+
+	userData []byte
 }
 
 // NewConsumerGroup creates a new consumer group the given broker addresses and configuration.
@@ -118,9 +123,6 @@
 	c.closeOnce.Do(func() {
 		close(c.closed)
 
-		c.lock.Lock()
-		defer c.lock.Unlock()
-
 		// leave group
 		if e := c.leave(); e != nil {
 			err = e
@@ -171,6 +173,11 @@
 		return err
 	}
 
+	// Periodically check whether the partition count of any subscribed topic has changed;
+	// a change ends the session, which triggers a rebalance. The goroutine exits with the
+	// session, so repeated Consume calls do not leak extra loopCheckPartitionNumbers goroutines.
+	go c.loopCheckPartitionNumbers(topics, sess)
+
 	// Wait for session exit signal
 	<-sess.ctx.Done()
 
@@ -248,40 +255,41 @@
 	}
 
 	// Sync consumer group
-	sync, err := c.syncGroupRequest(coordinator, plan, join.GenerationId)
+	groupRequest, err := c.syncGroupRequest(coordinator, plan, join.GenerationId)
 	if err != nil {
 		_ = coordinator.Close()
 		return nil, err
 	}
-	switch sync.Err {
+	switch groupRequest.Err {
 	case ErrNoError:
 	case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
 		c.memberID = ""
 		return c.newSession(ctx, topics, handler, retries)
 	case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
 		if retries <= 0 {
-			return nil, sync.Err
+			return nil, groupRequest.Err
 		}
 
 		return c.retryNewSession(ctx, topics, handler, retries, true)
 	case ErrRebalanceInProgress: // retry after backoff
 		if retries <= 0 {
-			return nil, sync.Err
+			return nil, groupRequest.Err
 		}
 
 		return c.retryNewSession(ctx, topics, handler, retries, false)
 	default:
-		return nil, sync.Err
+		return nil, groupRequest.Err
 	}
 
 	// Retrieve and sort claims
 	var claims map[string][]int32
-	if len(sync.MemberAssignment) > 0 {
-		members, err := sync.GetMemberAssignment()
+	if len(groupRequest.MemberAssignment) > 0 {
+		members, err := groupRequest.GetMemberAssignment()
 		if err != nil {
 			return nil, err
 		}
 		claims = members.Topics
+		c.userData = members.UserData
 
 		for _, partitions := range claims {
 			sort.Sort(int32Slice(partitions))
@@ -303,9 +311,14 @@
 		req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond)
 	}
 
+	// use static user-data if configured, otherwise use consumer-group userdata from the last sync
+	userData := c.config.Consumer.Group.Member.UserData
+	if len(userData) == 0 {
+		userData = c.userData
+	}
 	meta := &ConsumerGroupMemberMetadata{
 		Topics:   topics,
-		UserData: c.config.Consumer.Group.Member.UserData,
+		UserData: userData,
 	}
 	strategy := c.config.Consumer.Group.Rebalance.Strategy
 	if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
@@ -321,13 +334,17 @@
 		MemberId:     c.memberID,
 		GenerationId: generationID,
 	}
+	strategy := c.config.Consumer.Group.Rebalance.Strategy
 	for memberID, topics := range plan {
-		err := req.AddGroupAssignmentMember(memberID, &ConsumerGroupMemberAssignment{
-			Topics: topics,
-		})
+		assignment := &ConsumerGroupMemberAssignment{Topics: topics}
+		userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID)
 		if err != nil {
 			return nil, err
 		}
+		assignment.UserData = userDataBytes
+		if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil {
+			return nil, err
+		}
 	}
 	return coordinator.SyncGroup(req)
 }
@@ -362,8 +379,10 @@
 	return strategy.Plan(members, topics)
 }
 
-// Leaves the cluster, called by Close, protected by lock.
+// Leaves the cluster, called by Close.
 func (c *consumerGroup) leave() error {
+	c.lock.Lock()
+	defer c.lock.Unlock()
 	if c.memberID == "" {
 		return nil
 	}
@@ -395,12 +414,6 @@
 }
 
 func (c *consumerGroup) handleError(err error, topic string, partition int32) {
-	select {
-	case <-c.closed:
-		return
-	default:
-	}
-
 	if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 {
 		err = &ConsumerError{
 			Topic:     topic,
@@ -409,14 +422,67 @@
 		}
 	}
 
-	if c.config.Consumer.Return.Errors {
-		select {
-		case c.errors <- err:
-		default:
-		}
-	} else {
+	if !c.config.Consumer.Return.Errors {
 		Logger.Println(err)
+		return
 	}
+
+	select {
+	case <-c.closed:
+		// consumer is closed
+		return
+	default:
+	}
+
+	select {
+	case c.errors <- err:
+	default:
+		// no error listener
+	}
+}
+
+func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *consumerGroupSession) {
+	pause := time.NewTicker(c.config.Metadata.RefreshFrequency)
+	defer session.cancel()
+	defer pause.Stop()
+	var oldTopicToPartitionNum map[string]int
+	var err error
+	if oldTopicToPartitionNum, err = c.topicToPartitionNumbers(topics); err != nil {
+		return
+	}
+	for {
+		if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil {
+			return
+		} else {
+			for topic, num := range oldTopicToPartitionNum {
+				if newTopicToPartitionNum[topic] != num {
+					return // trigger the end of the session on exit
+				}
+			}
+		}
+		select {
+		case <-pause.C:
+		case <-session.ctx.Done():
+			Logger.Printf("loop check partition number coroutine will exit, topics %s", topics)
+			// exit if the session was closed elsewhere
+			return
+		case <-c.closed:
+			return
+		}
+	}
+}
+
+func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int, error) {
+	topicToPartitionNum := make(map[string]int, len(topics))
+	for _, topic := range topics {
+		if partitionNum, err := c.client.Partitions(topic); err != nil {
+			Logger.Printf("Consumer Group topic %s get partition number failed %v", topic, err)
+			return nil, err
+		} else {
+			topicToPartitionNum[topic] = len(partitionNum)
+		}
+	}
+	return topicToPartitionNum, nil
 }
 
 // --------------------------------------------------------------------
@@ -447,6 +513,11 @@
 	// message twice, and your processing should ideally be idempotent.
 	MarkOffset(topic string, partition int32, offset int64, metadata string)
 
+	// Commit the offset to the backend
+	//
+	// Note: calling Commit performs a blocking synchronous operation.
+	Commit()
+
 	// ResetOffset resets to the provided offset, alongside a metadata string that
 	// represents the state of the partition consumer at that point in time. Reset
 	// acts as a counterpart to MarkOffset, the difference being that it allows to
@@ -558,6 +629,10 @@
 	}
 }
 
+func (s *consumerGroupSession) Commit() {
+	s.offsets.Commit()
+}
+
 func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
 	if pom := s.offsets.findPOM(topic, partition); pom != nil {
 		pom.ResetOffset(offset, metadata)
@@ -657,6 +732,9 @@
 	pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval)
 	defer pause.Stop()
 
+	retryBackoff := time.NewTimer(s.parent.config.Metadata.Retry.Backoff)
+	defer retryBackoff.Stop()
+
 	retries := s.parent.config.Metadata.Retry.Max
 	for {
 		coordinator, err := s.parent.client.Coordinator(s.parent.groupID)
@@ -665,11 +743,11 @@
 				s.parent.handleError(err, "", -1)
 				return
 			}
-
+			retryBackoff.Reset(s.parent.config.Metadata.Retry.Backoff)
 			select {
 			case <-s.hbDying:
 				return
-			case <-time.After(s.parent.config.Metadata.Retry.Backoff):
+			case <-retryBackoff.C:
 				retries--
 			}
 			continue
@@ -694,7 +772,7 @@
 		case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration:
 			return
 		default:
-			s.parent.handleError(err, "", -1)
+			s.parent.handleError(resp.Err, "", -1)
 			return
 		}
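
A minimal ConsumeClaim sketch using the blocking Commit added to ConsumerGroupSession above, intended for setups where Consumer.Offsets.AutoCommit.Enable is false; the handler type is illustrative.

package main

import "github.com/Shopify/sarama"

type manualCommitHandler struct{}

func (manualCommitHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (manualCommitHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (manualCommitHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		sess.MarkMessage(msg, "")
		sess.Commit() // blocking, synchronous flush of the marked offsets
	}
	return nil
}
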
 
diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go
index 2d02cc3..21b11e9 100644
--- a/vendor/github.com/Shopify/sarama/consumer_group_members.go
+++ b/vendor/github.com/Shopify/sarama/consumer_group_members.go
@@ -1,6 +1,6 @@
 package sarama
 
-//ConsumerGroupMemberMetadata holds the metadata for consumer group
+// ConsumerGroupMemberMetadata holds the metadata for a consumer group
 type ConsumerGroupMemberMetadata struct {
 	Version  int16
 	Topics   []string
@@ -37,7 +37,7 @@
 	return nil
 }
 
-//ConsumerGroupMemberAssignment holds the member assignment for a consume group
+// ConsumerGroupMemberAssignment holds the member assignment for a consumer group
 type ConsumerGroupMemberAssignment struct {
 	Version  int16
 	Topics   map[string][]int32
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
index a8dcaef..5c18e04 100644
--- a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
@@ -1,6 +1,6 @@
 package sarama
 
-//ConsumerMetadataRequest is used for metadata requests
+// ConsumerMetadataRequest is used for metadata requests
 type ConsumerMetadataRequest struct {
 	ConsumerGroup string
 }
@@ -29,6 +29,10 @@
 	return 0
 }
 
+func (r *ConsumerMetadataRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
 	return V0_8_2_0
 }
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
index f39a871..7fe0cf9 100644
--- a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
@@ -5,7 +5,7 @@
 	"strconv"
 )
 
-//ConsumerMetadataResponse holds the response for a consumer group meta data requests
+// ConsumerMetadataResponse holds the response for a consumer group metadata request
 type ConsumerMetadataResponse struct {
 	Err             KError
 	Coordinator     *Broker
@@ -73,6 +73,10 @@
 	return 0
 }
 
+func (r *ConsumerMetadataResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
 	return V0_8_2_0
 }
diff --git a/vendor/github.com/Shopify/sarama/control_record.go b/vendor/github.com/Shopify/sarama/control_record.go
index 9b75ab5..244a821 100644
--- a/vendor/github.com/Shopify/sarama/control_record.go
+++ b/vendor/github.com/Shopify/sarama/control_record.go
@@ -1,14 +1,14 @@
 package sarama
 
-//ControlRecordType ...
+// ControlRecordType ...
 type ControlRecordType int
 
 const (
-	//ControlRecordAbort is a control record for abort
+	// ControlRecordAbort is a control record for abort
 	ControlRecordAbort ControlRecordType = iota
-	//ControlRecordCommit is a control record for commit
+	// ControlRecordCommit is a control record for commit
 	ControlRecordCommit
-	//ControlRecordUnknown is a control record of unknown type
+	// ControlRecordUnknown is a control record of unknown type
 	ControlRecordUnknown
 )
 
@@ -23,16 +23,6 @@
 
 func (cr *ControlRecord) decode(key, value packetDecoder) error {
 	var err error
-	cr.Version, err = value.getInt16()
-	if err != nil {
-		return err
-	}
-
-	cr.CoordinatorEpoch, err = value.getInt32()
-	if err != nil {
-		return err
-	}
-
 	// There is a version for the value part AND the key part. And I have no idea if they are supposed to match or not
 	// Either way, all these versions can only be 0 for now
 	cr.Version, err = key.getInt16()
@@ -55,6 +45,18 @@
 		// UNKNOWN is used to indicate a control type which the client is not aware of and should be ignored
 		cr.Type = ControlRecordUnknown
 	}
+	// we only want to parse the value if we are decoding a control record of a known type
+	if cr.Type != ControlRecordUnknown {
+		cr.Version, err = value.getInt16()
+		if err != nil {
+			return err
+		}
+
+		cr.CoordinatorEpoch, err = value.getInt32()
+		if err != nil {
+			return err
+		}
+	}
 	return nil
 }
 
diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go
index 38189a3..32236e5 100644
--- a/vendor/github.com/Shopify/sarama/crc32_field.go
+++ b/vendor/github.com/Shopify/sarama/crc32_field.go
@@ -72,6 +72,7 @@
 
 	return nil
 }
+
 func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) {
 	var tab *crc32.Table
 	switch c.polynomial {
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go
index af321e9..46fb044 100644
--- a/vendor/github.com/Shopify/sarama/create_partitions_request.go
+++ b/vendor/github.com/Shopify/sarama/create_partitions_request.go
@@ -67,6 +67,10 @@
 	return 0
 }
 
+func (r *CreatePartitionsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
 	return V1_0_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go
index bb18204..12ce788 100644
--- a/vendor/github.com/Shopify/sarama/create_partitions_response.go
+++ b/vendor/github.com/Shopify/sarama/create_partitions_response.go
@@ -63,6 +63,10 @@
 	return 0
 }
 
+func (r *CreatePartitionsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
 	return V1_0_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/Shopify/sarama/create_topics_request.go
index 709c0a4..287acd0 100644
--- a/vendor/github.com/Shopify/sarama/create_topics_request.go
+++ b/vendor/github.com/Shopify/sarama/create_topics_request.go
@@ -79,6 +79,10 @@
 	return c.Version
 }
 
+func (r *CreateTopicsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (c *CreateTopicsRequest) requiredVersion() KafkaVersion {
 	switch c.Version {
 	case 2:
diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go
index a493e02..7e1448a 100644
--- a/vendor/github.com/Shopify/sarama/create_topics_response.go
+++ b/vendor/github.com/Shopify/sarama/create_topics_response.go
@@ -70,6 +70,10 @@
 	return c.Version
 }
 
+func (c *CreateTopicsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (c *CreateTopicsResponse) requiredVersion() KafkaVersion {
 	switch c.Version {
 	case 2:
diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go
index eaccbfc..af45fda 100644
--- a/vendor/github.com/Shopify/sarama/decompress.go
+++ b/vendor/github.com/Shopify/sarama/decompress.go
@@ -7,7 +7,7 @@
 	"io/ioutil"
 	"sync"
 
-	"github.com/eapache/go-xerial-snappy"
+	snappy "github.com/eapache/go-xerial-snappy"
 	"github.com/pierrec/lz4"
 )
 
@@ -26,34 +26,32 @@
 	case CompressionNone:
 		return data, nil
 	case CompressionGZIP:
-		var (
-			err        error
-			reader     *gzip.Reader
-			readerIntf = gzipReaderPool.Get()
-		)
-		if readerIntf != nil {
-			reader = readerIntf.(*gzip.Reader)
-		} else {
+		var err error
+		reader, ok := gzipReaderPool.Get().(*gzip.Reader)
+		if !ok {
 			reader, err = gzip.NewReader(bytes.NewReader(data))
-			if err != nil {
-				return nil, err
-			}
+		} else {
+			err = reader.Reset(bytes.NewReader(data))
+		}
+
+		if err != nil {
+			return nil, err
 		}
 
 		defer gzipReaderPool.Put(reader)
 
-		if err := reader.Reset(bytes.NewReader(data)); err != nil {
-			return nil, err
-		}
-
 		return ioutil.ReadAll(reader)
 	case CompressionSnappy:
 		return snappy.Decode(data)
 	case CompressionLZ4:
-		reader := lz4ReaderPool.Get().(*lz4.Reader)
+		reader, ok := lz4ReaderPool.Get().(*lz4.Reader)
+		if !ok {
+			reader = lz4.NewReader(bytes.NewReader(data))
+		} else {
+			reader.Reset(bytes.NewReader(data))
+		}
 		defer lz4ReaderPool.Put(reader)
 
-		reader.Reset(bytes.NewReader(data))
 		return ioutil.ReadAll(reader)
 	case CompressionZSTD:
 		return zstdDecompress(nil, data)
diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/Shopify/sarama/delete_groups_request.go
index 305a324..4ac8bbe 100644
--- a/vendor/github.com/Shopify/sarama/delete_groups_request.go
+++ b/vendor/github.com/Shopify/sarama/delete_groups_request.go
@@ -21,6 +21,10 @@
 	return 0
 }
 
+func (r *DeleteGroupsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion {
 	return V1_1_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/Shopify/sarama/delete_groups_response.go
index c067ebb..5e7b1ed 100644
--- a/vendor/github.com/Shopify/sarama/delete_groups_response.go
+++ b/vendor/github.com/Shopify/sarama/delete_groups_response.go
@@ -65,6 +65,10 @@
 	return 0
 }
 
+func (r *DeleteGroupsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion {
 	return V1_1_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/Shopify/sarama/delete_records_request.go
index 93efafd..dc106b1 100644
--- a/vendor/github.com/Shopify/sarama/delete_records_request.go
+++ b/vendor/github.com/Shopify/sarama/delete_records_request.go
@@ -77,6 +77,10 @@
 	return 0
 }
 
+func (d *DeleteRecordsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/delete_records_response.go b/vendor/github.com/Shopify/sarama/delete_records_response.go
index 733a58b..d530b4c 100644
--- a/vendor/github.com/Shopify/sarama/delete_records_response.go
+++ b/vendor/github.com/Shopify/sarama/delete_records_response.go
@@ -80,6 +80,10 @@
 	return 0
 }
 
+func (d *DeleteRecordsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/Shopify/sarama/delete_topics_request.go
index 911f67d..ba6780a 100644
--- a/vendor/github.com/Shopify/sarama/delete_topics_request.go
+++ b/vendor/github.com/Shopify/sarama/delete_topics_request.go
@@ -38,6 +38,10 @@
 	return d.Version
 }
 
+func (d *DeleteTopicsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
 	switch d.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/Shopify/sarama/delete_topics_response.go
index 3422546..733961a 100644
--- a/vendor/github.com/Shopify/sarama/delete_topics_response.go
+++ b/vendor/github.com/Shopify/sarama/delete_topics_response.go
@@ -68,6 +68,10 @@
 	return d.Version
 }
 
+func (d *DeleteTopicsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion {
 	switch d.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go
index ccb587b..4c34880 100644
--- a/vendor/github.com/Shopify/sarama/describe_configs_request.go
+++ b/vendor/github.com/Shopify/sarama/describe_configs_request.go
@@ -61,7 +61,6 @@
 		r.Resources[i].Name = name
 
 		confLength, err := pd.getArrayLength()
-
 		if err != nil {
 			return err
 		}
@@ -100,6 +99,10 @@
 	return r.Version
 }
 
+func (r *DescribeConfigsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go
index 5737232..928f5a5 100644
--- a/vendor/github.com/Shopify/sarama/describe_configs_response.go
+++ b/vendor/github.com/Shopify/sarama/describe_configs_response.go
@@ -112,6 +112,10 @@
 	return r.Version
 }
 
+func (r *DescribeConfigsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
@@ -220,7 +224,7 @@
 	return nil
 }
 
-//https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration
 func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
 	if version == 0 {
 		r.Source = SourceUnknown
@@ -249,12 +253,16 @@
 			return err
 		}
 		r.Default = defaultB
+		if defaultB {
+			r.Source = SourceDefault
+		}
 	} else {
 		source, err := pd.getInt8()
 		if err != nil {
 			return err
 		}
 		r.Source = ConfigSource(source)
+		r.Default = r.Source == SourceDefault
 	}
 
 	sensitive, err := pd.getBool()
@@ -277,7 +285,6 @@
 			}
 			r.Synonyms[i] = s
 		}
-
 	}
 	return nil
 }
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go
index 1fb3567..f8962da 100644
--- a/vendor/github.com/Shopify/sarama/describe_groups_request.go
+++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go
@@ -21,6 +21,10 @@
 	return 0
 }
 
+func (r *DescribeGroupsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go
index 542b3a9..bc242e4 100644
--- a/vendor/github.com/Shopify/sarama/describe_groups_response.go
+++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go
@@ -43,6 +43,10 @@
 	return 0
 }
 
+func (r *DescribeGroupsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go
new file mode 100644
index 0000000..c0bf04e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go
@@ -0,0 +1,87 @@
+package sarama
+
+// DescribeLogDirsRequest is a describe request to get partitions' log size
+type DescribeLogDirsRequest struct {
+	// Version 0 and 1 are equal
+	// The version number is bumped to indicate that on quota violation brokers send out responses before throttling.
+	Version int16
+
+	// If this is an empty array, all topics will be queried
+	DescribeTopics []DescribeLogDirsRequestTopic
+}
+
+// DescribeLogDirsRequestTopic is a describe request about the log dir of one or more partitions within a Topic
+type DescribeLogDirsRequestTopic struct {
+	Topic        string
+	PartitionIDs []int32
+}
+
+func (r *DescribeLogDirsRequest) encode(pe packetEncoder) error {
+	length := len(r.DescribeTopics)
+	if length == 0 {
+		// In order to query all topics we must send null
+		length = -1
+	}
+
+	if err := pe.putArrayLength(length); err != nil {
+		return err
+	}
+
+	for _, d := range r.DescribeTopics {
+		if err := pe.putString(d.Topic); err != nil {
+			return err
+		}
+
+		if err := pe.putInt32Array(d.PartitionIDs); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeLogDirsRequest) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == -1 {
+		n = 0
+	}
+
+	topics := make([]DescribeLogDirsRequestTopic, n)
+	for i := 0; i < n; i++ {
+		topics[i] = DescribeLogDirsRequestTopic{}
+
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		topics[i].Topic = topic
+
+		pIDs, err := pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+		topics[i].PartitionIDs = pIDs
+	}
+	r.DescribeTopics = topics
+
+	return nil
+}
+
+func (r *DescribeLogDirsRequest) key() int16 {
+	return 35
+}
+
+func (r *DescribeLogDirsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeLogDirsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion {
+	return V1_0_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go
new file mode 100644
index 0000000..411da38
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go
@@ -0,0 +1,229 @@
+package sarama
+
+import "time"
+
+type DescribeLogDirsResponse struct {
+	ThrottleTime time.Duration
+
+	// Version 0 and 1 are equal
+	// The version number is bumped to indicate that on quota violation brokers send out responses before throttling.
+	Version int16
+
+	LogDirs []DescribeLogDirsResponseDirMetadata
+}
+
+func (r *DescribeLogDirsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(r.LogDirs)); err != nil {
+		return err
+	}
+
+	for _, dir := range r.LogDirs {
+		if err := dir.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeLogDirsResponse) decode(pd packetDecoder, version int16) error {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	// Decode array of DescribeLogDirsResponseDirMetadata
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.LogDirs = make([]DescribeLogDirsResponseDirMetadata, n)
+	for i := 0; i < n; i++ {
+		dir := DescribeLogDirsResponseDirMetadata{}
+		if err := dir.decode(pd, version); err != nil {
+			return err
+		}
+		r.LogDirs[i] = dir
+	}
+
+	return nil
+}
+
+func (r *DescribeLogDirsResponse) key() int16 {
+	return 35
+}
+
+func (r *DescribeLogDirsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeLogDirsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion {
+	return V1_0_0_0
+}
+
+type DescribeLogDirsResponseDirMetadata struct {
+	ErrorCode KError
+
+	// The absolute log directory path
+	Path   string
+	Topics []DescribeLogDirsResponseTopic
+}
+
+func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.ErrorCode))
+
+	if err := pe.putString(r.Path); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Topics)); err != nil {
+		return err
+	}
+	for _, topic := range r.Topics {
+		if err := topic.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeLogDirsResponseDirMetadata) decode(pd packetDecoder, version int16) error {
+	errCode, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.ErrorCode = KError(errCode)
+
+	path, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Path = path
+
+	// Decode array of DescribeLogDirsResponseTopic
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Topics = make([]DescribeLogDirsResponseTopic, n)
+	for i := 0; i < n; i++ {
+		t := DescribeLogDirsResponseTopic{}
+
+		if err := t.decode(pd, version); err != nil {
+			return err
+		}
+
+		r.Topics[i] = t
+	}
+
+	return nil
+}
+
+// DescribeLogDirsResponseTopic contains a topic's partitions descriptions
+type DescribeLogDirsResponseTopic struct {
+	Topic      string
+	Partitions []DescribeLogDirsResponsePartition
+}
+
+func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder) error {
+	if err := pe.putString(r.Topic); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Partitions)); err != nil {
+		return err
+	}
+	for _, partition := range r.Partitions {
+		if err := partition.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeLogDirsResponseTopic) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Topic = t
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	r.Partitions = make([]DescribeLogDirsResponsePartition, n)
+	for i := 0; i < n; i++ {
+		p := DescribeLogDirsResponsePartition{}
+		if err := p.decode(pd, version); err != nil {
+			return err
+		}
+		r.Partitions[i] = p
+	}
+
+	return nil
+}
+
+// DescribeLogDirsResponsePartition describes a partition's log directory
+type DescribeLogDirsResponsePartition struct {
+	PartitionID int32
+
+	// The size of the log segments of the partition in bytes.
+	Size int64
+
+	// The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or
+	// current replica's LEO (if it is the future log for the partition)
+	OffsetLag int64
+
+	// True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of
+	// the replica in the future.
+	IsTemporary bool
+}
+
+func (r *DescribeLogDirsResponsePartition) encode(pe packetEncoder) error {
+	pe.putInt32(r.PartitionID)
+	pe.putInt64(r.Size)
+	pe.putInt64(r.OffsetLag)
+	pe.putBool(r.IsTemporary)
+
+	return nil
+}
+
+func (r *DescribeLogDirsResponsePartition) decode(pd packetDecoder, version int16) error {
+	pID, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	r.PartitionID = pID
+
+	size, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+	r.Size = size
+
+	lag, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+	r.OffsetLag = lag
+
+	isTemp, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.IsTemporary = isTemp
+
+	return nil
+}
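
A short sketch of constructing the new request type; per the encode comment above, an empty DescribeTopics slice queries every topic, and the topic/partition values shown are illustrative. Sending the request still goes through a broker connection, which is outside this hunk.

package main

import "github.com/Shopify/sarama"

func newLogDirsRequest() *sarama.DescribeLogDirsRequest {
	return &sarama.DescribeLogDirsRequest{
		DescribeTopics: []sarama.DescribeLogDirsRequestTopic{
			{Topic: "my-topic", PartitionIDs: []int32{0, 1, 2}},
		},
	}
}
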
diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go
new file mode 100644
index 0000000..b5b5940
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go
@@ -0,0 +1,70 @@
+package sarama
+
+// DescribeUserScramCredentialsRequest is a request to get the list of SCRAM user names
+type DescribeUserScramCredentialsRequest struct {
+	// Only version 0 is currently supported
+	Version int16
+
+	// If this is an empty array, all users will be queried
+	DescribeUsers []DescribeUserScramCredentialsRequestUser
+}
+
+// DescribeUserScramCredentialsRequestUser is a describe request about a specific user name
+type DescribeUserScramCredentialsRequestUser struct {
+	Name string
+}
+
+func (r *DescribeUserScramCredentialsRequest) encode(pe packetEncoder) error {
+	pe.putCompactArrayLength(len(r.DescribeUsers))
+	for _, d := range r.DescribeUsers {
+		if err := pe.putCompactString(d.Name); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getCompactArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == -1 {
+		n = 0
+	}
+
+	r.DescribeUsers = make([]DescribeUserScramCredentialsRequestUser, n)
+	for i := 0; i < n; i++ {
+		r.DescribeUsers[i] = DescribeUserScramCredentialsRequestUser{}
+		if r.DescribeUsers[i].Name, err = pd.getCompactString(); err != nil {
+			return err
+		}
+		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (r *DescribeUserScramCredentialsRequest) key() int16 {
+	return 50
+}
+
+func (r *DescribeUserScramCredentialsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeUserScramCredentialsRequest) headerVersion() int16 {
+	return 2
+}
+
+func (r *DescribeUserScramCredentialsRequest) requiredVersion() KafkaVersion {
+	return V2_7_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go
new file mode 100644
index 0000000..2656c2f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go
@@ -0,0 +1,168 @@
+package sarama
+
+import "time"
+
+type ScramMechanismType int8
+
+const (
+	SCRAM_MECHANISM_UNKNOWN ScramMechanismType = iota // 0
+	SCRAM_MECHANISM_SHA_256                           // 1
+	SCRAM_MECHANISM_SHA_512                           // 2
+)
+
+func (s ScramMechanismType) String() string {
+	switch s {
+	case 1:
+		return SASLTypeSCRAMSHA256
+	case 2:
+		return SASLTypeSCRAMSHA512
+	default:
+		return "Unknown"
+	}
+}
+
+type DescribeUserScramCredentialsResponse struct {
+	// Only version 0 is currently supported
+	Version int16
+
+	ThrottleTime time.Duration
+
+	ErrorCode    KError
+	ErrorMessage *string
+
+	Results []*DescribeUserScramCredentialsResult
+}
+
+type DescribeUserScramCredentialsResult struct {
+	User string
+
+	ErrorCode    KError
+	ErrorMessage *string
+
+	CredentialInfos []*UserScramCredentialsResponseInfo
+}
+
+type UserScramCredentialsResponseInfo struct {
+	Mechanism  ScramMechanismType
+	Iterations int32
+}
+
+func (r *DescribeUserScramCredentialsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+
+	pe.putInt16(int16(r.ErrorCode))
+	if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
+		return err
+	}
+
+	pe.putCompactArrayLength(len(r.Results))
+	for _, u := range r.Results {
+		if err := pe.putCompactString(u.User); err != nil {
+			return err
+		}
+		pe.putInt16(int16(u.ErrorCode))
+		if err := pe.putNullableCompactString(u.ErrorMessage); err != nil {
+			return err
+		}
+
+		pe.putCompactArrayLength(len(u.CredentialInfos))
+		for _, c := range u.CredentialInfos {
+			pe.putInt8(int8(c.Mechanism))
+			pe.putInt32(c.Iterations)
+			pe.putEmptyTaggedFieldArray()
+		}
+
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeUserScramCredentialsResponse) decode(pd packetDecoder, version int16) error {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.ErrorCode = KError(kerr)
+	if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
+		return err
+	}
+
+	numUsers, err := pd.getCompactArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if numUsers > 0 {
+		r.Results = make([]*DescribeUserScramCredentialsResult, numUsers)
+		for i := 0; i < numUsers; i++ {
+			r.Results[i] = &DescribeUserScramCredentialsResult{}
+			if r.Results[i].User, err = pd.getCompactString(); err != nil {
+				return err
+			}
+
+			errorCode, err := pd.getInt16()
+			if err != nil {
+				return err
+			}
+			r.Results[i].ErrorCode = KError(errorCode)
+			if r.Results[i].ErrorMessage, err = pd.getCompactNullableString(); err != nil {
+				return err
+			}
+
+			numCredentialInfos, err := pd.getCompactArrayLength()
+			if err != nil {
+				return err
+			}
+
+			r.Results[i].CredentialInfos = make([]*UserScramCredentialsResponseInfo, numCredentialInfos)
+			for j := 0; j < numCredentialInfos; j++ {
+				r.Results[i].CredentialInfos[j] = &UserScramCredentialsResponseInfo{}
+				scramMechanism, err := pd.getInt8()
+				if err != nil {
+					return err
+				}
+				r.Results[i].CredentialInfos[j].Mechanism = ScramMechanismType(scramMechanism)
+				if r.Results[i].CredentialInfos[j].Iterations, err = pd.getInt32(); err != nil {
+					return err
+				}
+				if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+					return err
+				}
+			}
+
+			if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (r *DescribeUserScramCredentialsResponse) key() int16 {
+	return 50
+}
+
+func (r *DescribeUserScramCredentialsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeUserScramCredentialsResponse) headerVersion() int16 {
+	return 2
+}
+
+func (r *DescribeUserScramCredentialsResponse) requiredVersion() KafkaVersion {
+	return V2_7_0_0
+}
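
A short sketch of constructing the new SCRAM describe request; an empty DescribeUsers slice queries all users, and the user name shown is illustrative.

package main

import "github.com/Shopify/sarama"

func newScramDescribeRequest() *sarama.DescribeUserScramCredentialsRequest {
	return &sarama.DescribeUserScramCredentialsRequest{
		DescribeUsers: []sarama.DescribeUserScramCredentialsRequestUser{
			{Name: "alice"},
		},
	}
}
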
diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml
index 3f4d569..7bf9ff9 100644
--- a/vendor/github.com/Shopify/sarama/dev.yml
+++ b/vendor/github.com/Shopify/sarama/dev.yml
@@ -2,7 +2,7 @@
 
 up:
   - go:
-      version: '1.12'
+      version: '1.16'
 
 commands:
   test:
diff --git a/vendor/github.com/Shopify/sarama/docker-compose.yml b/vendor/github.com/Shopify/sarama/docker-compose.yml
new file mode 100644
index 0000000..8e9c24e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/docker-compose.yml
@@ -0,0 +1,134 @@
+version: '3.7'
+services:
+  zookeeper-1:
+    image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
+    restart: always
+    environment:
+      ZOOKEEPER_SERVER_ID: '1'
+      ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888'
+      ZOOKEEPER_CLIENT_PORT: '2181'
+      ZOOKEEPER_PEER_PORT: '2888'
+      ZOOKEEPER_LEADER_PORT: '3888'
+      ZOOKEEPER_INIT_LIMIT: '10'
+      ZOOKEEPER_SYNC_LIMIT: '5'
+      ZOOKEEPER_MAX_CLIENT_CONNS: '0'
+  zookeeper-2:
+    image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
+    restart: always
+    environment:
+      ZOOKEEPER_SERVER_ID: '2'
+      ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888'
+      ZOOKEEPER_CLIENT_PORT: '2181'
+      ZOOKEEPER_PEER_PORT: '2888'
+      ZOOKEEPER_LEADER_PORT: '3888'
+      ZOOKEEPER_INIT_LIMIT: '10'
+      ZOOKEEPER_SYNC_LIMIT: '5'
+      ZOOKEEPER_MAX_CLIENT_CONNS: '0'
+  zookeeper-3:
+    image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
+    restart: always
+    environment:
+      ZOOKEEPER_SERVER_ID: '3'
+      ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888'
+      ZOOKEEPER_CLIENT_PORT: '2181'
+      ZOOKEEPER_PEER_PORT: '2888'
+      ZOOKEEPER_LEADER_PORT: '3888'
+      ZOOKEEPER_INIT_LIMIT: '10'
+      ZOOKEEPER_SYNC_LIMIT: '5'
+      ZOOKEEPER_MAX_CLIENT_CONNS: '0'
+  kafka-1:
+    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
+    restart: always
+    environment:
+      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
+      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091'
+      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
+      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
+      KAFKA_BROKER_ID: '1'
+      KAFKA_BROKER_RACK: '1'
+      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
+      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
+      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
+      KAFKA_DELETE_TOPIC_ENABLE: 'true'
+      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
+  kafka-2:
+    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
+    restart: always
+    environment:
+      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
+      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092'
+      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
+      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
+      KAFKA_BROKER_ID: '2'
+      KAFKA_BROKER_RACK: '2'
+      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
+      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
+      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
+      KAFKA_DELETE_TOPIC_ENABLE: 'true'
+      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
+  kafka-3:
+    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
+    restart: always
+    environment:
+      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
+      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093'
+      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
+      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
+      KAFKA_BROKER_ID: '3'
+      KAFKA_BROKER_RACK: '3'
+      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
+      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
+      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
+      KAFKA_DELETE_TOPIC_ENABLE: 'true'
+      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
+  kafka-4:
+    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
+    restart: always
+    environment:
+      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
+      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094'
+      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
+      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
+      KAFKA_BROKER_ID: '4'
+      KAFKA_BROKER_RACK: '4'
+      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
+      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
+      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
+      KAFKA_DELETE_TOPIC_ENABLE: 'true'
+      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
+  kafka-5:
+    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
+    restart: always
+    environment:
+      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
+      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095'
+      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
+      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
+      KAFKA_BROKER_ID: '5'
+      KAFKA_BROKER_RACK: '5'
+      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
+      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
+      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
+      KAFKA_DELETE_TOPIC_ENABLE: 'true'
+      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
+  toxiproxy:
+    image: 'shopify/toxiproxy:2.1.4'
+    ports:
+      # The tests themselves actually start the proxies on these ports
+      - '29091:29091'
+      - '29092:29092'
+      - '29093:29093'
+      - '29094:29094'
+      - '29095:29095'
+      # This is the toxiproxy API port
+      - '8474:8474'
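For anyone consuming this vendor drop, the compose file above is only a local test fixture: the brokers are reachable through the toxiproxy-mapped ports 29091-29095 on localhost. Below is a minimal sketch of pointing a sarama client at that cluster, assuming `docker-compose up -d` has already been run; the broker addresses are the ones published above.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0 // any version the compose brokers support

	// 29091/29092 are the toxiproxy-published listener ports from the compose file.
	client, err := sarama.NewClient([]string{"localhost:29091", "localhost:29092"}, cfg)
	if err != nil {
		log.Fatalf("unable to reach the local test cluster: %v", err)
	}
	defer client.Close()

	log.Printf("connected, %d broker(s) visible", len(client.Brokers()))
}
```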
diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go
index 7ce3bc0..dab54f8 100644
--- a/vendor/github.com/Shopify/sarama/encoder_decoder.go
+++ b/vendor/github.com/Shopify/sarama/encoder_decoder.go
@@ -12,6 +12,11 @@
 	encode(pe packetEncoder) error
 }
 
+type encoderWithHeader interface {
+	encoder
+	headerVersion() int16
+}
+
 // Encode takes an Encoder and turns it into bytes while potentially recording metrics.
 func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
 	if e == nil {
@@ -40,7 +45,7 @@
 	return realEnc.raw, nil
 }
 
-// Decoder is the interface that wraps the basic Decode method.
+// decoder is the interface that wraps the basic Decode method.
 // Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules.
 type decoder interface {
 	decode(pd packetDecoder) error
@@ -50,7 +55,7 @@
 	decode(pd packetDecoder, version int16) error
 }
 
-// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes,
+// decode takes bytes and a decoder and fills the fields of the decoder from the bytes,
 // interpreted using Kafka's encoding rules.
 func decode(buf []byte, in decoder) error {
 	if buf == nil {
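The new `encoderWithHeader` interface ties a request body to the request-header version it must be framed with. A hypothetical in-package sketch is shown below; the `fakeRequest` type is illustrative only and not part of sarama.

```go
package sarama

// fakeRequest is a hypothetical body used only to illustrate the new
// encoderWithHeader contract: an encoder that also reports which
// request-header version it must be framed with on the wire.
type fakeRequest struct {
	id int32
}

func (r *fakeRequest) encode(pe packetEncoder) error {
	pe.putInt32(r.id)
	return nil
}

// headerVersion 1 selects the v1 request header when this body is serialized.
func (r *fakeRequest) headerVersion() int16 {
	return 1
}

// Compile-time check that the type satisfies encoderWithHeader.
var _ encoderWithHeader = (*fakeRequest)(nil)
```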
diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/Shopify/sarama/end_txn_request.go
index 2cd9b50..6635425 100644
--- a/vendor/github.com/Shopify/sarama/end_txn_request.go
+++ b/vendor/github.com/Shopify/sarama/end_txn_request.go
@@ -45,6 +45,10 @@
 	return 0
 }
 
+func (r *EndTxnRequest) headerVersion() int16 {
+	return 1
+}
+
 func (a *EndTxnRequest) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/Shopify/sarama/end_txn_response.go
index 33b27e3..7639767 100644
--- a/vendor/github.com/Shopify/sarama/end_txn_response.go
+++ b/vendor/github.com/Shopify/sarama/end_txn_response.go
@@ -39,6 +39,10 @@
 	return 0
 }
 
+func (r *EndTxnResponse) headerVersion() int16 {
+	return 0
+}
+
 func (e *EndTxnResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go
index c6a8be7..0fca0a3 100644
--- a/vendor/github.com/Shopify/sarama/errors.go
+++ b/vendor/github.com/Shopify/sarama/errors.go
@@ -9,6 +9,9 @@
 // or otherwise failed to respond.
 var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
 
+// ErrBrokerNotFound is the error returned when there's no broker found for the requested ID.
+var ErrBrokerNotFound = errors.New("kafka: broker for ID is not found")
+
 // ErrClosedClient is the error returned when a method is called on a client that has been closed.
 var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
 
@@ -49,6 +52,9 @@
 // the metadata.
 var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
 
+// ErrUnknownScramMechanism is returned when user tries to AlterUserScramCredentials with unknown SCRAM mechanism
+var ErrUnknownScramMechanism = errors.New("kafka: unknown SCRAM mechanism provided")
+
 // PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
 // if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
 type PacketEncodingError struct {
@@ -87,13 +93,21 @@
 }
 
 func (mErr MultiError) Error() string {
-	var errString = ""
+	errString := ""
 	for _, err := range *mErr.Errors {
 		errString += err.Error() + ","
 	}
 	return errString
 }
 
+func (mErr MultiError) PrettyError() string {
+	errString := ""
+	for _, err := range *mErr.Errors {
+		errString += err.Error() + "\n"
+	}
+	return errString
+}
+
 // ErrDeleteRecords is the type of error returned when fail to delete the required records
 type ErrDeleteRecords struct {
 	MultiError
@@ -103,6 +117,14 @@
 	return "kafka server: failed to delete records " + err.MultiError.Error()
 }
 
+type ErrReassignPartitions struct {
+	MultiError
+}
+
+func (err ErrReassignPartitions) Error() string {
+	return fmt.Sprintf("failed to reassign partitions for topic: \n%s", err.MultiError.PrettyError())
+}
+
 // Numeric error codes returned by the Kafka server.
 const (
 	ErrNoError                            KError = 0
@@ -188,6 +210,13 @@
 	ErrMemberIdRequired                   KError = 79
 	ErrPreferredLeaderNotAvailable        KError = 80
 	ErrGroupMaxSizeReached                KError = 81
+	ErrFencedInstancedId                  KError = 82
+	ErrEligibleLeadersNotAvailable        KError = 83
+	ErrElectionNotNeeded                  KError = 84
+	ErrNoReassignmentInProgress           KError = 85
+	ErrGroupSubscribedToTopic             KError = 86
+	ErrInvalidRecord                      KError = 87
+	ErrUnstableOffsetCommit               KError = 88
 )
 
 func (err KError) Error() string {
@@ -360,6 +389,20 @@
 		return "kafka server: The preferred leader was not available"
 	case ErrGroupMaxSizeReached:
 		return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members."
+	case ErrFencedInstancedId:
+		return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id."
+	case ErrEligibleLeadersNotAvailable:
+		return "kafka server: Eligible topic partition leaders are not available."
+	case ErrElectionNotNeeded:
+		return "kafka server: Leader election not needed for topic partition."
+	case ErrNoReassignmentInProgress:
+		return "kafka server: No partition reassignment is in progress."
+	case ErrGroupSubscribedToTopic:
+		return "kafka server: Deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it."
+	case ErrInvalidRecord:
+		return "kafka server: This record has failed the validation on broker and hence will be rejected."
+	case ErrUnstableOffsetCommit:
+		return "kafka server: There are unstable offsets that need to be cleared."
 	}
 
 	return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
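Callers that drive partition reassignment can unpack the new `ErrReassignPartitions` wrapper and print each per-partition failure on its own line via `PrettyError`. A minimal sketch, assuming the error comes back from an admin call; the handling function and sample error text are illustrative.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/Shopify/sarama"
)

// reportReassignError prints each underlying failure on its own line when the
// error is the new ErrReassignPartitions type; anything else is printed as-is.
func reportReassignError(err error) {
	var reassignErr sarama.ErrReassignPartitions
	if errors.As(err, &reassignErr) {
		fmt.Printf("reassignment failed:\n%s", reassignErr.MultiError.PrettyError())
		return
	}
	fmt.Printf("error: %v\n", err)
}

func main() {
	errs := []error{errors.New("partition 0: not enough replicas")}
	reportReassignError(sarama.ErrReassignPartitions{MultiError: sarama.MultiError{Errors: &errs}})
}
```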
diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go
index 4db9ddd..f893aef 100644
--- a/vendor/github.com/Shopify/sarama/fetch_request.go
+++ b/vendor/github.com/Shopify/sarama/fetch_request.go
@@ -1,20 +1,41 @@
 package sarama
 
 type fetchRequestBlock struct {
-	fetchOffset int64
-	maxBytes    int32
+	Version            int16
+	currentLeaderEpoch int32
+	fetchOffset        int64
+	logStartOffset     int64
+	maxBytes           int32
 }
 
-func (b *fetchRequestBlock) encode(pe packetEncoder) error {
+func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error {
+	b.Version = version
+	if b.Version >= 9 {
+		pe.putInt32(b.currentLeaderEpoch)
+	}
 	pe.putInt64(b.fetchOffset)
+	if b.Version >= 5 {
+		pe.putInt64(b.logStartOffset)
+	}
 	pe.putInt32(b.maxBytes)
 	return nil
 }
 
-func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
+func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+	b.Version = version
+	if b.Version >= 9 {
+		if b.currentLeaderEpoch, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
 	if b.fetchOffset, err = pd.getInt64(); err != nil {
 		return err
 	}
+	if b.Version >= 5 {
+		if b.logStartOffset, err = pd.getInt64(); err != nil {
+			return err
+		}
+	}
 	if b.maxBytes, err = pd.getInt32(); err != nil {
 		return err
 	}
@@ -25,12 +46,16 @@
 // https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that.  The KIP is at
 // https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
 type FetchRequest struct {
-	MaxWaitTime int32
-	MinBytes    int32
-	MaxBytes    int32
-	Version     int16
-	Isolation   IsolationLevel
-	blocks      map[string]map[int32]*fetchRequestBlock
+	MaxWaitTime  int32
+	MinBytes     int32
+	MaxBytes     int32
+	Version      int16
+	Isolation    IsolationLevel
+	SessionID    int32
+	SessionEpoch int32
+	blocks       map[string]map[int32]*fetchRequestBlock
+	forgotten    map[string][]int32
+	RackID       string
 }
 
 type IsolationLevel int8
@@ -50,6 +75,10 @@
 	if r.Version >= 4 {
 		pe.putInt8(int8(r.Isolation))
 	}
+	if r.Version >= 7 {
+		pe.putInt32(r.SessionID)
+		pe.putInt32(r.SessionEpoch)
+	}
 	err = pe.putArrayLength(len(r.blocks))
 	if err != nil {
 		return err
@@ -65,17 +94,44 @@
 		}
 		for partition, block := range blocks {
 			pe.putInt32(partition)
-			err = block.encode(pe)
+			err = block.encode(pe, r.Version)
 			if err != nil {
 				return err
 			}
 		}
 	}
+	if r.Version >= 7 {
+		err = pe.putArrayLength(len(r.forgotten))
+		if err != nil {
+			return err
+		}
+		for topic, partitions := range r.forgotten {
+			err = pe.putString(topic)
+			if err != nil {
+				return err
+			}
+			err = pe.putArrayLength(len(partitions))
+			if err != nil {
+				return err
+			}
+			for _, partition := range partitions {
+				pe.putInt32(partition)
+			}
+		}
+	}
+	if r.Version >= 11 {
+		err = pe.putString(r.RackID)
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
 func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
 	r.Version = version
+
 	if _, err = pd.getInt32(); err != nil {
 		return err
 	}
@@ -97,6 +153,16 @@
 		}
 		r.Isolation = IsolationLevel(isolation)
 	}
+	if r.Version >= 7 {
+		r.SessionID, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+		r.SessionEpoch, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
 	topicCount, err := pd.getArrayLength()
 	if err != nil {
 		return err
@@ -121,12 +187,47 @@
 				return err
 			}
 			fetchBlock := &fetchRequestBlock{}
-			if err = fetchBlock.decode(pd); err != nil {
+			if err = fetchBlock.decode(pd, r.Version); err != nil {
 				return err
 			}
 			r.blocks[topic][partition] = fetchBlock
 		}
 	}
+
+	if r.Version >= 7 {
+		forgottenCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.forgotten = make(map[string][]int32)
+		for i := 0; i < forgottenCount; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			partitionCount, err := pd.getArrayLength()
+			if err != nil {
+				return err
+			}
+			r.forgotten[topic] = make([]int32, partitionCount)
+
+			for j := 0; j < partitionCount; j++ {
+				partition, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+				r.forgotten[topic][j] = partition
+			}
+		}
+	}
+
+	if r.Version >= 11 {
+		r.RackID, err = pd.getString()
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
@@ -138,18 +239,34 @@
 	return r.Version
 }
 
+func (r *FetchRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *FetchRequest) requiredVersion() KafkaVersion {
 	switch r.Version {
+	case 0:
+		return MinVersion
 	case 1:
 		return V0_9_0_0
 	case 2:
 		return V0_10_0_0
 	case 3:
 		return V0_10_1_0
-	case 4:
+	case 4, 5:
 		return V0_11_0_0
+	case 6:
+		return V1_0_0_0
+	case 7:
+		return V1_1_0_0
+	case 8:
+		return V2_0_0_0
+	case 9, 10:
+		return V2_1_0_0
+	case 11:
+		return V2_3_0_0
 	default:
-		return MinVersion
+		return MaxVersion
 	}
 }
 
@@ -158,13 +275,21 @@
 		r.blocks = make(map[string]map[int32]*fetchRequestBlock)
 	}
 
+	if r.Version >= 7 && r.forgotten == nil {
+		r.forgotten = make(map[string][]int32)
+	}
+
 	if r.blocks[topic] == nil {
 		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
 	}
 
 	tmp := new(fetchRequestBlock)
+	tmp.Version = r.Version
 	tmp.maxBytes = maxBytes
 	tmp.fetchOffset = fetchOffset
+	if r.Version >= 9 {
+		tmp.currentLeaderEpoch = int32(-1)
+	}
 
 	r.blocks[topic][partitionID] = tmp
 }
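The widened `FetchRequest` now carries fetch-session and rack fields, and `AddBlock` seeds `currentLeaderEpoch` with -1 on v9+ as shown above. A small sketch of building a v11 request by hand follows; the topic name and rack are placeholders, and in practice the consumer builds this internally.

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.FetchRequest{
		Version:      11,
		MinBytes:     1,
		MaxWaitTime:  500,      // ms
		SessionID:    0,        // no existing fetch session
		SessionEpoch: -1,       // -1 asks the broker for a full, session-less fetch
		RackID:       "rack-1", // lets the broker pick a close replica (KIP-392)
		Isolation:    sarama.ReadCommitted,
	}
	// AddBlock defaults currentLeaderEpoch to -1 for v9+ requests.
	req.AddBlock("my-topic", 0, 0, 1024*1024)

	fmt.Printf("fetch request v%d with %d topic block(s)\n", req.Version, 1)
}
```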
diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go
index 3afc187..54b8828 100644
--- a/vendor/github.com/Shopify/sarama/fetch_response.go
+++ b/vendor/github.com/Shopify/sarama/fetch_response.go
@@ -30,13 +30,15 @@
 }
 
 type FetchResponseBlock struct {
-	Err                 KError
-	HighWaterMarkOffset int64
-	LastStableOffset    int64
-	AbortedTransactions []*AbortedTransaction
-	Records             *Records // deprecated: use FetchResponseBlock.RecordsSet
-	RecordsSet          []*Records
-	Partial             bool
+	Err                  KError
+	HighWaterMarkOffset  int64
+	LastStableOffset     int64
+	LogStartOffset       int64
+	AbortedTransactions  []*AbortedTransaction
+	PreferredReadReplica int32
+	Records              *Records // deprecated: use FetchResponseBlock.RecordsSet
+	RecordsSet           []*Records
+	Partial              bool
 }
 
 func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
@@ -57,6 +59,13 @@
 			return err
 		}
 
+		if version >= 5 {
+			b.LogStartOffset, err = pd.getInt64()
+			if err != nil {
+				return err
+			}
+		}
+
 		numTransact, err := pd.getArrayLength()
 		if err != nil {
 			return err
@@ -75,6 +84,15 @@
 		}
 	}
 
+	if version >= 11 {
+		b.PreferredReadReplica, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	} else {
+		b.PreferredReadReplica = -1
+	}
+
 	recordsSize, err := pd.getInt32()
 	if err != nil {
 		return err
@@ -166,6 +184,10 @@
 	if version >= 4 {
 		pe.putInt64(b.LastStableOffset)
 
+		if version >= 5 {
+			pe.putInt64(b.LogStartOffset)
+		}
+
 		if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
 			return err
 		}
@@ -176,6 +198,10 @@
 		}
 	}
 
+	if version >= 11 {
+		pe.putInt32(b.PreferredReadReplica)
+	}
+
 	pe.push(&lengthField{})
 	for _, records := range b.RecordsSet {
 		err = records.encode(pe)
@@ -200,7 +226,9 @@
 type FetchResponse struct {
 	Blocks        map[string]map[int32]*FetchResponseBlock
 	ThrottleTime  time.Duration
-	Version       int16 // v1 requires 0.9+, v2 requires 0.10+
+	ErrorCode     int16
+	SessionID     int32
+	Version       int16
 	LogAppendTime bool
 	Timestamp     time.Time
 }
@@ -216,6 +244,17 @@
 		r.ThrottleTime = time.Duration(throttle) * time.Millisecond
 	}
 
+	if r.Version >= 7 {
+		r.ErrorCode, err = pd.getInt16()
+		if err != nil {
+			return err
+		}
+		r.SessionID, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
 	numTopics, err := pd.getArrayLength()
 	if err != nil {
 		return err
@@ -258,6 +297,11 @@
 		pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
 	}
 
+	if r.Version >= 7 {
+		pe.putInt16(r.ErrorCode)
+		pe.putInt32(r.SessionID)
+	}
+
 	err = pe.putArrayLength(len(r.Blocks))
 	if err != nil {
 		return err
@@ -281,7 +325,6 @@
 				return err
 			}
 		}
-
 	}
 	return nil
 }
@@ -294,18 +337,34 @@
 	return r.Version
 }
 
+func (r *FetchResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *FetchResponse) requiredVersion() KafkaVersion {
 	switch r.Version {
+	case 0:
+		return MinVersion
 	case 1:
 		return V0_9_0_0
 	case 2:
 		return V0_10_0_0
 	case 3:
 		return V0_10_1_0
-	case 4:
+	case 4, 5:
 		return V0_11_0_0
+	case 6:
+		return V1_0_0_0
+	case 7:
+		return V1_1_0_0
+	case 8:
+		return V2_0_0_0
+	case 9, 10:
+		return V2_1_0_0
+	case 11:
+		return V2_3_0_0
 	default:
-		return MinVersion
+		return MaxVersion
 	}
 }
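On the response side, v11 adds `PreferredReadReplica`; any value other than -1 means the broker suggests fetching this partition from that follower. A brief sketch of how a caller might inspect it (the constructed block is illustrative):

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// preferredReplica reports the broker's follower-fetch hint, if any.
func preferredReplica(block *sarama.FetchResponseBlock) {
	if block.PreferredReadReplica != -1 {
		fmt.Printf("broker suggests reading from replica %d\n", block.PreferredReadReplica)
	} else {
		fmt.Println("no preferred read replica; keep fetching from the leader")
	}
}

func main() {
	preferredReplica(&sarama.FetchResponseBlock{PreferredReadReplica: 2})
}
```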
 
diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/Shopify/sarama/find_coordinator_request.go
index ff2ad20..597bcbf 100644
--- a/vendor/github.com/Shopify/sarama/find_coordinator_request.go
+++ b/vendor/github.com/Shopify/sarama/find_coordinator_request.go
@@ -51,6 +51,10 @@
 	return f.Version
 }
 
+func (r *FindCoordinatorRequest) headerVersion() int16 {
+	return 1
+}
+
 func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion {
 	switch f.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/Shopify/sarama/find_coordinator_response.go
index 9c900e8..83a648a 100644
--- a/vendor/github.com/Shopify/sarama/find_coordinator_response.go
+++ b/vendor/github.com/Shopify/sarama/find_coordinator_response.go
@@ -82,6 +82,10 @@
 	return f.Version
 }
 
+func (r *FindCoordinatorResponse) headerVersion() int16 {
+	return 0
+}
+
 func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion {
 	switch f.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/go.mod b/vendor/github.com/Shopify/sarama/go.mod
index 8c45155..ccbd8e2 100644
--- a/vendor/github.com/Shopify/sarama/go.mod
+++ b/vendor/github.com/Shopify/sarama/go.mod
@@ -1,24 +1,28 @@
 module github.com/Shopify/sarama
 
+go 1.13
+
 require (
-	github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798
 	github.com/Shopify/toxiproxy v2.1.4+incompatible
 	github.com/davecgh/go-spew v1.1.1
-	github.com/eapache/go-resiliency v1.1.0
+	github.com/eapache/go-resiliency v1.2.0
 	github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21
 	github.com/eapache/queue v1.1.0
-	github.com/golang/snappy v0.0.1 // indirect
-	github.com/hashicorp/go-uuid v1.0.1 // indirect
-	github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03
-	github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41
-	github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a
-	github.com/stretchr/testify v1.3.0
-	github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c
-	github.com/xdg/stringprep v1.0.0 // indirect
-	golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 // indirect
-	golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3
-	gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect
-	gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect
-	gopkg.in/jcmturner/gokrb5.v7 v7.2.3
-	gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect
+	github.com/fortytw2/leaktest v1.3.0
+	github.com/frankban/quicktest v1.11.3 // indirect
+	github.com/jcmturner/gofork v1.0.0
+	github.com/jcmturner/gokrb5/v8 v8.4.2
+	github.com/klauspost/compress v1.12.2
+	github.com/kr/text v0.2.0 // indirect
+	github.com/pierrec/lz4 v2.6.0+incompatible
+	github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
+	github.com/stretchr/testify v1.7.0
+	github.com/xdg/scram v1.0.3
+	github.com/xdg/stringprep v1.0.3 // indirect
+	golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e // indirect
+	golang.org/x/net v0.0.0-20210614182718-04defd469f4e
+	golang.org/x/text v0.3.6 // indirect
+	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
 )
diff --git a/vendor/github.com/Shopify/sarama/go.sum b/vendor/github.com/Shopify/sarama/go.sum
index 4dbc6d2..a497760 100644
--- a/vendor/github.com/Shopify/sarama/go.sum
+++ b/vendor/github.com/Shopify/sarama/go.sum
@@ -1,51 +1,93 @@
-github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 h1:2T/jmrHeTezcCM58lvEQXs0UpQJCo5SoGAcg+mbSTIg=
-github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
 github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
-github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
+github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
 github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM=
-github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
-github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41 h1:GeinFsrjWz97fAxVUEd748aV0cYL+I6k44gFJTCVvpU=
-github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
-github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=
+github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
+github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI=
+github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
+github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
+github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
+github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
+github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
+github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
+github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
+github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
+github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
+github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA=
+github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc=
+github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
+github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
+github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8=
+github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A=
+github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
-github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
-github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
-github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/xdg/scram v1.0.3 h1:nTadYh2Fs4BK2xdldEa2g5bbaZp0/+1nJMMPtPxS/to=
+github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4=
+github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 h1:bselrhR0Or1vomJZC8ZIjWtbDmn9OYFLX5Ik9alpJpE=
-golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
+golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI=
+golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a h1:njMmldwFTyDLqonHMagNXKBWptTBeDZOdblgaDsNEGQ=
+golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
-gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
-gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=
-gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
-gopkg.in/jcmturner/gokrb5.v7 v7.2.3 h1:hHMV/yKPwMnJhPuPx7pH2Uw/3Qyf+thJYlisUc44010=
-gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
-gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
-gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go
index 49b632d..ab8b701 100644
--- a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go
+++ b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go
@@ -2,17 +2,20 @@
 
 import (
 	"encoding/binary"
+	"errors"
 	"fmt"
-	"github.com/jcmturner/gofork/encoding/asn1"
-	"gopkg.in/jcmturner/gokrb5.v7/asn1tools"
-	"gopkg.in/jcmturner/gokrb5.v7/gssapi"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
-	"gopkg.in/jcmturner/gokrb5.v7/messages"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
 	"io"
+	"math"
 	"strings"
 	"time"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/asn1tools"
+	"github.com/jcmturner/gokrb5/v8/gssapi"
+	"github.com/jcmturner/gokrb5/v8/iana/chksumtype"
+	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
+	"github.com/jcmturner/gokrb5/v8/messages"
+	"github.com/jcmturner/gokrb5/v8/types"
 )
 
 const (
@@ -33,6 +36,7 @@
 	Username           string
 	Password           string
 	Realm              string
+	DisablePAFXFAST    bool
 }
 
 type GSSAPIKerberosAuth struct {
@@ -51,15 +55,14 @@
 	Destroy()
 }
 
-/*
-*
-* Appends length in big endian before payload, and send it to kafka
-*
- */
-
+// writePackage appends length in big endian before the payload, and sends it to kafka
 func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) {
-	length := len(payload)
-	finalPackage := make([]byte, length+4) //4 byte length header + payload
+	length := uint64(len(payload))
+	size := length + 4 // 4 byte length header + payload
+	if size > math.MaxInt32 {
+		return 0, errors.New("payload too large, will overflow int32")
+	}
+	finalPackage := make([]byte, size)
 	copy(finalPackage[4:], payload)
 	binary.BigEndian.PutUint32(finalPackage, uint32(length))
 	bytes, err := broker.conn.Write(finalPackage)
@@ -69,12 +72,7 @@
 	return bytes, nil
 }
 
-/*
-*
-* Read length (4 bytes) and then read the payload
-*
- */
-
+// readPackage reads payload length (4 bytes) and then reads the payload into []byte
 func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) {
 	bytesRead := 0
 	lengthInBytes := make([]byte, 4)
@@ -152,7 +150,7 @@
 *
  */
 func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) {
-	oidBytes, err := asn1.Marshal(gssapi.OID(gssapi.OIDKRB5))
+	oidBytes, err := asn1.Marshal(gssapi.OIDKRB5.OID())
 	if err != nil {
 		return nil, err
 	}
@@ -199,7 +197,6 @@
 
 /* This does the handshake for authorization */
 func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error {
-
 	kerberosClient, err := krbAuth.NewKerberosClientFunc(krbAuth.Config)
 	if err != nil {
 		Logger.Printf("Kerberos client error: %s", err)
@@ -218,7 +215,6 @@
 	spn := fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host)
 
 	ticket, encKey, err := kerberosClient.GetServiceTicket(spn)
-
 	if err != nil {
 		Logger.Printf("Error getting Kerberos service ticket : %s", err)
 		return err
@@ -242,7 +238,7 @@
 		}
 		broker.updateOutgoingCommunicationMetrics(bytesWritten)
 		if krbAuth.step == GSS_API_VERIFY {
-			var bytesRead = 0
+			bytesRead := 0
 			receivedBytes, bytesRead, err = krbAuth.readPackage(broker)
 			requestLatency := time.Since(requestTime)
 			broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
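The SASL/GSSAPI exchange above frames every token with a 4-byte big-endian length header, and the rewritten `writePackage` now guards against payloads whose framed size would overflow an int32. A standalone sketch of the same framing (not sarama API, just the wire layout):

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"math"
)

// frame prepends a 4-byte big-endian length header to a GSSAPI token,
// mirroring what writePackage does before writing to the broker connection.
func frame(payload []byte) ([]byte, error) {
	length := uint64(len(payload))
	if length+4 > math.MaxInt32 {
		return nil, errors.New("payload too large, will overflow int32")
	}
	out := make([]byte, length+4)
	binary.BigEndian.PutUint32(out, uint32(length))
	copy(out[4:], payload)
	return out, nil
}

func main() {
	framed, _ := frame([]byte("token"))
	fmt.Printf("% x\n", framed) // 00 00 00 05 74 6f 6b 65 6e
}
```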
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go
index ce49c47..e9d9af1 100644
--- a/vendor/github.com/Shopify/sarama/heartbeat_request.go
+++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go
@@ -42,6 +42,10 @@
 	return 0
 }
 
+func (r *HeartbeatRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go
index 766f5fd..577ab72 100644
--- a/vendor/github.com/Shopify/sarama/heartbeat_response.go
+++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go
@@ -27,6 +27,10 @@
 	return 0
 }
 
+func (r *HeartbeatResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go b/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go
new file mode 100644
index 0000000..c4d05a9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go
@@ -0,0 +1,173 @@
+package sarama
+
+type IncrementalAlterConfigsOperation int8
+
+const (
+	IncrementalAlterConfigsOperationSet IncrementalAlterConfigsOperation = iota
+	IncrementalAlterConfigsOperationDelete
+	IncrementalAlterConfigsOperationAppend
+	IncrementalAlterConfigsOperationSubtract
+)
+
+// IncrementalAlterConfigsRequest is an incremental alter config request type
+type IncrementalAlterConfigsRequest struct {
+	Resources    []*IncrementalAlterConfigsResource
+	ValidateOnly bool
+}
+
+type IncrementalAlterConfigsResource struct {
+	Type          ConfigResourceType
+	Name          string
+	ConfigEntries map[string]IncrementalAlterConfigsEntry
+}
+
+type IncrementalAlterConfigsEntry struct {
+	Operation IncrementalAlterConfigsOperation
+	Value     *string
+}
+
+func (a *IncrementalAlterConfigsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(a.Resources)); err != nil {
+		return err
+	}
+
+	for _, r := range a.Resources {
+		if err := r.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putBool(a.ValidateOnly)
+	return nil
+}
+
+func (a *IncrementalAlterConfigsRequest) decode(pd packetDecoder, version int16) error {
+	resourceCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Resources = make([]*IncrementalAlterConfigsResource, resourceCount)
+	for i := range a.Resources {
+		r := &IncrementalAlterConfigsResource{}
+		err = r.decode(pd, version)
+		if err != nil {
+			return err
+		}
+		a.Resources[i] = r
+	}
+
+	validateOnly, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+
+	a.ValidateOnly = validateOnly
+
+	return nil
+}
+
+func (a *IncrementalAlterConfigsResource) encode(pe packetEncoder) error {
+	pe.putInt8(int8(a.Type))
+
+	if err := pe.putString(a.Name); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
+		return err
+	}
+
+	for name, e := range a.ConfigEntries {
+		if err := pe.putString(name); err != nil {
+			return err
+		}
+
+		if err := e.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *IncrementalAlterConfigsResource) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	a.Name = name
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		a.ConfigEntries = make(map[string]IncrementalAlterConfigsEntry, n)
+		for i := 0; i < n; i++ {
+			name, err := pd.getString()
+			if err != nil {
+				return err
+			}
+
+			var v IncrementalAlterConfigsEntry
+
+			if err := v.decode(pd, version); err != nil {
+				return err
+			}
+
+			a.ConfigEntries[name] = v
+		}
+	}
+	return err
+}
+
+func (a *IncrementalAlterConfigsEntry) encode(pe packetEncoder) error {
+	pe.putInt8(int8(a.Operation))
+
+	if err := pe.putNullableString(a.Value); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *IncrementalAlterConfigsEntry) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Operation = IncrementalAlterConfigsOperation(t)
+
+	s, err := pd.getNullableString()
+	if err != nil {
+		return err
+	}
+
+	a.Value = s
+
+	return nil
+}
+
+func (a *IncrementalAlterConfigsRequest) key() int16 {
+	return 44
+}
+
+func (a *IncrementalAlterConfigsRequest) version() int16 {
+	return 0
+}
+
+func (a *IncrementalAlterConfigsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (a *IncrementalAlterConfigsRequest) requiredVersion() KafkaVersion {
+	return V2_3_0_0
+}
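A short sketch of driving the new incremental alter-configs request directly, setting a single entry in dry-run mode; the topic name and config key are placeholders, and most callers would go through the admin client instead.

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	retention := "604800000" // 7 days, in ms

	req := &sarama.IncrementalAlterConfigsRequest{
		Resources: []*sarama.IncrementalAlterConfigsResource{{
			Type: sarama.TopicResource,
			Name: "my-topic",
			ConfigEntries: map[string]sarama.IncrementalAlterConfigsEntry{
				"retention.ms": {
					Operation: sarama.IncrementalAlterConfigsOperationSet,
					Value:     &retention,
				},
			},
		}},
		ValidateOnly: true, // broker validates but does not apply
	}

	fmt.Printf("altering %d resource(s), validate-only=%v\n", len(req.Resources), req.ValidateOnly)
}
```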
diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go b/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go
new file mode 100644
index 0000000..3e8c450
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go
@@ -0,0 +1,66 @@
+package sarama
+
+import "time"
+
+// IncrementalAlterConfigsResponse is a response type for incremental alter config
+type IncrementalAlterConfigsResponse struct {
+	ThrottleTime time.Duration
+	Resources    []*AlterConfigsResourceResponse
+}
+
+func (a *IncrementalAlterConfigsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(a.Resources)); err != nil {
+		return err
+	}
+
+	for _, v := range a.Resources {
+		if err := v.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *IncrementalAlterConfigsResponse) decode(pd packetDecoder, version int16) error {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	responseCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
+
+	for i := range a.Resources {
+		a.Resources[i] = new(AlterConfigsResourceResponse)
+
+		if err := a.Resources[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *IncrementalAlterConfigsResponse) key() int16 {
+	return 44
+}
+
+func (a *IncrementalAlterConfigsResponse) version() int16 {
+	return 0
+}
+
+func (a *IncrementalAlterConfigsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (a *IncrementalAlterConfigsResponse) requiredVersion() KafkaVersion {
+	return V2_3_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go
index 8ceb6c2..6894443 100644
--- a/vendor/github.com/Shopify/sarama/init_producer_id_request.go
+++ b/vendor/github.com/Shopify/sarama/init_producer_id_request.go
@@ -38,6 +38,10 @@
 	return 0
 }
 
+func (i *InitProducerIDRequest) headerVersion() int16 {
+	return 1
+}
+
 func (i *InitProducerIDRequest) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go
index 1b32eb0..3e1242b 100644
--- a/vendor/github.com/Shopify/sarama/init_producer_id_response.go
+++ b/vendor/github.com/Shopify/sarama/init_producer_id_response.go
@@ -50,6 +50,10 @@
 	return 0
 }
 
+func (i *InitProducerIDResponse) headerVersion() int16 {
+	return 0
+}
+
 func (i *InitProducerIDResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/interceptors.go b/vendor/github.com/Shopify/sarama/interceptors.go
new file mode 100644
index 0000000..d0d33e5
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/interceptors.go
@@ -0,0 +1,43 @@
+package sarama
+
+// ProducerInterceptor allows you to intercept (and possibly mutate) the records
+// received by the producer before they are published to the Kafka cluster.
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation
+type ProducerInterceptor interface {
+
+	// OnSend is called when the producer message is intercepted. Please avoid
+	// modifying the message until it's safe to do so, as this is _not_ a copy
+	// of the message.
+	OnSend(*ProducerMessage)
+}
+
+// ConsumerInterceptor allows you to intercept (and possibly mutate) the records
+// received by the consumer before they are sent to the messages channel.
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation
+type ConsumerInterceptor interface {
+
+	// OnConsume is called when the consumed message is intercepted. Please
+	// avoid modifying the message until it's safe to do so, as this is _not_ a
+	// copy of the message.
+	OnConsume(*ConsumerMessage)
+}
+
+func (msg *ProducerMessage) safelyApplyInterceptor(interceptor ProducerInterceptor) {
+	defer func() {
+		if r := recover(); r != nil {
+			Logger.Printf("Error when calling producer interceptor: %s, %v\n", interceptor, r)
+		}
+	}()
+
+	interceptor.OnSend(msg)
+}
+
+func (msg *ConsumerMessage) safelyApplyInterceptor(interceptor ConsumerInterceptor) {
+	defer func() {
+		if r := recover(); r != nil {
+			Logger.Printf("Error when calling consumer interceptor: %s, %v\n", interceptor, r)
+		}
+	}()
+
+	interceptor.OnConsume(msg)
+}
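Interceptors are plugged in through the client configuration; the sketch below assumes the `Config.Producer.Interceptors` slice that accompanies this interceptor support in upstream sarama, and the header key/value are placeholders.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

// headerInterceptor is a hypothetical ProducerInterceptor that stamps every
// outgoing record with a static header before it is published.
type headerInterceptor struct{}

func (headerInterceptor) OnSend(msg *sarama.ProducerMessage) {
	msg.Headers = append(msg.Headers, sarama.RecordHeader{
		Key:   []byte("origin"),
		Value: []byte("voltha"),
	})
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0
	// Assumed wiring point: interceptors run in order for every produced message.
	cfg.Producer.Interceptors = []sarama.ProducerInterceptor{headerInterceptor{}}

	log.Printf("config prepared with %d producer interceptor(s)", len(cfg.Producer.Interceptors))
}
```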
diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go
index 97e9299..3734e82 100644
--- a/vendor/github.com/Shopify/sarama/join_group_request.go
+++ b/vendor/github.com/Shopify/sarama/join_group_request.go
@@ -134,6 +134,10 @@
 	return r.Version
 }
 
+func (r *JoinGroupRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 2:
diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go
index 5752acc..54b0a45 100644
--- a/vendor/github.com/Shopify/sarama/join_group_response.go
+++ b/vendor/github.com/Shopify/sarama/join_group_response.go
@@ -123,6 +123,10 @@
 	return r.Version
 }
 
+func (r *JoinGroupResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 2:
diff --git a/vendor/github.com/Shopify/sarama/kerberos_client.go b/vendor/github.com/Shopify/sarama/kerberos_client.go
index 91b998f..01a5319 100644
--- a/vendor/github.com/Shopify/sarama/kerberos_client.go
+++ b/vendor/github.com/Shopify/sarama/kerberos_client.go
@@ -1,10 +1,10 @@
 package sarama
 
 import (
-	krb5client "gopkg.in/jcmturner/gokrb5.v7/client"
-	krb5config "gopkg.in/jcmturner/gokrb5.v7/config"
-	"gopkg.in/jcmturner/gokrb5.v7/keytab"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
+	krb5client "github.com/jcmturner/gokrb5/v8/client"
+	krb5config "github.com/jcmturner/gokrb5/v8/config"
+	"github.com/jcmturner/gokrb5/v8/keytab"
+	"github.com/jcmturner/gokrb5/v8/types"
 )
 
 type KerberosGoKrb5Client struct {
@@ -19,14 +19,9 @@
 	return c.Credentials.CName()
 }
 
-/*
-*
-* Create kerberos client used to obtain TGT and TGS tokens
-* used gokrb5 library, which is a pure go kerberos client with
-* some GSS-API capabilities, and SPNEGO support. Kafka does not use SPNEGO
-* it uses pure Kerberos 5 solution (RFC-4121 and RFC-4120).
-*
- */
+// NewKerberosClient creates a Kerberos client used to obtain TGT and TGS tokens.
+// It uses a pure Go Kerberos 5 implementation (RFC 4121 and RFC 4120), built on
+// the gokrb5 library, which also provides some GSS-API capabilities.
 func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) {
 	cfg, err := krb5config.Load(config.KerberosConfigPath)
 	if err != nil {
@@ -42,10 +37,10 @@
 		if err != nil {
 			return nil, err
 		}
-		client = krb5client.NewClientWithKeytab(config.Username, config.Realm, kt, cfg)
+		client = krb5client.NewWithKeytab(config.Username, config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
 	} else {
-		client = krb5client.NewClientWithPassword(config.Username,
-			config.Realm, config.Password, cfg)
+		client = krb5client.NewWithPassword(config.Username,
+			config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
 	}
 	return &KerberosGoKrb5Client{*client}, nil
 }
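With the move to gokrb5 v8, `NewKerberosClient` also honours the new `DisablePAFXFAST` flag. A minimal password-based sketch follows; the realm, principal, and paths are placeholders.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := &sarama.GSSAPIConfig{
		AuthType:           sarama.KRB5_USER_AUTH,
		KerberosConfigPath: "/etc/krb5.conf",
		Username:           "kafka-client",
		Password:           "secret",
		Realm:              "EXAMPLE.COM",
		DisablePAFXFAST:    true, // skip PA-FX-FAST pre-auth for KDCs that do not support it
	}

	client, err := sarama.NewKerberosClient(cfg)
	if err != nil {
		log.Fatalf("building kerberos client failed: %v", err)
	}
	if err := client.Login(); err != nil {
		log.Fatalf("kerberos login failed: %v", err)
	}
	defer client.Destroy()

	log.Printf("authenticated against realm %s", client.Domain())
}
```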
diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go
index e177427..d7789b6 100644
--- a/vendor/github.com/Shopify/sarama/leave_group_request.go
+++ b/vendor/github.com/Shopify/sarama/leave_group_request.go
@@ -35,6 +35,10 @@
 	return 0
 }
 
+func (r *LeaveGroupRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go
index d60c626..25f8d5e 100644
--- a/vendor/github.com/Shopify/sarama/leave_group_response.go
+++ b/vendor/github.com/Shopify/sarama/leave_group_response.go
@@ -27,6 +27,10 @@
 	return 0
 }
 
+func (r *LeaveGroupResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go
index 3b16abf..4553b2d 100644
--- a/vendor/github.com/Shopify/sarama/list_groups_request.go
+++ b/vendor/github.com/Shopify/sarama/list_groups_request.go
@@ -1,7 +1,6 @@
 package sarama
 
-type ListGroupsRequest struct {
-}
+type ListGroupsRequest struct{}
 
 func (r *ListGroupsRequest) encode(pe packetEncoder) error {
 	return nil
@@ -19,6 +18,10 @@
 	return 0
 }
 
+func (r *ListGroupsRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go
index 56115d4..777bae7 100644
--- a/vendor/github.com/Shopify/sarama/list_groups_response.go
+++ b/vendor/github.com/Shopify/sarama/list_groups_response.go
@@ -64,6 +64,10 @@
 	return 0
 }
 
+func (r *ListGroupsResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go
new file mode 100644
index 0000000..c1ffa9b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go
@@ -0,0 +1,98 @@
+package sarama
+
+type ListPartitionReassignmentsRequest struct {
+	TimeoutMs int32
+	blocks    map[string][]int32
+	Version   int16
+}
+
+func (r *ListPartitionReassignmentsRequest) encode(pe packetEncoder) error {
+	pe.putInt32(r.TimeoutMs)
+
+	pe.putCompactArrayLength(len(r.blocks))
+
+	for topic, partitions := range r.blocks {
+		if err := pe.putCompactString(topic); err != nil {
+			return err
+		}
+
+		if err := pe.putCompactInt32Array(partitions); err != nil {
+			return err
+		}
+
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (r *ListPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.TimeoutMs, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	topicCount, err := pd.getCompactArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount > 0 {
+		r.blocks = make(map[string][]int32)
+		for i := 0; i < topicCount; i++ {
+			topic, err := pd.getCompactString()
+			if err != nil {
+				return err
+			}
+			partitionCount, err := pd.getCompactArrayLength()
+			if err != nil {
+				return err
+			}
+			r.blocks[topic] = make([]int32, partitionCount)
+			for j := 0; j < partitionCount; j++ {
+				partition, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+				r.blocks[topic][j] = partition
+			}
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+
+	return
+}
+
+func (r *ListPartitionReassignmentsRequest) key() int16 {
+	return 46
+}
+
+func (r *ListPartitionReassignmentsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *ListPartitionReassignmentsRequest) headerVersion() int16 {
+	return 2
+}
+
+func (r *ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
+
+func (r *ListPartitionReassignmentsRequest) AddBlock(topic string, partitionIDs []int32) {
+	if r.blocks == nil {
+		r.blocks = make(map[string][]int32)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = partitionIDs
+	}
+}
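A short sketch of the new list-partition-reassignments request, asking which reassignments are still in flight for two partitions of a hypothetical topic; the admin client normally constructs and sends this on the caller's behalf.

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.ListPartitionReassignmentsRequest{TimeoutMs: 10000}
	req.AddBlock("my-topic", []int32{0, 1})

	fmt.Printf("list-reassignments request, timeout %dms\n", req.TimeoutMs)
}
```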
diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go
new file mode 100644
index 0000000..4baa6a0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go
@@ -0,0 +1,169 @@
+package sarama
+
+type PartitionReplicaReassignmentsStatus struct {
+	Replicas         []int32
+	AddingReplicas   []int32
+	RemovingReplicas []int32
+}
+
+func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error {
+	if err := pe.putCompactInt32Array(b.Replicas); err != nil {
+		return err
+	}
+	if err := pe.putCompactInt32Array(b.AddingReplicas); err != nil {
+		return err
+	}
+	if err := pe.putCompactInt32Array(b.RemovingReplicas); err != nil {
+		return err
+	}
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (b *PartitionReplicaReassignmentsStatus) decode(pd packetDecoder) (err error) {
+	if b.Replicas, err = pd.getCompactInt32Array(); err != nil {
+		return err
+	}
+
+	if b.AddingReplicas, err = pd.getCompactInt32Array(); err != nil {
+		return err
+	}
+
+	if b.RemovingReplicas, err = pd.getCompactInt32Array(); err != nil {
+		return err
+	}
+
+	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+
+	return err
+}
+
+type ListPartitionReassignmentsResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	ErrorCode      KError
+	ErrorMessage   *string
+	TopicStatus    map[string]map[int32]*PartitionReplicaReassignmentsStatus
+}
+
+func (r *ListPartitionReassignmentsResponse) AddBlock(topic string, partition int32, replicas, addingReplicas, removingReplicas []int32) {
+	if r.TopicStatus == nil {
+		r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus)
+	}
+	partitions := r.TopicStatus[topic]
+	if partitions == nil {
+		partitions = make(map[int32]*PartitionReplicaReassignmentsStatus)
+		r.TopicStatus[topic] = partitions
+	}
+
+	partitions[partition] = &PartitionReplicaReassignmentsStatus{Replicas: replicas, AddingReplicas: addingReplicas, RemovingReplicas: removingReplicas}
+}
+
+func (r *ListPartitionReassignmentsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(r.ThrottleTimeMs)
+	pe.putInt16(int16(r.ErrorCode))
+	if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
+		return err
+	}
+
+	pe.putCompactArrayLength(len(r.TopicStatus))
+	for topic, partitions := range r.TopicStatus {
+		if err := pe.putCompactString(topic); err != nil {
+			return err
+		}
+		pe.putCompactArrayLength(len(partitions))
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+
+			if err := block.encode(pe); err != nil {
+				return err
+			}
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (r *ListPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.ErrorCode = KError(kerr)
+
+	if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
+		return err
+	}
+
+	numTopics, err := pd.getCompactArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus, numTopics)
+	for i := 0; i < numTopics; i++ {
+		topic, err := pd.getCompactString()
+		if err != nil {
+			return err
+		}
+
+		ongoingPartitionReassignments, err := pd.getCompactArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.TopicStatus[topic] = make(map[int32]*PartitionReplicaReassignmentsStatus, ongoingPartitionReassignments)
+
+		for j := 0; j < ongoingPartitionReassignments; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := &PartitionReplicaReassignmentsStatus{}
+			if err := block.decode(pd); err != nil {
+				return err
+			}
+			r.TopicStatus[topic][partition] = block
+		}
+
+		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *ListPartitionReassignmentsResponse) key() int16 {
+	return 46
+}
+
+func (r *ListPartitionReassignmentsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *ListPartitionReassignmentsResponse) headerVersion() int16 {
+	return 1
+}
+
+func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
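A matching sketch for consuming the response type added above; it only touches the exported fields defined in this file.

package example

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// printOngoingReassignments walks the TopicStatus map filled in by the decoder above.
func printOngoingReassignments(resp *sarama.ListPartitionReassignmentsResponse) {
	if resp.ErrorCode != sarama.ErrNoError {
		fmt.Printf("list failed: %v\n", resp.ErrorCode)
		return
	}
	for topic, partitions := range resp.TopicStatus {
		for partition, status := range partitions {
			fmt.Printf("%s/%d replicas=%v adding=%v removing=%v\n",
				topic, partition, status.Replicas, status.AddingReplicas, status.RemovingReplicas)
		}
	}
}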
diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go
index 7c54748..fd0d1d9 100644
--- a/vendor/github.com/Shopify/sarama/message.go
+++ b/vendor/github.com/Shopify/sarama/message.go
@@ -6,15 +6,15 @@
 )
 
 const (
-	//CompressionNone no compression
+	// CompressionNone no compression
 	CompressionNone CompressionCodec = iota
-	//CompressionGZIP compression using GZIP
+	// CompressionGZIP compression using GZIP
 	CompressionGZIP
-	//CompressionSnappy compression using snappy
+	// CompressionSnappy compression using snappy
 	CompressionSnappy
-	//CompressionLZ4 compression using LZ4
+	// CompressionLZ4 compression using LZ4
 	CompressionLZ4
-	//CompressionZSTD compression using ZSTD
+	// CompressionZSTD compression using ZSTD
 	CompressionZSTD
 
 	// The lowest 3 bits contain the compression codec used for the message
@@ -42,7 +42,7 @@
 	}[int(cc)]
 }
 
-//Message is a kafka message type
+// Message is a kafka message type
 type Message struct {
 	Codec            CompressionCodec // codec used to compress the message contents
 	CompressionLevel int              // compression level
@@ -85,7 +85,6 @@
 		payload = m.compressedCache
 		m.compressedCache = nil
 	} else if m.Value != nil {
-
 		payload, err = compress(m.Codec, m.CompressionLevel, m.Value)
 		if err != nil {
 			return err
@@ -147,18 +146,12 @@
 	// for future metrics about the compression ratio in fetch requests
 	m.compressedSize = len(m.Value)
 
-	switch m.Codec {
-	case CompressionNone:
-		// nothing to do
-	default:
-		if m.Value == nil {
-			break
-		}
-
+	if m.Value != nil && m.Codec != CompressionNone {
 		m.Value, err = decompress(m.Codec, m.Value)
 		if err != nil {
 			return err
 		}
+
 		if err := m.decodeSet(); err != nil {
 			return err
 		}
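For context, the codecs listed at the top of this file are chosen on the producer side through the configuration; a minimal sketch (the gzip choice is arbitrary, and CompressionLevelDefault is the package's default-level constant).

package example

import "github.com/Shopify/sarama"

// compressedProducerConfig selects the codec that ends up in Message.Codec, which
// the simplified decode path above uses to pick the matching decompressor.
func compressedProducerConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Producer.Compression = sarama.CompressionGZIP
	cfg.Producer.CompressionLevel = sarama.CompressionLevelDefault
	return cfg
}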
diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go
index 1b590d3..e835f5a 100644
--- a/vendor/github.com/Shopify/sarama/metadata_request.go
+++ b/vendor/github.com/Shopify/sarama/metadata_request.go
@@ -65,6 +65,10 @@
 	return r.Version
 }
 
+func (r *MetadataRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *MetadataRequest) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go
index b2d532e..0bb8702 100644
--- a/vendor/github.com/Shopify/sarama/metadata_response.go
+++ b/vendor/github.com/Shopify/sarama/metadata_response.go
@@ -255,6 +255,10 @@
 	return r.Version
 }
 
+func (r *MetadataResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *MetadataResponse) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
@@ -318,5 +322,4 @@
 	pmatch.Isr = isr
 	pmatch.OfflineReplicas = offline
 	pmatch.Err = err
-
 }
diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go
index 4ed46a6..c2654d1 100644
--- a/vendor/github.com/Shopify/sarama/mockbroker.go
+++ b/vendor/github.com/Shopify/sarama/mockbroker.go
@@ -20,7 +20,7 @@
 
 type GSSApiHandlerFunc func([]byte) []byte
 
-type requestHandlerFunc func(req *request) (res encoder)
+type requestHandlerFunc func(req *request) (res encoderWithHeader)
 
 // RequestNotifierFunc is invoked when a mock broker processes a request successfully
 // and will provides the number of bytes read and written.
@@ -30,7 +30,7 @@
 // to facilitate testing of higher level or specialized consumers and producers
 // built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
 // but rather provides a facility to do that. It takes care of the TCP
-// transport, request unmarshaling, response marshaling, and makes it the test
+// transport, request unmarshalling, response marshalling, and makes it the test
 // writer's responsibility to program MockBroker behaviour that is correct
 // according to the Kafka API protocol.
 //
@@ -55,7 +55,7 @@
 	port          int32
 	closing       chan none
 	stopper       chan none
-	expectations  chan encoder
+	expectations  chan encoderWithHeader
 	listener      net.Listener
 	t             TestReporter
 	latency       time.Duration
@@ -83,7 +83,7 @@
 // and uses the found MockResponse instance to generate an appropriate reply.
 // If the request type is not found in the map then nothing is sent.
 func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
-	b.setHandler(func(req *request) (res encoder) {
+	b.setHandler(func(req *request) (res encoderWithHeader) {
 		reqTypeName := reflect.TypeOf(req.body).Elem().Name()
 		mockResponse := handlerMap[reqTypeName]
 		if mockResponse == nil {
@@ -213,11 +213,13 @@
 	return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04})
 }
 
-func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
+func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.WaitGroup) {
 	defer wg.Done()
 	defer func() {
 		_ = conn.Close()
 	}()
+	s := spew.NewDefaultConfig()
+	s.MaxDepth = 1
 	Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
 	var err error
 
@@ -231,11 +233,9 @@
 		}
 	}()
 
-	resHeader := make([]byte, 8)
 	var bytesWritten int
 	var bytesRead int
 	for {
-
 		buffer, err := b.readToBytes(conn)
 		if err != nil {
 			Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer))
@@ -245,7 +245,6 @@
 
 		bytesWritten = 0
 		if !b.isGSSAPI(buffer) {
-
 			req, br, err := decodeRequest(bytes.NewReader(buffer))
 			bytesRead = br
 			if err != nil {
@@ -267,7 +266,12 @@
 				Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
 				continue
 			}
-			Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
+			Logger.Printf(
+				"*** mockbroker/%d/%d: replied to %T with %T\n-> %s\n-> %s",
+				b.brokerID, idx, req.body, res,
+				s.Sprintf("%#v", req.body),
+				s.Sprintf("%#v", res),
+			)
 
 			encodedRes, err := encode(res, nil)
 			if err != nil {
@@ -283,8 +287,7 @@
 				continue
 			}
 
-			binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
-			binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
+			resHeader := b.encodeHeader(res.headerVersion(), req.correlationID, uint32(len(encodedRes)))
 			if _, err = conn.Write(resHeader); err != nil {
 				b.serverError(err)
 				break
@@ -294,7 +297,6 @@
 				break
 			}
 			bytesWritten = len(resHeader) + len(encodedRes)
-
 		} else {
 			// GSSAPI is not part of the Kafka protocol, but is supported for authentication purposes.
 			// Don't keep request history for this kind of request, as it is only used to test the GSSAPI authentication mechanism.
@@ -317,12 +319,29 @@
 			b.notifier(bytesRead, bytesWritten)
 		}
 		b.lock.Unlock()
-
 	}
 	Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
 }
 
-func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) {
+func (b *MockBroker) encodeHeader(headerVersion int16, correlationId int32, payloadLength uint32) []byte {
+	headerLength := uint32(8)
+
+	if headerVersion >= 1 {
+		headerLength = 9
+	}
+
+	resHeader := make([]byte, headerLength)
+	binary.BigEndian.PutUint32(resHeader, payloadLength+headerLength-4)
+	binary.BigEndian.PutUint32(resHeader[4:], uint32(correlationId))
+
+	if headerVersion >= 1 {
+		binary.PutUvarint(resHeader[8:], 0)
+	}
+
+	return resHeader
+}
+
+func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) {
 	select {
 	case res, ok := <-b.expectations:
 		if !ok {
@@ -377,7 +396,7 @@
 		stopper:      make(chan none),
 		t:            t,
 		brokerID:     brokerID,
-		expectations: make(chan encoder, 512),
+		expectations: make(chan encoderWithHeader, 512),
 		listener:     listener,
 	}
 	broker.handler = broker.defaultRequestHandler
@@ -398,6 +417,6 @@
 	return broker
 }
 
-func (b *MockBroker) Returns(e encoder) {
+func (b *MockBroker) Returns(e encoderWithHeader) {
 	b.expectations <- e
 }
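A minimal test sketch of the updated mock broker surface: Returns now takes an encoderWithHeader, which every concrete response type satisfies via its headerVersion() method. The metadata response used here is just a convenient example.

package example

import (
	"testing"

	"github.com/Shopify/sarama"
)

func TestMockBrokerReturns(t *testing.T) {
	broker := sarama.NewMockBroker(t, 1)
	defer broker.Close()

	// Any *Response works here; its headerVersion() decides whether encodeHeader
	// writes the 8-byte (v0) or 9-byte (v1, trailing empty tagged-field byte) header.
	metadata := new(sarama.MetadataResponse)
	metadata.AddBroker(broker.Addr(), broker.BrokerID())
	broker.Returns(metadata)

	// ... point a client at broker.Addr() and exercise it here ...
}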
diff --git a/vendor/github.com/Shopify/sarama/mockkerberos.go b/vendor/github.com/Shopify/sarama/mockkerberos.go
index affeb2d..a43607e 100644
--- a/vendor/github.com/Shopify/sarama/mockkerberos.go
+++ b/vendor/github.com/Shopify/sarama/mockkerberos.go
@@ -3,11 +3,12 @@
 import (
 	"encoding/binary"
 	"encoding/hex"
-	"gopkg.in/jcmturner/gokrb5.v7/credentials"
-	"gopkg.in/jcmturner/gokrb5.v7/gssapi"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
-	"gopkg.in/jcmturner/gokrb5.v7/messages"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
+
+	"github.com/jcmturner/gokrb5/v8/credentials"
+	"github.com/jcmturner/gokrb5/v8/gssapi"
+	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
+	"github.com/jcmturner/gokrb5/v8/messages"
+	"github.com/jcmturner/gokrb5/v8/types"
 )
 
 type KafkaGSSAPIHandler struct {
@@ -26,7 +27,7 @@
 		return []byte{0x00, 0x00, 0x00, 0x01, 0xAD}
 	}
 
-	var pack = gssapi.WrapToken{
+	pack := gssapi.WrapToken{
 		Flags:     KRB5_USER_AUTH,
 		EC:        12,
 		RRC:       0,
@@ -55,7 +56,6 @@
 }
 
 type MockKerberosClient struct {
-	asReqBytes  string
 	asRepBytes  string
 	ASRep       messages.ASRep
 	credentials *credentials.Credentials
@@ -108,8 +108,9 @@
 func (c *MockKerberosClient) Domain() string {
 	return "EXAMPLE.COM"
 }
+
 func (c *MockKerberosClient) CName() types.PrincipalName {
-	var p = types.PrincipalName{
+	p := types.PrincipalName{
 		NameType: KRB5_USER_AUTH,
 		NameString: []string{
 			"kafka",
@@ -118,6 +119,7 @@
 	}
 	return p
 }
+
 func (c *MockKerberosClient) Destroy() {
 	// Do nothing.
 }
diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go
index c78f0ac..6654ed0 100644
--- a/vendor/github.com/Shopify/sarama/mockresponses.go
+++ b/vendor/github.com/Shopify/sarama/mockresponses.go
@@ -18,20 +18,20 @@
 // allows generating a response based on a request body. MockResponses are used
 // to program behavior of MockBroker in tests.
 type MockResponse interface {
-	For(reqBody versionedDecoder) (res encoder)
+	For(reqBody versionedDecoder) (res encoderWithHeader)
 }
 
 // MockWrapper is a mock response builder that returns a particular concrete
 // response regardless of the actual request passed to the `For` method.
 type MockWrapper struct {
-	res encoder
+	res encoderWithHeader
 }
 
-func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
+func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoderWithHeader) {
 	return mw.res
 }
 
-func NewMockWrapper(res encoder) *MockWrapper {
+func NewMockWrapper(res encoderWithHeader) *MockWrapper {
 	return &MockWrapper{res: res}
 }
 
@@ -50,7 +50,7 @@
 		switch res := res.(type) {
 		case MockResponse:
 			ms.responses[i] = res
-		case encoder:
+		case encoderWithHeader:
 			ms.responses[i] = NewMockWrapper(res)
 		default:
 			panic(fmt.Sprintf("Unexpected response type: %T", res))
@@ -59,7 +59,7 @@
 	return ms
 }
 
-func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
+func (mc *MockSequence) For(reqBody versionedDecoder) (res encoderWithHeader) {
 	res = mc.responses[0].For(reqBody)
 	if len(mc.responses) > 1 {
 		mc.responses = mc.responses[1:]
@@ -79,7 +79,7 @@
 	}
 }
 
-func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoder {
+func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	request := reqBody.(*ListGroupsRequest)
 	_ = request
 	response := &ListGroupsResponse{
@@ -110,7 +110,7 @@
 	return m
 }
 
-func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoder {
+func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	request := reqBody.(*DescribeGroupsRequest)
 
 	response := &DescribeGroupsResponse{}
@@ -166,7 +166,7 @@
 	return mmr
 }
 
-func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
+func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	metadataRequest := reqBody.(*MetadataRequest)
 	metadataResponse := &MetadataResponse{
 		Version:      metadataRequest.version(),
@@ -177,8 +177,8 @@
 	}
 
 	// Generate set of replicas
-	replicas := []int32{}
-	offlineReplicas := []int32{}
+	var replicas []int32
+	var offlineReplicas []int32
 	for _, brokerID := range mmr.brokers {
 		replicas = append(replicas, brokerID)
 	}
@@ -233,7 +233,7 @@
 	return mor
 }
 
-func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
+func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	offsetRequest := reqBody.(*OffsetRequest)
 	offsetResponse := &OffsetResponse{Version: mor.version}
 	for topic, partitions := range offsetRequest.blocks {
@@ -309,7 +309,7 @@
 	return mfr
 }
 
-func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
+func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	fetchRequest := reqBody.(*FetchRequest)
 	res := &FetchResponse{
 		Version: mfr.version,
@@ -393,7 +393,7 @@
 	return mr
 }
 
-func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*ConsumerMetadataRequest)
 	group := req.ConsumerGroup
 	res := &ConsumerMetadataResponse{}
@@ -442,7 +442,7 @@
 	return mr
 }
 
-func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*FindCoordinatorRequest)
 	res := &FindCoordinatorResponse{}
 	var v interface{}
@@ -489,7 +489,7 @@
 	return mr
 }
 
-func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*OffsetCommitRequest)
 	group := req.ConsumerGroup
 	res := &OffsetCommitResponse{}
@@ -546,7 +546,7 @@
 	return mr
 }
 
-func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*ProduceRequest)
 	res := &ProduceResponse{
 		Version: mr.version,
@@ -605,7 +605,7 @@
 	return mr
 }
 
-func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*OffsetFetchRequest)
 	group := req.ConsumerGroup
 	res := &OffsetFetchResponse{Version: req.Version}
@@ -630,7 +630,7 @@
 	return &MockCreateTopicsResponse{t: t}
 }
 
-func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*CreateTopicsRequest)
 	res := &CreateTopicsResponse{
 		Version: req.Version,
@@ -659,7 +659,7 @@
 	return &MockDeleteTopicsResponse{t: t}
 }
 
-func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*DeleteTopicsRequest)
 	res := &DeleteTopicsResponse{}
 	res.TopicErrorCodes = make(map[string]KError)
@@ -667,6 +667,7 @@
 	for _, topic := range req.Topics {
 		res.TopicErrorCodes[topic] = ErrNoError
 	}
+	res.Version = req.Version
 	return res
 }
 
@@ -678,7 +679,7 @@
 	return &MockCreatePartitionsResponse{t: t}
 }
 
-func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*CreatePartitionsRequest)
 	res := &CreatePartitionsResponse{}
 	res.TopicPartitionErrors = make(map[string]*TopicPartitionError)
@@ -697,6 +698,43 @@
 	return res
 }
 
+type MockAlterPartitionReassignmentsResponse struct {
+	t TestReporter
+}
+
+func NewMockAlterPartitionReassignmentsResponse(t TestReporter) *MockAlterPartitionReassignmentsResponse {
+	return &MockAlterPartitionReassignmentsResponse{t: t}
+}
+
+func (mr *MockAlterPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*AlterPartitionReassignmentsRequest)
+	_ = req
+	res := &AlterPartitionReassignmentsResponse{}
+	return res
+}
+
+type MockListPartitionReassignmentsResponse struct {
+	t TestReporter
+}
+
+func NewMockListPartitionReassignmentsResponse(t TestReporter) *MockListPartitionReassignmentsResponse {
+	return &MockListPartitionReassignmentsResponse{t: t}
+}
+
+func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*ListPartitionReassignmentsRequest)
+	_ = req
+	res := &ListPartitionReassignmentsResponse{}
+
+	for topic, partitions := range req.blocks {
+		for _, partition := range partitions {
+			res.AddBlock(topic, partition, []int32{0}, []int32{1}, []int32{2})
+		}
+	}
+
+	return res
+}
+
 type MockDeleteRecordsResponse struct {
 	t TestReporter
 }
@@ -705,7 +743,7 @@
 	return &MockDeleteRecordsResponse{t: t}
 }
 
-func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*DeleteRecordsRequest)
 	res := &DeleteRecordsResponse{}
 	res.Topics = make(map[string]*DeleteRecordsResponseTopic)
@@ -728,31 +766,87 @@
 	return &MockDescribeConfigsResponse{t: t}
 }
 
-func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*DescribeConfigsRequest)
-	res := &DescribeConfigsResponse{}
+	res := &DescribeConfigsResponse{
+		Version: req.Version,
+	}
+
+	includeSynonyms := req.Version > 0
+	includeSource := req.Version > 0
 
 	for _, r := range req.Resources {
 		var configEntries []*ConfigEntry
 		switch r.Type {
-		case TopicResource:
+		case BrokerResource:
 			configEntries = append(configEntries,
-				&ConfigEntry{Name: "max.message.bytes",
-					Value:     "1000000",
-					ReadOnly:  false,
-					Default:   true,
-					Sensitive: false,
-				}, &ConfigEntry{Name: "retention.ms",
-					Value:     "5000",
-					ReadOnly:  false,
-					Default:   false,
-					Sensitive: false,
-				}, &ConfigEntry{Name: "password",
-					Value:     "12345",
-					ReadOnly:  false,
-					Default:   false,
-					Sensitive: true,
-				})
+				&ConfigEntry{
+					Name:     "min.insync.replicas",
+					Value:    "2",
+					ReadOnly: false,
+					Default:  false,
+				},
+			)
+			res.Resources = append(res.Resources, &ResourceResponse{
+				Name:    r.Name,
+				Configs: configEntries,
+			})
+		case BrokerLoggerResource:
+			configEntries = append(configEntries,
+				&ConfigEntry{
+					Name:     "kafka.controller.KafkaController",
+					Value:    "DEBUG",
+					ReadOnly: false,
+					Default:  false,
+				},
+			)
+			res.Resources = append(res.Resources, &ResourceResponse{
+				Name:    r.Name,
+				Configs: configEntries,
+			})
+		case TopicResource:
+			maxMessageBytes := &ConfigEntry{
+				Name:      "max.message.bytes",
+				Value:     "1000000",
+				ReadOnly:  false,
+				Default:   !includeSource,
+				Sensitive: false,
+			}
+			if includeSource {
+				maxMessageBytes.Source = SourceDefault
+			}
+			if includeSynonyms {
+				maxMessageBytes.Synonyms = []*ConfigSynonym{
+					{
+						ConfigName:  "max.message.bytes",
+						ConfigValue: "500000",
+					},
+				}
+			}
+			retentionMs := &ConfigEntry{
+				Name:      "retention.ms",
+				Value:     "5000",
+				ReadOnly:  false,
+				Default:   false,
+				Sensitive: false,
+			}
+			if includeSynonyms {
+				retentionMs.Synonyms = []*ConfigSynonym{
+					{
+						ConfigName:  "log.retention.ms",
+						ConfigValue: "2500",
+					},
+				}
+			}
+			password := &ConfigEntry{
+				Name:      "password",
+				Value:     "12345",
+				ReadOnly:  false,
+				Default:   false,
+				Sensitive: true,
+			}
+			configEntries = append(
+				configEntries, maxMessageBytes, retentionMs, password)
 			res.Resources = append(res.Resources, &ResourceResponse{
 				Name:    r.Name,
 				Configs: configEntries,
@@ -762,6 +856,31 @@
 	return res
 }
 
+type MockDescribeConfigsResponseWithErrorCode struct {
+	t TestReporter
+}
+
+func NewMockDescribeConfigsResponseWithErrorCode(t TestReporter) *MockDescribeConfigsResponseWithErrorCode {
+	return &MockDescribeConfigsResponseWithErrorCode{t: t}
+}
+
+func (mr *MockDescribeConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DescribeConfigsRequest)
+	res := &DescribeConfigsResponse{
+		Version: req.Version,
+	}
+
+	for _, r := range req.Resources {
+		res.Resources = append(res.Resources, &ResourceResponse{
+			Name:      r.Name,
+			Type:      r.Type,
+			ErrorCode: 83,
+			ErrorMsg:  "",
+		})
+	}
+	return res
+}
+
 type MockAlterConfigsResponse struct {
 	t TestReporter
 }
@@ -770,19 +889,43 @@
 	return &MockAlterConfigsResponse{t: t}
 }
 
-func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*AlterConfigsRequest)
 	res := &AlterConfigsResponse{}
 
 	for _, r := range req.Resources {
-		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{Name: r.Name,
-			Type:     TopicResource,
+		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
+			Name:     r.Name,
+			Type:     r.Type,
 			ErrorMsg: "",
 		})
 	}
 	return res
 }
 
+type MockAlterConfigsResponseWithErrorCode struct {
+	t TestReporter
+}
+
+func NewMockAlterConfigsResponseWithErrorCode(t TestReporter) *MockAlterConfigsResponseWithErrorCode {
+	return &MockAlterConfigsResponseWithErrorCode{t: t}
+}
+
+func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*AlterConfigsRequest)
+	res := &AlterConfigsResponse{}
+
+	for _, r := range req.Resources {
+		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
+			Name:      r.Name,
+			Type:      r.Type,
+			ErrorCode: 83,
+			ErrorMsg:  "",
+		})
+	}
+	return res
+}
+
 type MockCreateAclsResponse struct {
 	t TestReporter
 }
@@ -791,7 +934,7 @@
 	return &MockCreateAclsResponse{t: t}
 }
 
-func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*CreateAclsRequest)
 	res := &CreateAclsResponse{}
 
@@ -809,17 +952,35 @@
 	return &MockListAclsResponse{t: t}
 }
 
-func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*DescribeAclsRequest)
 	res := &DescribeAclsResponse{}
-
 	res.Err = ErrNoError
 	acl := &ResourceAcls{}
-	acl.Resource.ResourceName = *req.ResourceName
+	if req.ResourceName != nil {
+		acl.Resource.ResourceName = *req.ResourceName
+	}
+	acl.Resource.ResourcePatternType = req.ResourcePatternTypeFilter
 	acl.Resource.ResourceType = req.ResourceType
-	acl.Acls = append(acl.Acls, &Acl{})
-	res.ResourceAcls = append(res.ResourceAcls, acl)
 
+	host := "*"
+	if req.Host != nil {
+		host = *req.Host
+	}
+
+	principal := "User:test"
+	if req.Principal != nil {
+		principal = *req.Principal
+	}
+
+	permissionType := req.PermissionType
+	if permissionType == AclPermissionAny {
+		permissionType = AclPermissionAllow
+	}
+
+	acl.Acls = append(acl.Acls, &Acl{Operation: req.Operation, PermissionType: permissionType, Host: host, Principal: principal})
+	res.ResourceAcls = append(res.ResourceAcls, acl)
+	res.Version = int16(req.Version)
 	return res
 }
 
@@ -833,7 +994,7 @@
 	return &MockSaslAuthenticateResponse{t: t}
 }
 
-func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoder {
+func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	res := &SaslAuthenticateResponse{}
 	res.Err = msar.kerror
 	res.SaslAuthBytes = msar.saslAuthBytes
@@ -864,7 +1025,7 @@
 	return &MockSaslHandshakeResponse{t: t}
 }
 
-func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoder {
+func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	res := &SaslHandshakeResponse{}
 	res.Err = mshr.kerror
 	res.EnabledMechanisms = mshr.enabledMechanisms
@@ -885,7 +1046,7 @@
 	return &MockDeleteAclsResponse{t: t}
 }
 
-func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoder {
+func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	req := reqBody.(*DeleteAclsRequest)
 	res := &DeleteAclsResponse{}
 
@@ -894,6 +1055,7 @@
 		response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError})
 		res.FilterResponses = append(res.FilterResponses, response)
 	}
+	res.Version = int16(req.Version)
 	return res
 }
 
@@ -910,7 +1072,7 @@
 	return m
 }
 
-func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoder {
+func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
 	resp := &DeleteGroupsResponse{
 		GroupErrorCodes: map[string]KError{},
 	}
@@ -919,3 +1081,193 @@
 	}
 	return resp
 }
+
+type MockJoinGroupResponse struct {
+	t TestReporter
+
+	ThrottleTime  int32
+	Err           KError
+	GenerationId  int32
+	GroupProtocol string
+	LeaderId      string
+	MemberId      string
+	Members       map[string][]byte
+}
+
+func NewMockJoinGroupResponse(t TestReporter) *MockJoinGroupResponse {
+	return &MockJoinGroupResponse{
+		t:       t,
+		Members: make(map[string][]byte),
+	}
+}
+
+func (m *MockJoinGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*JoinGroupRequest)
+	resp := &JoinGroupResponse{
+		Version:       req.Version,
+		ThrottleTime:  m.ThrottleTime,
+		Err:           m.Err,
+		GenerationId:  m.GenerationId,
+		GroupProtocol: m.GroupProtocol,
+		LeaderId:      m.LeaderId,
+		MemberId:      m.MemberId,
+		Members:       m.Members,
+	}
+	return resp
+}
+
+func (m *MockJoinGroupResponse) SetThrottleTime(t int32) *MockJoinGroupResponse {
+	m.ThrottleTime = t
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetError(kerr KError) *MockJoinGroupResponse {
+	m.Err = kerr
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetGenerationId(id int32) *MockJoinGroupResponse {
+	m.GenerationId = id
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetGroupProtocol(proto string) *MockJoinGroupResponse {
+	m.GroupProtocol = proto
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetLeaderId(id string) *MockJoinGroupResponse {
+	m.LeaderId = id
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetMemberId(id string) *MockJoinGroupResponse {
+	m.MemberId = id
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetMember(id string, meta *ConsumerGroupMemberMetadata) *MockJoinGroupResponse {
+	bin, err := encode(meta, nil)
+	if err != nil {
+		panic(fmt.Sprintf("error encoding member metadata: %v", err))
+	}
+	m.Members[id] = bin
+	return m
+}
+
+type MockLeaveGroupResponse struct {
+	t TestReporter
+
+	Err KError
+}
+
+func NewMockLeaveGroupResponse(t TestReporter) *MockLeaveGroupResponse {
+	return &MockLeaveGroupResponse{t: t}
+}
+
+func (m *MockLeaveGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	resp := &LeaveGroupResponse{
+		Err: m.Err,
+	}
+	return resp
+}
+
+func (m *MockLeaveGroupResponse) SetError(kerr KError) *MockLeaveGroupResponse {
+	m.Err = kerr
+	return m
+}
+
+type MockSyncGroupResponse struct {
+	t TestReporter
+
+	Err              KError
+	MemberAssignment []byte
+}
+
+func NewMockSyncGroupResponse(t TestReporter) *MockSyncGroupResponse {
+	return &MockSyncGroupResponse{t: t}
+}
+
+func (m *MockSyncGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	resp := &SyncGroupResponse{
+		Err:              m.Err,
+		MemberAssignment: m.MemberAssignment,
+	}
+	return resp
+}
+
+func (m *MockSyncGroupResponse) SetError(kerr KError) *MockSyncGroupResponse {
+	m.Err = kerr
+	return m
+}
+
+func (m *MockSyncGroupResponse) SetMemberAssignment(assignment *ConsumerGroupMemberAssignment) *MockSyncGroupResponse {
+	bin, err := encode(assignment, nil)
+	if err != nil {
+		panic(fmt.Sprintf("error encoding member assignment: %v", err))
+	}
+	m.MemberAssignment = bin
+	return m
+}
+
+type MockHeartbeatResponse struct {
+	t TestReporter
+
+	Err KError
+}
+
+func NewMockHeartbeatResponse(t TestReporter) *MockHeartbeatResponse {
+	return &MockHeartbeatResponse{t: t}
+}
+
+func (m *MockHeartbeatResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	resp := &HeartbeatResponse{}
+	return resp
+}
+
+func (m *MockHeartbeatResponse) SetError(kerr KError) *MockHeartbeatResponse {
+	m.Err = kerr
+	return m
+}
+
+type MockDescribeLogDirsResponse struct {
+	t       TestReporter
+	logDirs []DescribeLogDirsResponseDirMetadata
+}
+
+func NewMockDescribeLogDirsResponse(t TestReporter) *MockDescribeLogDirsResponse {
+	return &MockDescribeLogDirsResponse{t: t}
+}
+
+func (m *MockDescribeLogDirsResponse) SetLogDirs(logDirPath string, topicPartitions map[string]int) *MockDescribeLogDirsResponse {
+	var topics []DescribeLogDirsResponseTopic
+	for topic := range topicPartitions {
+		var partitions []DescribeLogDirsResponsePartition
+		for i := 0; i < topicPartitions[topic]; i++ {
+			partitions = append(partitions, DescribeLogDirsResponsePartition{
+				PartitionID: int32(i),
+				IsTemporary: false,
+				OffsetLag:   int64(0),
+				Size:        int64(1234),
+			})
+		}
+		topics = append(topics, DescribeLogDirsResponseTopic{
+			Topic:      topic,
+			Partitions: partitions,
+		})
+	}
+	logDir := DescribeLogDirsResponseDirMetadata{
+		ErrorCode: ErrNoError,
+		Path:      logDirPath,
+		Topics:    topics,
+	}
+	m.logDirs = []DescribeLogDirsResponseDirMetadata{logDir}
+	return m
+}
+
+func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	resp := &DescribeLogDirsResponse{
+		LogDirs: m.logDirs,
+	}
+	return resp
+}
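The mocks above are normally wired up through SetHandlerByMap, keyed by request type name; a sketch using two of the newly added mocks (topic name, log dir path and partition counts are illustrative).

package example

import (
	"testing"

	"github.com/Shopify/sarama"
)

func TestNewMockResponses(t *testing.T) {
	broker := sarama.NewMockBroker(t, 1)
	defer broker.Close()

	broker.SetHandlerByMap(map[string]sarama.MockResponse{
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(broker.Addr(), broker.BrokerID()),
		"ListPartitionReassignmentsRequest": sarama.NewMockListPartitionReassignmentsResponse(t),
		"DescribeLogDirsRequest": sarama.NewMockDescribeLogDirsResponse(t).
			SetLogDirs("/tmp/kafka-logs", map[string]int{"example-topic": 2}),
	})
}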
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go
index 5732ed9..9931cad 100644
--- a/vendor/github.com/Shopify/sarama/offset_commit_request.go
+++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go
@@ -170,6 +170,10 @@
 	return r.Version
 }
 
+func (r *OffsetCommitRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go
index e842298..342260e 100644
--- a/vendor/github.com/Shopify/sarama/offset_commit_response.go
+++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go
@@ -94,6 +94,10 @@
 	return r.Version
 }
 
+func (r *OffsetCommitResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
index 6860824..7e147eb 100644
--- a/vendor/github.com/Shopify/sarama/offset_fetch_request.go
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
@@ -3,60 +3,155 @@
 type OffsetFetchRequest struct {
 	Version       int16
 	ConsumerGroup string
+	RequireStable bool // requires v7+
 	partitions    map[string][]int32
 }
 
 func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
-	if r.Version < 0 || r.Version > 5 {
+	if r.Version < 0 || r.Version > 7 {
 		return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
 	}
 
-	if err = pe.putString(r.ConsumerGroup); err != nil {
+	isFlexible := r.Version >= 6
+
+	if isFlexible {
+		err = pe.putCompactString(r.ConsumerGroup)
+	} else {
+		err = pe.putString(r.ConsumerGroup)
+	}
+	if err != nil {
 		return err
 	}
 
-	if r.Version >= 2 && r.partitions == nil {
-		pe.putInt32(-1)
-	} else {
-		if err = pe.putArrayLength(len(r.partitions)); err != nil {
-			return err
+	if isFlexible {
+		if r.partitions == nil {
+			pe.putUVarint(0)
+		} else {
+			pe.putCompactArrayLength(len(r.partitions))
 		}
-		for topic, partitions := range r.partitions {
-			if err = pe.putString(topic); err != nil {
-				return err
-			}
-			if err = pe.putInt32Array(partitions); err != nil {
+	} else {
+		if r.partitions == nil && r.Version >= 2 {
+			pe.putInt32(-1)
+		} else {
+			if err = pe.putArrayLength(len(r.partitions)); err != nil {
 				return err
 			}
 		}
 	}
+
+	for topic, partitions := range r.partitions {
+		if isFlexible {
+			err = pe.putCompactString(topic)
+		} else {
+			err = pe.putString(topic)
+		}
+		if err != nil {
+			return err
+		}
+
+		//
+
+		if isFlexible {
+			err = pe.putCompactInt32Array(partitions)
+		} else {
+			err = pe.putInt32Array(partitions)
+		}
+		if err != nil {
+			return err
+		}
+
+		if isFlexible {
+			pe.putEmptyTaggedFieldArray()
+		}
+	}
+
+	if r.RequireStable && r.Version < 7 {
+		return PacketEncodingError{"requireStable is not supported. use version 7 or later"}
+	}
+
+	if r.Version >= 7 {
+		pe.putBool(r.RequireStable)
+	}
+
+	if isFlexible {
+		pe.putEmptyTaggedFieldArray()
+	}
+
 	return nil
 }
 
 func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
 	r.Version = version
-	if r.ConsumerGroup, err = pd.getString(); err != nil {
-		return err
+	isFlexible := r.Version >= 6
+	if isFlexible {
+		r.ConsumerGroup, err = pd.getCompactString()
+	} else {
+		r.ConsumerGroup, err = pd.getString()
 	}
-	partitionCount, err := pd.getArrayLength()
 	if err != nil {
 		return err
 	}
+
+	var partitionCount int
+
+	if isFlexible {
+		partitionCount, err = pd.getCompactArrayLength()
+	} else {
+		partitionCount, err = pd.getArrayLength()
+	}
+	if err != nil {
+		return err
+	}
+
 	if (partitionCount == 0 && version < 2) || partitionCount < 0 {
 		return nil
 	}
-	r.partitions = make(map[string][]int32)
+
+	r.partitions = make(map[string][]int32, partitionCount)
 	for i := 0; i < partitionCount; i++ {
-		topic, err := pd.getString()
+		var topic string
+		if isFlexible {
+			topic, err = pd.getCompactString()
+		} else {
+			topic, err = pd.getString()
+		}
 		if err != nil {
 			return err
 		}
-		partitions, err := pd.getInt32Array()
+
+		var partitions []int32
+		if isFlexible {
+			partitions, err = pd.getCompactInt32Array()
+		} else {
+			partitions, err = pd.getInt32Array()
+		}
 		if err != nil {
 			return err
 		}
+		if isFlexible {
+			_, err = pd.getEmptyTaggedFieldArray()
+			if err != nil {
+				return err
+			}
+		}
+
 		r.partitions[topic] = partitions
 	}
+
+	if r.Version >= 7 {
+		r.RequireStable, err = pd.getBool()
+		if err != nil {
+			return err
+		}
+	}
+
+	if isFlexible {
+		_, err = pd.getEmptyTaggedFieldArray()
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
@@ -68,6 +163,14 @@
 	return r.Version
 }
 
+func (r *OffsetFetchRequest) headerVersion() int16 {
+	if r.Version >= 6 {
+		return 2
+	}
+
+	return 1
+}
+
 func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
@@ -80,6 +183,10 @@
 		return V2_0_0_0
 	case 5:
 		return V2_1_0_0
+	case 6:
+		return V2_4_0_0
+	case 7:
+		return V2_5_0_0
 	default:
 		return MinVersion
 	}
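A sketch of a version 7 request using the new RequireStable flag; it assumes the existing AddPartition helper in this file (not shown in the hunk above) for populating the partition map.

package example

import "github.com/Shopify/sarama"

// stableOffsetFetch builds an OffsetFetchRequest at version 7: flexible encoding
// starts at v6, and RequireStable needs v7 / brokers at V2_5_0_0 per requiredVersion.
func stableOffsetFetch(group string) *sarama.OffsetFetchRequest {
	req := &sarama.OffsetFetchRequest{
		Version:       7,
		ConsumerGroup: group,
		RequireStable: true,
	}
	req.AddPartition("example-topic", 0)
	return req
}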
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
index 9e25702..1944922 100644
--- a/vendor/github.com/Shopify/sarama/offset_fetch_response.go
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
@@ -8,6 +8,8 @@
 }
 
 func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	isFlexible := version >= 6
+
 	b.Offset, err = pd.getInt64()
 	if err != nil {
 		return err
@@ -20,7 +22,11 @@
 		}
 	}
 
-	b.Metadata, err = pd.getString()
+	if isFlexible {
+		b.Metadata, err = pd.getCompactString()
+	} else {
+		b.Metadata, err = pd.getString()
+	}
 	if err != nil {
 		return err
 	}
@@ -31,23 +37,37 @@
 	}
 	b.Err = KError(tmp)
 
+	if isFlexible {
+		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
 func (b *OffsetFetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	isFlexible := version >= 6
 	pe.putInt64(b.Offset)
 
 	if version >= 5 {
 		pe.putInt32(b.LeaderEpoch)
 	}
-
-	err = pe.putString(b.Metadata)
+	if isFlexible {
+		err = pe.putCompactString(b.Metadata)
+	} else {
+		err = pe.putString(b.Metadata)
+	}
 	if err != nil {
 		return err
 	}
 
 	pe.putInt16(int16(b.Err))
 
+	if isFlexible {
+		pe.putEmptyTaggedFieldArray()
+	}
+
 	return nil
 }
 
@@ -58,19 +78,37 @@
 	Err            KError
 }
 
-func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
+func (r *OffsetFetchResponse) encode(pe packetEncoder) (err error) {
+	isFlexible := r.Version >= 6
+
 	if r.Version >= 3 {
 		pe.putInt32(r.ThrottleTimeMs)
 	}
-
-	if err := pe.putArrayLength(len(r.Blocks)); err != nil {
+	if isFlexible {
+		pe.putCompactArrayLength(len(r.Blocks))
+	} else {
+		err = pe.putArrayLength(len(r.Blocks))
+	}
+	if err != nil {
 		return err
 	}
+
 	for topic, partitions := range r.Blocks {
-		if err := pe.putString(topic); err != nil {
+		if isFlexible {
+			err = pe.putCompactString(topic)
+		} else {
+			err = pe.putString(topic)
+		}
+		if err != nil {
 			return err
 		}
-		if err := pe.putArrayLength(len(partitions)); err != nil {
+
+		if isFlexible {
+			pe.putCompactArrayLength(len(partitions))
+		} else {
+			err = pe.putArrayLength(len(partitions))
+		}
+		if err != nil {
 			return err
 		}
 		for partition, block := range partitions {
@@ -79,15 +117,22 @@
 				return err
 			}
 		}
+		if isFlexible {
+			pe.putEmptyTaggedFieldArray()
+		}
 	}
 	if r.Version >= 2 {
 		pe.putInt16(int16(r.Err))
 	}
+	if isFlexible {
+		pe.putEmptyTaggedFieldArray()
+	}
 	return nil
 }
 
 func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
 	r.Version = version
+	isFlexible := version >= 6
 
 	if version >= 3 {
 		r.ThrottleTimeMs, err = pd.getInt32()
@@ -96,7 +141,12 @@
 		}
 	}
 
-	numTopics, err := pd.getArrayLength()
+	var numTopics int
+	if isFlexible {
+		numTopics, err = pd.getCompactArrayLength()
+	} else {
+		numTopics, err = pd.getArrayLength()
+	}
 	if err != nil {
 		return err
 	}
@@ -104,22 +154,30 @@
 	if numTopics > 0 {
 		r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
 		for i := 0; i < numTopics; i++ {
-			name, err := pd.getString()
+			var name string
+			if isFlexible {
+				name, err = pd.getCompactString()
+			} else {
+				name, err = pd.getString()
+			}
 			if err != nil {
 				return err
 			}
 
-			numBlocks, err := pd.getArrayLength()
+			var numBlocks int
+			if isFlexible {
+				numBlocks, err = pd.getCompactArrayLength()
+			} else {
+				numBlocks, err = pd.getArrayLength()
+			}
 			if err != nil {
 				return err
 			}
 
-			if numBlocks == 0 {
-				r.Blocks[name] = nil
-				continue
+			r.Blocks[name] = nil
+			if numBlocks > 0 {
+				r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
 			}
-			r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
-
 			for j := 0; j < numBlocks; j++ {
 				id, err := pd.getInt32()
 				if err != nil {
@@ -131,8 +189,15 @@
 				if err != nil {
 					return err
 				}
+
 				r.Blocks[name][id] = block
 			}
+
+			if isFlexible {
+				if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+					return err
+				}
+			}
 		}
 	}
 
@@ -144,6 +209,12 @@
 		r.Err = KError(kerr)
 	}
 
+	if isFlexible {
+		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
@@ -155,6 +226,14 @@
 	return r.Version
 }
 
+func (r *OffsetFetchResponse) headerVersion() int16 {
+	if r.Version >= 6 {
+		return 1
+	}
+
+	return 0
+}
+
 func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
@@ -167,6 +246,10 @@
 		return V2_0_0_0
 	case 5:
 		return V2_1_0_0
+	case 6:
+		return V2_4_0_0
+	case 7:
+		return V2_5_0_0
 	default:
 		return MinVersion
 	}
diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go
index 923972f..4f480a0 100644
--- a/vendor/github.com/Shopify/sarama/offset_manager.go
+++ b/vendor/github.com/Shopify/sarama/offset_manager.go
@@ -19,6 +19,10 @@
 	// will otherwise leak memory. You must call this after all the
 	// PartitionOffsetManagers are closed.
 	Close() error
+
+	// Commit commits the offsets. This method can be used if AutoCommit.Enable is
+	// set to false.
+	Commit()
 }
 
 type offsetManager struct {
@@ -58,7 +62,6 @@
 		client: client,
 		conf:   conf,
 		group:  group,
-		ticker: time.NewTicker(conf.Consumer.Offsets.CommitInterval),
 		poms:   make(map[string]map[int32]*partitionOffsetManager),
 
 		memberID:   memberID,
@@ -67,7 +70,10 @@
 		closing: make(chan none),
 		closed:  make(chan none),
 	}
-	go withRecover(om.mainLoop)
+	if conf.Consumer.Offsets.AutoCommit.Enable {
+		om.ticker = time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval)
+		go withRecover(om.mainLoop)
+	}
 
 	return om, nil
 }
@@ -99,16 +105,20 @@
 	om.closeOnce.Do(func() {
 		// exit the mainLoop
 		close(om.closing)
-		<-om.closed
+		if om.conf.Consumer.Offsets.AutoCommit.Enable {
+			<-om.closed
+		}
 
 		// mark all POMs as closed
 		om.asyncClosePOMs()
 
 		// flush one last time
-		for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ {
-			om.flushToBroker()
-			if om.releasePOMs(false) == 0 {
-				break
+		if om.conf.Consumer.Offsets.AutoCommit.Enable {
+			for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ {
+				om.flushToBroker()
+				if om.releasePOMs(false) == 0 {
+					break
+				}
 			}
 		}
 
@@ -225,14 +235,18 @@
 	for {
 		select {
 		case <-om.ticker.C:
-			om.flushToBroker()
-			om.releasePOMs(false)
+			om.Commit()
 		case <-om.closing:
 			return
 		}
 	}
 }
 
+func (om *offsetManager) Commit() {
+	om.flushToBroker()
+	om.releasePOMs(false)
+}
+
 func (om *offsetManager) flushToBroker() {
 	req := om.constructRequest()
 	if req == nil {
@@ -275,7 +289,6 @@
 			ConsumerID:              om.memberID,
 			ConsumerGroupGeneration: om.generation,
 		}
-
 	}
 
 	om.pomsLock.RLock()
diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go
index 326c372..4c9ce4d 100644
--- a/vendor/github.com/Shopify/sarama/offset_request.go
+++ b/vendor/github.com/Shopify/sarama/offset_request.go
@@ -6,7 +6,7 @@
 }
 
 func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
-	pe.putInt64(int64(b.time))
+	pe.putInt64(b.time)
 	if version == 0 {
 		pe.putInt32(b.maxOffsets)
 	}
@@ -28,6 +28,7 @@
 
 type OffsetRequest struct {
 	Version        int16
+	IsolationLevel IsolationLevel
 	replicaID      int32
 	isReplicaIDSet bool
 	blocks         map[string]map[int32]*offsetRequestBlock
@@ -41,6 +42,10 @@
 		pe.putInt32(-1)
 	}
 
+	if r.Version >= 2 {
+		pe.putBool(r.IsolationLevel == ReadCommitted)
+	}
+
 	err := pe.putArrayLength(len(r.blocks))
 	if err != nil {
 		return err
@@ -75,6 +80,18 @@
 		r.SetReplicaID(replicaID)
 	}
 
+	if r.Version >= 2 {
+		tmp, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+
+		r.IsolationLevel = ReadUncommitted
+		if tmp {
+			r.IsolationLevel = ReadCommitted
+		}
+	}
+
 	blockCount, err := pd.getArrayLength()
 	if err != nil {
 		return err
@@ -116,10 +133,16 @@
 	return r.Version
 }
 
+func (r *OffsetRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *OffsetRequest) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
 		return V0_10_1_0
+	case 2:
+		return V0_11_0_0
 	default:
 		return MinVersion
 	}
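A sketch of a version 2 request using the new isolation-level field (topic and partition are placeholders); the existing AddBlock helper in this file is assumed for adding the partition block.

package example

import "github.com/Shopify/sarama"

// committedOffsetsRequest asks for the newest offset under read_committed, i.e.
// the broker reports the last stable offset rather than the high watermark.
func committedOffsetsRequest() *sarama.OffsetRequest {
	req := &sarama.OffsetRequest{
		Version:        2, // V0_11_0_0+ per requiredVersion above
		IsolationLevel: sarama.ReadCommitted,
	}
	req.AddBlock("example-topic", 0, sarama.OffsetNewest, 1)
	return req
}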
diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go
index 8b2193f..69349ef 100644
--- a/vendor/github.com/Shopify/sarama/offset_response.go
+++ b/vendor/github.com/Shopify/sarama/offset_response.go
@@ -50,11 +50,19 @@
 }
 
 type OffsetResponse struct {
-	Version int16
-	Blocks  map[string]map[int32]*OffsetResponseBlock
+	Version        int16
+	ThrottleTimeMs int32
+	Blocks         map[string]map[int32]*OffsetResponseBlock
 }
 
 func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
+	if version >= 2 {
+		r.ThrottleTimeMs, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
 	numTopics, err := pd.getArrayLength()
 	if err != nil {
 		return err
@@ -120,6 +128,10 @@
 
 */
 func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
+	if r.Version >= 2 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+
 	if err = pe.putArrayLength(len(r.Blocks)); err != nil {
 		return err
 	}
@@ -150,10 +162,16 @@
 	return r.Version
 }
 
+func (r *OffsetResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *OffsetResponse) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
 		return V0_10_1_0
+	case 2:
+		return V0_11_0_0
 	default:
 		return MinVersion
 	}
diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go
index 9be854c..184bc26 100644
--- a/vendor/github.com/Shopify/sarama/packet_decoder.go
+++ b/vendor/github.com/Shopify/sarama/packet_decoder.go
@@ -10,15 +10,22 @@
 	getInt32() (int32, error)
 	getInt64() (int64, error)
 	getVarint() (int64, error)
+	getUVarint() (uint64, error)
 	getArrayLength() (int, error)
+	getCompactArrayLength() (int, error)
 	getBool() (bool, error)
+	getEmptyTaggedFieldArray() (int, error)
 
 	// Collections
 	getBytes() ([]byte, error)
 	getVarintBytes() ([]byte, error)
+	getCompactBytes() ([]byte, error)
 	getRawBytes(length int) ([]byte, error)
 	getString() (string, error)
 	getNullableString() (*string, error)
+	getCompactString() (string, error)
+	getCompactNullableString() (*string, error)
+	getCompactInt32Array() ([]int32, error)
 	getInt32Array() ([]int32, error)
 	getInt64Array() ([]int64, error)
 	getStringArray() ([]string, error)
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go
index 67b8dae..aea53ca 100644
--- a/vendor/github.com/Shopify/sarama/packet_encoder.go
+++ b/vendor/github.com/Shopify/sarama/packet_encoder.go
@@ -12,18 +12,26 @@
 	putInt32(in int32)
 	putInt64(in int64)
 	putVarint(in int64)
+	putUVarint(in uint64)
+	putCompactArrayLength(in int)
 	putArrayLength(in int) error
 	putBool(in bool)
 
 	// Collections
 	putBytes(in []byte) error
 	putVarintBytes(in []byte) error
+	putCompactBytes(in []byte) error
 	putRawBytes(in []byte) error
+	putCompactString(in string) error
+	putNullableCompactString(in *string) error
 	putString(in string) error
 	putNullableString(in *string) error
 	putStringArray(in []string) error
+	putCompactInt32Array(in []int32) error
+	putNullableCompactInt32Array(in []int32) error
 	putInt32Array(in []int32) error
 	putInt64Array(in []int64) error
+	putEmptyTaggedFieldArray()
 
 	// Provide the current offset to record the batch size metric
 	offset() int
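The new compact/uvarint methods follow the KIP-482 flexible-version convention: lengths are written as an unsigned varint of N+1 (so 0 encodes null and 1 an empty value), and a tagged-field section is closed with a 0 count. An illustrative standalone helper, not part of the library:

package main

import (
	"encoding/binary"
	"fmt"
)

// compactString mirrors what putCompactString does: uvarint(len+1) followed by
// the raw bytes.
func compactString(s string) []byte {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, uint64(len(s))+1)
	return append(buf[:n], s...)
}

func main() {
	fmt.Printf("% x\n", compactString("ok")) // prints: 03 6f 6b
}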
diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go
index 6a708e7..a66e11e 100644
--- a/vendor/github.com/Shopify/sarama/partitioner.go
+++ b/vendor/github.com/Shopify/sarama/partitioner.go
@@ -42,7 +42,7 @@
 
 type manualPartitioner struct{}
 
-// HashPartitionOption lets you modify default values of the partitioner
+// HashPartitionerOption lets you modify default values of the partitioner
 type HashPartitionerOption func(*hashPartitioner)
 
 // WithAbsFirst means that the partitioner handles absolute values
diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go
index b633cd1..0d01374 100644
--- a/vendor/github.com/Shopify/sarama/prep_encoder.go
+++ b/vendor/github.com/Shopify/sarama/prep_encoder.go
@@ -2,6 +2,7 @@
 
 import (
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"math"
 
@@ -36,6 +37,11 @@
 	pe.length += binary.PutVarint(buf[:], in)
 }
 
+func (pe *prepEncoder) putUVarint(in uint64) {
+	var buf [binary.MaxVarintLen64]byte
+	pe.length += binary.PutUvarint(buf[:], in)
+}
+
 func (pe *prepEncoder) putArrayLength(in int) error {
 	if in > math.MaxInt32 {
 		return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
@@ -44,6 +50,10 @@
 	return nil
 }
 
+func (pe *prepEncoder) putCompactArrayLength(in int) {
+	pe.putUVarint(uint64(in + 1))
+}
+
 func (pe *prepEncoder) putBool(in bool) {
 	pe.length++
 }
@@ -67,6 +77,25 @@
 	return pe.putRawBytes(in)
 }
 
+func (pe *prepEncoder) putCompactBytes(in []byte) error {
+	pe.putUVarint(uint64(len(in) + 1))
+	return pe.putRawBytes(in)
+}
+
+func (pe *prepEncoder) putCompactString(in string) error {
+	pe.putCompactArrayLength(len(in))
+	return pe.putRawBytes([]byte(in))
+}
+
+func (pe *prepEncoder) putNullableCompactString(in *string) error {
+	if in == nil {
+		pe.putUVarint(0)
+		return nil
+	} else {
+		return pe.putCompactString(*in)
+	}
+}
+
 func (pe *prepEncoder) putRawBytes(in []byte) error {
 	if len(in) > math.MaxInt32 {
 		return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
@@ -107,6 +136,27 @@
 	return nil
 }
 
+func (pe *prepEncoder) putCompactInt32Array(in []int32) error {
+	if in == nil {
+		return errors.New("expected int32 array to be non null")
+	}
+
+	pe.putUVarint(uint64(len(in)) + 1)
+	pe.length += 4 * len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putNullableCompactInt32Array(in []int32) error {
+	if in == nil {
+		pe.putUVarint(0)
+		return nil
+	}
+
+	pe.putUVarint(uint64(len(in)) + 1)
+	pe.length += 4 * len(in)
+	return nil
+}
+
 func (pe *prepEncoder) putInt32Array(in []int32) error {
 	err := pe.putArrayLength(len(in))
 	if err != nil {
@@ -125,6 +175,10 @@
 	return nil
 }
 
+func (pe *prepEncoder) putEmptyTaggedFieldArray() {
+	pe.putUVarint(0)
+}
+
 func (pe *prepEncoder) offset() int {
 	return pe.length
 }
diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go
index 0c755d0..0034651 100644
--- a/vendor/github.com/Shopify/sarama/produce_request.go
+++ b/vendor/github.com/Shopify/sarama/produce_request.go
@@ -206,6 +206,10 @@
 	return r.Version
 }
 
+func (r *ProduceRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *ProduceRequest) requiredVersion() KafkaVersion {
 	switch r.Version {
 	case 1:
@@ -214,6 +218,8 @@
 		return V0_10_0_0
 	case 3:
 		return V0_11_0_0
+	case 7:
+		return V2_1_0_0
 	default:
 		return MinVersion
 	}
diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go
index 4c5cd35..edf9787 100644
--- a/vendor/github.com/Shopify/sarama/produce_response.go
+++ b/vendor/github.com/Shopify/sarama/produce_response.go
@@ -5,11 +5,27 @@
 	"time"
 )
 
+// Protocol, http://kafka.apache.org/protocol.html
+// v1
+// v2 = v3 = v4
+// v5 = v6 = v7
+// Produce Response (Version: 7) => [responses] throttle_time_ms
+//   responses => topic [partition_responses]
+//     topic => STRING
+//     partition_responses => partition error_code base_offset log_append_time log_start_offset
+//       partition => INT32
+//       error_code => INT16
+//       base_offset => INT64
+//       log_append_time => INT64
+//       log_start_offset => INT64
+//   throttle_time_ms => INT32
+
+// partition_responses in protocol
 type ProduceResponseBlock struct {
-	Err    KError
-	Offset int64
-	// only provided if Version >= 2 and the broker is configured with `LogAppendTime`
-	Timestamp time.Time
+	Err         KError    // v0, error_code
+	Offset      int64     // v0, base_offset
+	Timestamp   time.Time // v2, log_append_time, and the broker is configured with `LogAppendTime`
+	StartOffset int64     // v5, log_start_offset
 }
 
 func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
@@ -32,6 +48,13 @@
 		}
 	}
 
+	if version >= 5 {
+		b.StartOffset, err = pd.getInt64()
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
@@ -49,13 +72,17 @@
 		pe.putInt64(timestamp)
 	}
 
+	if version >= 5 {
+		pe.putInt64(b.StartOffset)
+	}
+
 	return nil
 }
 
 type ProduceResponse struct {
-	Blocks       map[string]map[int32]*ProduceResponseBlock
+	Blocks       map[string]map[int32]*ProduceResponseBlock // v0, responses
 	Version      int16
-	ThrottleTime time.Duration // only provided if Version >= 1
+	ThrottleTime time.Duration // v1, throttle_time_ms
 }
 
 func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
@@ -129,6 +156,7 @@
 			}
 		}
 	}
+
 	if r.Version >= 1 {
 		pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
 	}
@@ -143,17 +171,12 @@
 	return r.Version
 }
 
+func (r *ProduceResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *ProduceResponse) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_9_0_0
-	case 2:
-		return V0_10_0_0
-	case 3:
-		return V0_11_0_0
-	default:
-		return MinVersion
-	}
+	return MinVersion
 }
 
 func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go
index bba0f7e..9c70f81 100644
--- a/vendor/github.com/Shopify/sarama/produce_set.go
+++ b/vendor/github.com/Shopify/sarama/produce_set.go
@@ -13,17 +13,22 @@
 }
 
 type produceSet struct {
-	parent *asyncProducer
-	msgs   map[string]map[int32]*partitionSet
+	parent        *asyncProducer
+	msgs          map[string]map[int32]*partitionSet
+	producerID    int64
+	producerEpoch int16
 
 	bufferBytes int
 	bufferCount int
 }
 
 func newProduceSet(parent *asyncProducer) *produceSet {
+	pid, epoch := parent.txnmgr.getProducerID()
 	return &produceSet{
-		msgs:   make(map[string]map[int32]*partitionSet),
-		parent: parent,
+		msgs:          make(map[string]map[int32]*partitionSet),
+		parent:        parent,
+		producerID:    pid,
+		producerEpoch: epoch,
 	}
 }
 
@@ -44,9 +49,10 @@
 	}
 
 	timestamp := msg.Timestamp
-	if msg.Timestamp.IsZero() {
+	if timestamp.IsZero() {
 		timestamp = time.Now()
 	}
+	timestamp = timestamp.Truncate(time.Millisecond)
 
 	partitions := ps.msgs[msg.Topic]
 	if partitions == nil {
@@ -64,8 +70,8 @@
 				Version:          2,
 				Codec:            ps.parent.conf.Producer.Compression,
 				CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
-				ProducerID:       ps.parent.txnmgr.producerID,
-				ProducerEpoch:    ps.parent.txnmgr.producerEpoch,
+				ProducerID:       ps.producerID,
+				ProducerEpoch:    ps.producerEpoch,
 			}
 			if ps.parent.conf.Producer.Idempotent {
 				batch.FirstSequence = msg.sequenceNumber
@@ -77,12 +83,17 @@
 		}
 		partitions[msg.Partition] = set
 	}
-	set.msgs = append(set.msgs, msg)
 
 	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
 		if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence {
 			return errors.New("assertion failed: message out of sequence added to a batch")
 		}
+	}
+
+	// Past this point we must not return an error: the message is appended to
+	// the set below, and callers treat a returned error as "message not added".
+	set.msgs = append(set.msgs, msg)
+
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
 		// We are being conservative here to avoid having to prep encode the record
 		size += maximumRecordOverhead
 		rec := &Record{
@@ -128,6 +139,10 @@
 		req.Version = 3
 	}
 
+	if ps.parent.conf.Producer.Compression == CompressionZSTD && ps.parent.conf.Version.IsAtLeast(V2_1_0_0) {
+		req.Version = 7
+	}
+
 	for topic, partitionSets := range ps.msgs {
 		for partition, set := range partitionSets {
 			if req.Version >= 3 {
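
buildRequest (above) only switches to ProduceRequest version 7 when ZSTD compression is configured and the negotiated cluster version is at least 2.1.0. A minimal sketch of the corresponding producer configuration using sarama's public API; the broker address and topic name are placeholders:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0                     // ZSTD requires Kafka >= 2.1.0
	cfg.Producer.Compression = sarama.CompressionZSTD // selects the version-7 produce path
	cfg.Producer.Return.Successes = true

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	if _, _, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	}); err != nil {
		log.Fatal(err)
	}
}
```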
diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go
index 085cbb3..2482c63 100644
--- a/vendor/github.com/Shopify/sarama/real_decoder.go
+++ b/vendor/github.com/Shopify/sarama/real_decoder.go
@@ -5,13 +5,15 @@
 	"math"
 )
 
-var errInvalidArrayLength = PacketDecodingError{"invalid array length"}
-var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
-var errInvalidByteSliceLengthType = PacketDecodingError{"invalid byteslice length type"}
-var errInvalidStringLength = PacketDecodingError{"invalid string length"}
-var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"}
-var errVarintOverflow = PacketDecodingError{"varint overflow"}
-var errInvalidBool = PacketDecodingError{"invalid bool"}
+var (
+	errInvalidArrayLength      = PacketDecodingError{"invalid array length"}
+	errInvalidByteSliceLength  = PacketDecodingError{"invalid byteslice length"}
+	errInvalidStringLength     = PacketDecodingError{"invalid string length"}
+	errVarintOverflow          = PacketDecodingError{"varint overflow"}
+	errUVarintOverflow         = PacketDecodingError{"uvarint overflow"}
+	errInvalidBool             = PacketDecodingError{"invalid bool"}
+	errUnsupportedTaggedFields = PacketDecodingError{"non-empty tagged fields are not supported yet"}
+)
 
 type realDecoder struct {
 	raw   []byte
@@ -75,6 +77,22 @@
 	return tmp, nil
 }
 
+func (rd *realDecoder) getUVarint() (uint64, error) {
+	tmp, n := binary.Uvarint(rd.raw[rd.off:])
+	if n == 0 {
+		rd.off = len(rd.raw)
+		return 0, ErrInsufficientData
+	}
+
+	if n < 0 {
+		rd.off -= n
+		return 0, errUVarintOverflow
+	}
+
+	rd.off += n
+	return tmp, nil
+}
+
 func (rd *realDecoder) getArrayLength() (int, error) {
 	if rd.remaining() < 4 {
 		rd.off = len(rd.raw)
@@ -91,6 +109,19 @@
 	return tmp, nil
 }
 
+func (rd *realDecoder) getCompactArrayLength() (int, error) {
+	n, err := rd.getUVarint()
+	if err != nil {
+		return 0, err
+	}
+
+	if n == 0 {
+		return 0, nil
+	}
+
+	return int(n) - 1, nil
+}
+
 func (rd *realDecoder) getBool() (bool, error) {
 	b, err := rd.getInt8()
 	if err != nil || b == 0 {
@@ -102,6 +133,19 @@
 	return true, nil
 }
 
+func (rd *realDecoder) getEmptyTaggedFieldArray() (int, error) {
+	tagCount, err := rd.getUVarint()
+	if err != nil {
+		return 0, err
+	}
+
+	if tagCount != 0 {
+		return 0, errUnsupportedTaggedFields
+	}
+
+	return 0, nil
+}
+
 // collections
 
 func (rd *realDecoder) getBytes() ([]byte, error) {
@@ -128,6 +172,16 @@
 	return rd.getRawBytes(int(tmp))
 }
 
+func (rd *realDecoder) getCompactBytes() ([]byte, error) {
+	n, err := rd.getUVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	length := int(n - 1)
+	return rd.getRawBytes(length)
+}
+
 func (rd *realDecoder) getStringLength() (int, error) {
 	length, err := rd.getInt16()
 	if err != nil {
@@ -169,6 +223,57 @@
 	return &tmpStr, err
 }
 
+func (rd *realDecoder) getCompactString() (string, error) {
+	n, err := rd.getUVarint()
+	if err != nil {
+		return "", err
+	}
+
+	length := int(n - 1)
+	if length < 0 {
+		return "", errInvalidByteSliceLength
+	}
+
+	tmpStr := string(rd.raw[rd.off : rd.off+length])
+	rd.off += length
+	return tmpStr, nil
+}
+
+func (rd *realDecoder) getCompactNullableString() (*string, error) {
+	n, err := rd.getUVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	length := int(n - 1)
+
+	if length < 0 {
+		// a stored length of 0 (length == -1 here) encodes a null string
+		return nil, nil
+	}
+
+	tmpStr := string(rd.raw[rd.off : rd.off+length])
+	rd.off += length
+	return &tmpStr, nil
+}
+
+func (rd *realDecoder) getCompactInt32Array() ([]int32, error) {
+	n, err := rd.getUVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	if n == 0 {
+		return nil, nil
+	}
+
+	arrayLength := int(n) - 1
+
+	ret := make([]int32, arrayLength)
+
+	for i := range ret {
+		ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+		rd.off += 4
+	}
+	return ret, nil
+}
+
 func (rd *realDecoder) getInt32Array() ([]int32, error) {
 	if rd.remaining() < 4 {
 		rd.off = len(rd.raw)
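
The new getUVarint/getCompact* helpers implement the "compact" encodings of Kafka's flexible request versions, where a length is stored as an unsigned varint of N+1 and a stored 0 means null. A standalone round-trip sketch of that convention (illustrative only, not sarama's internal packetEncoder/packetDecoder):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// putCompactString appends uvarint(len+1) followed by the raw bytes.
func putCompactString(dst []byte, s string) []byte {
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], uint64(len(s))+1)
	dst = append(dst, tmp[:n]...)
	return append(dst, s...)
}

// getCompactString reads a compact string; a stored 0 means null (ok == false).
func getCompactString(src []byte) (s string, ok bool, rest []byte, err error) {
	n, used := binary.Uvarint(src)
	if used <= 0 {
		return "", false, nil, fmt.Errorf("invalid uvarint")
	}
	src = src[used:]
	if n == 0 {
		return "", false, src, nil // null string
	}
	length := int(n - 1)
	if length > len(src) {
		return "", false, nil, fmt.Errorf("short buffer: need %d bytes", length)
	}
	return string(src[:length]), true, src[length:], nil
}

func main() {
	buf := putCompactString(nil, "sarama")
	s, ok, _, err := getCompactString(buf)
	fmt.Println(s, ok, err) // "sarama" true <nil>
}
```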
diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go
index 3c75387..c07204c 100644
--- a/vendor/github.com/Shopify/sarama/real_encoder.go
+++ b/vendor/github.com/Shopify/sarama/real_encoder.go
@@ -2,6 +2,7 @@
 
 import (
 	"encoding/binary"
+	"errors"
 
 	"github.com/rcrowley/go-metrics"
 )
@@ -39,11 +40,20 @@
 	re.off += binary.PutVarint(re.raw[re.off:], in)
 }
 
+func (re *realEncoder) putUVarint(in uint64) {
+	re.off += binary.PutUvarint(re.raw[re.off:], in)
+}
+
 func (re *realEncoder) putArrayLength(in int) error {
 	re.putInt32(int32(in))
 	return nil
 }
 
+func (re *realEncoder) putCompactArrayLength(in int) {
+	// 0 represents a null array, so +1 has to be added
+	re.putUVarint(uint64(in + 1))
+}
+
 func (re *realEncoder) putBool(in bool) {
 	if in {
 		re.putInt8(1)
@@ -78,6 +88,24 @@
 	return re.putRawBytes(in)
 }
 
+func (re *realEncoder) putCompactBytes(in []byte) error {
+	re.putUVarint(uint64(len(in) + 1))
+	return re.putRawBytes(in)
+}
+
+func (re *realEncoder) putCompactString(in string) error {
+	re.putCompactArrayLength(len(in))
+	return re.putRawBytes([]byte(in))
+}
+
+func (re *realEncoder) putNullableCompactString(in *string) error {
+	if in == nil {
+		// a null compact string is encoded as uvarint 0, i.e. a single zero byte
+		re.putInt8(0)
+		return nil
+	}
+	return re.putCompactString(*in)
+}
+
 func (re *realEncoder) putString(in string) error {
 	re.putInt16(int16(len(in)))
 	copy(re.raw[re.off:], in)
@@ -108,6 +136,31 @@
 	return nil
 }
 
+func (re *realEncoder) putCompactInt32Array(in []int32) error {
+	if in == nil {
+		return errors.New("expected int32 array to be non null")
+	}
+	// 0 represents a null array, so +1 has to be added
+	re.putUVarint(uint64(len(in)) + 1)
+	for _, val := range in {
+		re.putInt32(val)
+	}
+	return nil
+}
+
+func (re *realEncoder) putNullableCompactInt32Array(in []int32) error {
+	if in == nil {
+		re.putUVarint(0)
+		return nil
+	}
+	// 0 represents a null array, so +1 has to be added
+	re.putUVarint(uint64(len(in)) + 1)
+	for _, val := range in {
+		re.putInt32(val)
+	}
+	return nil
+}
+
 func (re *realEncoder) putInt32Array(in []int32) error {
 	err := re.putArrayLength(len(in))
 	if err != nil {
@@ -130,6 +183,10 @@
 	return nil
 }
 
+func (re *realEncoder) putEmptyTaggedFieldArray() {
+	re.putUVarint(0)
+}
+
 func (re *realEncoder) offset() int {
 	return re.off
 }
diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go
index cdccfe3..a3fe8c0 100644
--- a/vendor/github.com/Shopify/sarama/record.go
+++ b/vendor/github.com/Shopify/sarama/record.go
@@ -11,7 +11,7 @@
 	maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1
 )
 
-//RecordHeader stores key and value for a record header
+// RecordHeader stores key and value for a record header
 type RecordHeader struct {
 	Key   []byte
 	Value []byte
@@ -35,7 +35,7 @@
 	return nil
 }
 
-//Record is kafka record type
+// Record is kafka record type
 type Record struct {
 	Headers []*RecordHeader
 
diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go
index 98160c7..f4c5e95 100644
--- a/vendor/github.com/Shopify/sarama/records.go
+++ b/vendor/github.com/Shopify/sarama/records.go
@@ -8,7 +8,6 @@
 	defaultRecords
 
 	magicOffset = 16
-	magicLength = 1
 )
 
 // Records implements a union type containing either a RecordBatch or a legacy MessageSet.
diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go
index 5ed8ca4..d899df5 100644
--- a/vendor/github.com/Shopify/sarama/request.go
+++ b/vendor/github.com/Shopify/sarama/request.go
@@ -11,6 +11,7 @@
 	versionedDecoder
 	key() int16
 	version() int16
+	headerVersion() int16
 	requiredVersion() KafkaVersion
 }
 
@@ -26,12 +27,19 @@
 	pe.putInt16(r.body.version())
 	pe.putInt32(r.correlationID)
 
-	err := pe.putString(r.clientID)
-	if err != nil {
-		return err
+	if r.body.headerVersion() >= 1 {
+		err := pe.putString(r.clientID)
+		if err != nil {
+			return err
+		}
 	}
 
-	err = r.body.encode(pe)
+	if r.body.headerVersion() >= 2 {
+		// We don't use tagged fields at the moment, so encode an empty tagged-field array (length 0).
+		pe.putUVarint(0)
+	}
+
+	err := r.body.encode(pe)
 	if err != nil {
 		return err
 	}
@@ -65,6 +73,14 @@
 		return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
 	}
 
+	if r.body.headerVersion() >= 2 {
+		// tagged field
+		_, err = pd.getUVarint()
+		if err != nil {
+			return err
+		}
+	}
+
 	return r.body.decode(pd, version)
 }
 
@@ -105,7 +121,7 @@
 	case 0:
 		return &ProduceRequest{}
 	case 1:
-		return &FetchRequest{}
+		return &FetchRequest{Version: version}
 	case 2:
 		return &OffsetRequest{Version: version}
 	case 3:
@@ -113,7 +129,7 @@
 	case 8:
 		return &OffsetCommitRequest{Version: version}
 	case 9:
-		return &OffsetFetchRequest{}
+		return &OffsetFetchRequest{Version: version}
 	case 10:
 		return &FindCoordinatorRequest{}
 	case 11:
@@ -158,12 +174,24 @@
 		return &DescribeConfigsRequest{}
 	case 33:
 		return &AlterConfigsRequest{}
+	case 35:
+		return &DescribeLogDirsRequest{}
 	case 36:
 		return &SaslAuthenticateRequest{}
 	case 37:
 		return &CreatePartitionsRequest{}
 	case 42:
 		return &DeleteGroupsRequest{}
+	case 44:
+		return &IncrementalAlterConfigsRequest{}
+	case 45:
+		return &AlterPartitionReassignmentsRequest{}
+	case 46:
+		return &ListPartitionReassignmentsRequest{}
+	case 50:
+		return &DescribeUserScramCredentialsRequest{}
+	case 51:
+		return &AlterUserScramCredentialsRequest{}
 	}
 	return nil
 }
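
headerVersion() (introduced above) selects the request header layout: header v1 carries api_key, api_version, correlation_id and client_id, while header v2 additionally appends an empty tagged-field array after client_id. A standalone sketch of that byte layout, using a hypothetical encodeRequestHeader helper for illustration:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// encodeRequestHeader writes the header layout chosen by headerVersion():
// v1 = api_key, api_version, correlation_id, client_id;
// v2 additionally ends with an empty tagged-field array.
func encodeRequestHeader(headerVersion, apiKey, apiVersion int16, correlationID int32, clientID string) []byte {
	var buf bytes.Buffer
	_ = binary.Write(&buf, binary.BigEndian, apiKey)
	_ = binary.Write(&buf, binary.BigEndian, apiVersion)
	_ = binary.Write(&buf, binary.BigEndian, correlationID)
	if headerVersion >= 1 {
		_ = binary.Write(&buf, binary.BigEndian, int16(len(clientID))) // STRING length prefix
		buf.WriteString(clientID)
	}
	if headerVersion >= 2 {
		buf.WriteByte(0) // uvarint 0: empty tagged-field array
	}
	return buf.Bytes()
}

func main() {
	v1 := encodeRequestHeader(1, 3, 5, 42, "voltha")
	v2 := encodeRequestHeader(2, 3, 5, 42, "voltha")
	fmt.Println(len(v1), len(v2)) // v2 is exactly one byte longer
}
```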
diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go
index 7a75918..fbcef0b 100644
--- a/vendor/github.com/Shopify/sarama/response_header.go
+++ b/vendor/github.com/Shopify/sarama/response_header.go
@@ -2,15 +2,17 @@
 
 import "fmt"
 
-const responseLengthSize = 4
-const correlationIDSize = 4
+const (
+	responseLengthSize = 4
+	correlationIDSize  = 4
+)
 
 type responseHeader struct {
 	length        int32
 	correlationID int32
 }
 
-func (r *responseHeader) decode(pd packetDecoder) (err error) {
+func (r *responseHeader) decode(pd packetDecoder, version int16) (err error) {
 	r.length, err = pd.getInt32()
 	if err != nil {
 		return err
@@ -20,5 +22,12 @@
 	}
 
 	r.correlationID, err = pd.getInt32()
+
+	if version >= 1 {
+		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
 	return err
 }
diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go
index 1e0277a..48f362d 100644
--- a/vendor/github.com/Shopify/sarama/sarama.go
+++ b/vendor/github.com/Shopify/sarama/sarama.go
@@ -39,6 +39,10 @@
 	| response-rate-for-broker-<broker-id>         | meter      | Responses/second received from a given broker                 |
 	| response-size                                | histogram  | Distribution of the response size in bytes for all brokers    |
 	| response-size-for-broker-<broker-id>         | histogram  | Distribution of the response size in bytes for a given broker |
+	| requests-in-flight                           | counter    | The current number of in-flight requests awaiting a response  |
+	|                                              |            | for all brokers                                               |
+	| requests-in-flight-for-broker-<broker-id>    | counter    | The current number of in-flight requests awaiting a response  |
+	|                                              |            | for a given broker                                            |
 	+----------------------------------------------+------------+---------------------------------------------------------------+
 
 Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics.
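
The new requests-in-flight counters are registered in the same go-metrics registry as the existing broker metrics, so they can be read back from a Config's MetricRegistry. A minimal sketch; the traffic-generating code is elided:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
	"github.com/rcrowley/go-metrics"
)

func main() {
	cfg := sarama.NewConfig()

	// ... create producers/consumers with cfg and generate some traffic ...

	if m := cfg.MetricRegistry.Get("requests-in-flight"); m != nil {
		if c, ok := m.(metrics.Counter); ok {
			fmt.Println("in-flight requests:", c.Count())
		}
	}
}
```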
diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go
index 54c8b09..90504df 100644
--- a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go
+++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go
@@ -24,6 +24,10 @@
 	return 0
 }
 
+func (r *SaslAuthenticateRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion {
 	return V1_0_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go
index 0038c3f..3ef57b5 100644
--- a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go
+++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go
@@ -39,6 +39,10 @@
 	return 0
 }
 
+func (r *SaslAuthenticateResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion {
 	return V1_0_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
index fe5ba05..74dc307 100644
--- a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
@@ -29,6 +29,10 @@
 	return r.Version
 }
 
+func (r *SaslHandshakeRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
 	return V0_10_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
index ef290d4..69dfc31 100644
--- a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
@@ -33,6 +33,10 @@
 	return 0
 }
 
+func (r *SaslHandshakeResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
 	return V0_10_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/scram_formatter.go b/vendor/github.com/Shopify/sarama/scram_formatter.go
new file mode 100644
index 0000000..2af9e4a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/scram_formatter.go
@@ -0,0 +1,78 @@
+package sarama
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"crypto/sha512"
+	"hash"
+)
+
+// ScramFormatter implementation
+// @see: https://github.com/apache/kafka/blob/99b9b3e84f4e98c3f07714e1de6a139a004cbc5b/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramFormatter.java#L93
+type scramFormatter struct {
+	mechanism ScramMechanismType
+}
+
+func (s scramFormatter) mac(key []byte) (hash.Hash, error) {
+	var m hash.Hash
+
+	switch s.mechanism {
+	case SCRAM_MECHANISM_SHA_256:
+		m = hmac.New(sha256.New, key)
+
+	case SCRAM_MECHANISM_SHA_512:
+		m = hmac.New(sha512.New, key)
+	default:
+		return nil, ErrUnknownScramMechanism
+	}
+
+	return m, nil
+}
+
+func (s scramFormatter) hmac(key []byte, extra []byte) ([]byte, error) {
+	mac, err := s.mac(key)
+	if err != nil {
+		return nil, err
+	}
+
+	if _, err := mac.Write(extra); err != nil {
+		return nil, err
+	}
+	return mac.Sum(nil), nil
+}
+
+func (s scramFormatter) xor(result []byte, second []byte) {
+	for i := 0; i < len(result); i++ {
+		result[i] = result[i] ^ second[i]
+	}
+}
+
+func (s scramFormatter) saltedPassword(password []byte, salt []byte, iterations int) ([]byte, error) {
+	mac, err := s.mac(password)
+	if err != nil {
+		return nil, err
+	}
+
+	if _, err := mac.Write(salt); err != nil {
+		return nil, err
+	}
+	if _, err := mac.Write([]byte{0, 0, 0, 1}); err != nil {
+		return nil, err
+	}
+
+	u1 := mac.Sum(nil)
+	prev := u1
+	result := u1
+
+	for i := 2; i <= iterations; i++ {
+		ui, err := s.hmac(password, prev)
+		if err != nil {
+			return nil, err
+		}
+
+		s.xor(result, ui)
+		prev = ui
+	}
+
+	return result, nil
+}
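
saltedPassword above is the Hi() function from RFC 5802, i.e. PBKDF2 keyed with HMAC over the selected hash. For comparison only, the same derivation can be cross-checked with golang.org/x/crypto/pbkdf2 (an extra dependency used purely for illustration; the password, salt and iteration count are arbitrary example values):

```go
package main

import (
	"crypto/sha256"
	"fmt"

	"golang.org/x/crypto/pbkdf2"
)

func main() {
	// Arbitrary example inputs; real values come from the SCRAM exchange.
	password := []byte("pencil")
	salt := []byte("QSXCR+Q6sek8bf92")
	iterations := 4096

	// Equivalent to scramFormatter{SCRAM_MECHANISM_SHA_256}.saltedPassword(password, salt, iterations).
	salted := pbkdf2.Key(password, salt, iterations, sha256.Size, sha256.New)
	fmt.Printf("salted password: %x\n", salted)
}
```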
diff --git a/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go b/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go
new file mode 100644
index 0000000..161233f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go
@@ -0,0 +1,124 @@
+package sarama
+
+type topicPartitionAssignment struct {
+	Topic     string
+	Partition int32
+}
+
+type StickyAssignorUserData interface {
+	partitions() []topicPartitionAssignment
+	hasGeneration() bool
+	generation() int
+}
+
+// StickyAssignorUserDataV0 holds topic partition information for an assignment
+type StickyAssignorUserDataV0 struct {
+	Topics map[string][]int32
+
+	topicPartitions []topicPartitionAssignment
+}
+
+func (m *StickyAssignorUserDataV0) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(m.Topics)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range m.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (m *StickyAssignorUserDataV0) decode(pd packetDecoder) (err error) {
+	var topicLen int
+	if topicLen, err = pd.getArrayLength(); err != nil {
+		return
+	}
+
+	m.Topics = make(map[string][]int32, topicLen)
+	for i := 0; i < topicLen; i++ {
+		var topic string
+		if topic, err = pd.getString(); err != nil {
+			return
+		}
+		if m.Topics[topic], err = pd.getInt32Array(); err != nil {
+			return
+		}
+	}
+	m.topicPartitions = populateTopicPartitions(m.Topics)
+	return nil
+}
+
+func (m *StickyAssignorUserDataV0) partitions() []topicPartitionAssignment { return m.topicPartitions }
+func (m *StickyAssignorUserDataV0) hasGeneration() bool                    { return false }
+func (m *StickyAssignorUserDataV0) generation() int                        { return defaultGeneration }
+
+// StickyAssignorUserDataV1 holds topic partition information for an assignment
+type StickyAssignorUserDataV1 struct {
+	Topics     map[string][]int32
+	Generation int32
+
+	topicPartitions []topicPartitionAssignment
+}
+
+func (m *StickyAssignorUserDataV1) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(m.Topics)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range m.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+
+	pe.putInt32(m.Generation)
+	return nil
+}
+
+func (m *StickyAssignorUserDataV1) decode(pd packetDecoder) (err error) {
+	var topicLen int
+	if topicLen, err = pd.getArrayLength(); err != nil {
+		return
+	}
+
+	m.Topics = make(map[string][]int32, topicLen)
+	for i := 0; i < topicLen; i++ {
+		var topic string
+		if topic, err = pd.getString(); err != nil {
+			return
+		}
+		if m.Topics[topic], err = pd.getInt32Array(); err != nil {
+			return
+		}
+	}
+
+	m.Generation, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+	m.topicPartitions = populateTopicPartitions(m.Topics)
+	return nil
+}
+
+func (m *StickyAssignorUserDataV1) partitions() []topicPartitionAssignment { return m.topicPartitions }
+func (m *StickyAssignorUserDataV1) hasGeneration() bool                    { return true }
+func (m *StickyAssignorUserDataV1) generation() int                        { return int(m.Generation) }
+
+func populateTopicPartitions(topics map[string][]int32) []topicPartitionAssignment {
+	topicPartitions := make([]topicPartitionAssignment, 0)
+	for topic, partitions := range topics {
+		for _, partition := range partitions {
+			topicPartitions = append(topicPartitions, topicPartitionAssignment{Topic: topic, Partition: partition})
+		}
+	}
+	return topicPartitions
+}
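
These user-data payloads carry a member's previous assignment (and, in V1, the group generation) that the sticky balance strategy exchanges during a rebalance. A minimal sketch of opting a consumer group into that strategy through sarama's public config; broker, group and topic names are placeholders and the handler is a no-op:

```go
package main

import (
	"context"
	"log"

	"github.com/Shopify/sarama"
)

type noopHandler struct{}

func (noopHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (noopHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
func (noopHandler) ConsumeClaim(s sarama.ConsumerGroupSession, c sarama.ConsumerGroupClaim) error {
	for msg := range c.Messages() {
		s.MarkMessage(msg, "")
	}
	return nil
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0
	cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky

	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer group.Close()

	if err := group.Consume(context.Background(), []string{"example-topic"}, noopHandler{}); err != nil {
		log.Fatal(err)
	}
}
```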
diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go
index fe20708..ac6ecb1 100644
--- a/vendor/github.com/Shopify/sarama/sync_group_request.go
+++ b/vendor/github.com/Shopify/sarama/sync_group_request.go
@@ -77,6 +77,10 @@
 	return 0
 }
 
+func (r *SyncGroupRequest) headerVersion() int16 {
+	return 1
+}
+
 func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go
index 194b382..af019c4 100644
--- a/vendor/github.com/Shopify/sarama/sync_group_response.go
+++ b/vendor/github.com/Shopify/sarama/sync_group_response.go
@@ -36,6 +36,10 @@
 	return 0
 }
 
+func (r *SyncGroupResponse) headerVersion() int16 {
+	return 0
+}
+
 func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
 	return V0_9_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
index 71e95b8..c4043a3 100644
--- a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
+++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
@@ -91,6 +91,10 @@
 	return 0
 }
 
+func (a *TxnOffsetCommitRequest) headerVersion() int16 {
+	return 1
+}
+
 func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
index 6c980f4..94d8029 100644
--- a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
+++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
@@ -78,6 +78,10 @@
 	return 0
 }
 
+func (a *TxnOffsetCommitResponse) headerVersion() int16 {
+	return 0
+}
+
 func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion {
 	return V0_11_0_0
 }
diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go
index 7c815cd..1859d29 100644
--- a/vendor/github.com/Shopify/sarama/utils.go
+++ b/vendor/github.com/Shopify/sarama/utils.go
@@ -26,9 +26,7 @@
 
 func dupInt32Slice(input []int32) []int32 {
 	ret := make([]int32, 0, len(input))
-	for _, val := range input {
-		ret = append(ret, val)
-	}
+	ret = append(ret, input...)
 	return ret
 }
 
@@ -161,6 +159,11 @@
 	V2_1_0_0  = newKafkaVersion(2, 1, 0, 0)
 	V2_2_0_0  = newKafkaVersion(2, 2, 0, 0)
 	V2_3_0_0  = newKafkaVersion(2, 3, 0, 0)
+	V2_4_0_0  = newKafkaVersion(2, 4, 0, 0)
+	V2_5_0_0  = newKafkaVersion(2, 5, 0, 0)
+	V2_6_0_0  = newKafkaVersion(2, 6, 0, 0)
+	V2_7_0_0  = newKafkaVersion(2, 7, 0, 0)
+	V2_8_0_0  = newKafkaVersion(2, 8, 0, 0)
 
 	SupportedVersions = []KafkaVersion{
 		V0_8_2_0,
@@ -185,15 +188,21 @@
 		V2_1_0_0,
 		V2_2_0_0,
 		V2_3_0_0,
+		V2_4_0_0,
+		V2_5_0_0,
+		V2_6_0_0,
+		V2_7_0_0,
+		V2_8_0_0,
 	}
-	MinVersion = V0_8_2_0
-	MaxVersion = V2_3_0_0
+	MinVersion     = V0_8_2_0
+	MaxVersion     = V2_8_0_0
+	DefaultVersion = V1_0_0_0
 )
 
-//ParseKafkaVersion parses and returns kafka version or error from a string
+// ParseKafkaVersion parses and returns kafka version or error from a string
 func ParseKafkaVersion(s string) (KafkaVersion, error) {
 	if len(s) < 5 {
-		return MinVersion, fmt.Errorf("invalid version `%s`", s)
+		return DefaultVersion, fmt.Errorf("invalid version `%s`", s)
 	}
 	var major, minor, veryMinor, patch uint
 	var err error
@@ -203,7 +212,7 @@
 		err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
 	}
 	if err != nil {
-		return MinVersion, err
+		return DefaultVersion, err
 	}
 	return newKafkaVersion(major, minor, veryMinor, patch), nil
 }
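
ParseKafkaVersion now falls back to DefaultVersion (1.0.0) rather than MinVersion when the string cannot be parsed, but callers should still check the returned error. A short usage sketch; the version string is an arbitrary example:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	v, err := sarama.ParseKafkaVersion("2.8.0")
	if err != nil {
		log.Fatalf("bad kafka version: %v", err)
	}

	cfg := sarama.NewConfig()
	cfg.Version = v
	fmt.Println("using Kafka protocol version", v)
}
```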
diff --git a/vendor/github.com/Shopify/sarama/zstd.go b/vendor/github.com/Shopify/sarama/zstd.go
new file mode 100644
index 0000000..e23bfc4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/zstd.go
@@ -0,0 +1,18 @@
+package sarama
+
+import (
+	"github.com/klauspost/compress/zstd"
+)
+
+var (
+	zstdDec, _ = zstd.NewReader(nil)
+	zstdEnc, _ = zstd.NewWriter(nil, zstd.WithZeroFrames(true))
+)
+
+func zstdDecompress(dst, src []byte) ([]byte, error) {
+	return zstdDec.DecodeAll(src, dst)
+}
+
+func zstdCompress(dst, src []byte) ([]byte, error) {
+	return zstdEnc.EncodeAll(src, dst), nil
+}
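
The cgo-based DataDog/zstd binding (deleted below) is replaced by the pure-Go klauspost/compress encoder and decoder, which also drops the compression-level parameter. A standalone round-trip sketch using the same EncodeAll/DecodeAll calls; the input bytes are arbitrary:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil, zstd.WithZeroFrames(true))
	if err != nil {
		log.Fatal(err)
	}
	dec, err := zstd.NewReader(nil)
	if err != nil {
		log.Fatal(err)
	}

	src := []byte("voltha voltha voltha voltha")
	compressed := enc.EncodeAll(src, nil)

	out, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(src, out)) // true
}
```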
diff --git a/vendor/github.com/Shopify/sarama/zstd_cgo.go b/vendor/github.com/Shopify/sarama/zstd_cgo.go
deleted file mode 100644
index f5ccb31..0000000
--- a/vendor/github.com/Shopify/sarama/zstd_cgo.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build cgo
-
-package sarama
-
-import "github.com/DataDog/zstd"
-
-func zstdDecompress(dst, src []byte) ([]byte, error) {
-	return zstd.Decompress(dst, src)
-}
-
-func zstdCompressLevel(dst, src []byte, level int) ([]byte, error) {
-	return zstd.CompressLevel(dst, src, level)
-}
diff --git a/vendor/github.com/Shopify/sarama/zstd_fallback.go b/vendor/github.com/Shopify/sarama/zstd_fallback.go
deleted file mode 100644
index 381a56b..0000000
--- a/vendor/github.com/Shopify/sarama/zstd_fallback.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build !cgo
-
-package sarama
-
-import (
-	"errors"
-)
-
-var errZstdCgo = errors.New("zstd compression requires building with cgo enabled")
-
-func zstdDecompress(dst, src []byte) ([]byte, error) {
-	return nil, errZstdCgo
-}
-
-func zstdCompressLevel(dst, src []byte, level int) ([]byte, error) {
-	return nil, errZstdCgo
-}