[SEBA-881] move device-management to go mod
Change-Id: Idf082ee75c157f72fd4173f653d9e13c1c55fcd3
diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore
new file mode 100644
index 0000000..6e362e4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/.gitignore
@@ -0,0 +1,27 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+*.test
+
+# Folders
+_obj
+_test
+.vagrant
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+coverage.txt
+profile.out
diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml
new file mode 100644
index 0000000..d609423
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/.travis.yml
@@ -0,0 +1,38 @@
+dist: xenial
+language: go
+go:
+- 1.11.x
+- 1.12.x
+- 1.13.x
+
+env:
+ global:
+ - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095
+ - TOXIPROXY_ADDR=http://localhost:8474
+ - KAFKA_INSTALL_ROOT=/home/travis/kafka
+ - KAFKA_HOSTNAME=localhost
+ - DEBUG=true
+ matrix:
+ - KAFKA_VERSION=2.2.1 KAFKA_SCALA_VERSION=2.12
+ - KAFKA_VERSION=2.3.0 KAFKA_SCALA_VERSION=2.12
+
+before_install:
+- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
+- vagrant/install_cluster.sh
+- vagrant/boot_cluster.sh
+- vagrant/create_topics.sh
+- vagrant/run_java_producer.sh
+
+install: make install_dependencies
+
+script:
+- make test
+- make vet
+- make errcheck
+- if [[ "$TRAVIS_GO_VERSION" == 1.13* ]]; then make fmt; fi
+
+after_success:
+- go tool cover -func coverage.txt
+- bash <(curl -s https://codecov.io/bash)
+
+after_script: vagrant/halt_cluster.sh
diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md
new file mode 100644
index 0000000..dfa7e75
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md
@@ -0,0 +1,843 @@
+# Changelog
+
+#### Version 1.24.1 (2019-10-31)
+
+New Features:
+- Add DescribeLogDirs Request/Response pair
+ ([1520](https://github.com/Shopify/sarama/pull/1520)).
+
+Bug Fixes:
+- Fix ClusterAdmin returning invalid controller ID on DescribeCluster
+ ([1518](https://github.com/Shopify/sarama/pull/1518)).
+- Fix issue with consumergroup not rebalancing when new partition is added
+ ([1525](https://github.com/Shopify/sarama/pull/1525)).
+- Ensure consistent use of read/write deadlines
+ ([1529](https://github.com/Shopify/sarama/pull/1529)).
+
+#### Version 1.24.0 (2019-10-09)
+
+New Features:
+- Add sticky partition assignor
+ ([1416](https://github.com/Shopify/sarama/pull/1416)).
+- Switch from cgo zstd package to pure Go implementation
+ ([1477](https://github.com/Shopify/sarama/pull/1477)).
+
+Improvements:
+- Allow creating ClusterAdmin from client
+ ([1415](https://github.com/Shopify/sarama/pull/1415)).
+- Set KafkaVersion in ListAcls method
+ ([1452](https://github.com/Shopify/sarama/pull/1452)).
+- Set request version in CreateACL ClusterAdmin method
+ ([1458](https://github.com/Shopify/sarama/pull/1458)).
+- Set request version in DeleteACL ClusterAdmin method
+ ([1461](https://github.com/Shopify/sarama/pull/1461)).
+- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest
+ ([1464](https://github.com/Shopify/sarama/pull/1464)).
+- Remove direct usage of gofork
+ ([1465](https://github.com/Shopify/sarama/pull/1465)).
+- Add support for Go 1.13
+ ([1478](https://github.com/Shopify/sarama/pull/1478)).
+- Improve behavior of NewMockListAclsResponse
+ ([1481](https://github.com/Shopify/sarama/pull/1481)).
+
+Bug Fixes:
+- Fix race condition in consumergroup example
+ ([1434](https://github.com/Shopify/sarama/pull/1434)).
+- Fix brokerProducer goroutine leak
+ ([1442](https://github.com/Shopify/sarama/pull/1442)).
+- Use released version of lz4 library
+ ([1469](https://github.com/Shopify/sarama/pull/1469)).
+- Set correct version in MockDeleteTopicsResponse
+ ([1484](https://github.com/Shopify/sarama/pull/1484)).
+- Fix CLI help message typo
+ ([1494](https://github.com/Shopify/sarama/pull/1494)).
+
+Known Issues:
+- Please **don't** use Zstd, as it doesn't work right now.
+ See https://github.com/Shopify/sarama/issues/1252
+
+#### Version 1.23.1 (2019-07-22)
+
+Bug Fixes:
+- Fix bug when fetching deleted records
+ ([1425](https://github.com/Shopify/sarama/pull/1425)).
+- Handle SASL/OAUTHBEARER token rejection
+ ([1428](https://github.com/Shopify/sarama/pull/1428)).
+
+#### Version 1.23.0 (2019-07-02)
+
+New Features:
+- Add support for Kafka 2.3.0
+ ([1418](https://github.com/Shopify/sarama/pull/1418)).
+- Add support for ListConsumerGroupOffsets v2
+ ([1374](https://github.com/Shopify/sarama/pull/1374)).
+- Add support for DeleteConsumerGroup
+ ([1417](https://github.com/Shopify/sarama/pull/1417)).
+- Add support for SASLVersion configuration
+ ([1410](https://github.com/Shopify/sarama/pull/1410)).
+- Add kerberos support
+ ([1366](https://github.com/Shopify/sarama/pull/1366)).
+
+Improvements:
+- Improve sasl_scram_client example
+ ([1406](https://github.com/Shopify/sarama/pull/1406)).
+- Fix shutdown and race-condition in consumer-group example
+ ([1404](https://github.com/Shopify/sarama/pull/1404)).
+- Add support for error codes 77—81
+ ([1397](https://github.com/Shopify/sarama/pull/1397)).
+- Pool internal objects allocated per message
+ ([1385](https://github.com/Shopify/sarama/pull/1385)).
+- Reduce packet decoder allocations
+ ([1373](https://github.com/Shopify/sarama/pull/1373)).
+- Support timeout when fetching metadata
+ ([1359](https://github.com/Shopify/sarama/pull/1359)).
+
+Bug Fixes:
+- Fix fetch size integer overflow
+ ([1376](https://github.com/Shopify/sarama/pull/1376)).
+- Handle and log throttled FetchResponses
+ ([1383](https://github.com/Shopify/sarama/pull/1383)).
+- Refactor misspelled word Resouce to Resource
+ ([1368](https://github.com/Shopify/sarama/pull/1368)).
+
+#### Version 1.22.1 (2019-04-29)
+
+Improvements:
+- Use zstd 1.3.8
+ ([1350](https://github.com/Shopify/sarama/pull/1350)).
+- Add support for SaslHandshakeRequest v1
+ ([1354](https://github.com/Shopify/sarama/pull/1354)).
+
+Bug Fixes:
+- Fix V5 MetadataRequest nullable topics array
+ ([1353](https://github.com/Shopify/sarama/pull/1353)).
+- Use a different SCRAM client for each broker connection
+ ([1349](https://github.com/Shopify/sarama/pull/1349)).
+- Fix AllowAutoTopicCreation for MetadataRequest greater than v3
+ ([1344](https://github.com/Shopify/sarama/pull/1344)).
+
+#### Version 1.22.0 (2019-04-09)
+
+New Features:
+- Add Offline Replicas Operation to Client
+ ([1318](https://github.com/Shopify/sarama/pull/1318)).
+- Allow using proxy when connecting to broker
+ ([1326](https://github.com/Shopify/sarama/pull/1326)).
+- Implement ReadCommitted
+ ([1307](https://github.com/Shopify/sarama/pull/1307)).
+- Add support for Kafka 2.2.0
+ ([1331](https://github.com/Shopify/sarama/pull/1331)).
+- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanisms
+  ([1295](https://github.com/Shopify/sarama/pull/1295)).
+
+Improvements:
+- Unregister all broker metrics on broker stop
+ ([1232](https://github.com/Shopify/sarama/pull/1232)).
+- Add SCRAM authentication example
+ ([1303](https://github.com/Shopify/sarama/pull/1303)).
+- Add consumergroup examples
+ ([1304](https://github.com/Shopify/sarama/pull/1304)).
+- Expose consumer batch size metric
+ ([1296](https://github.com/Shopify/sarama/pull/1296)).
+- Add TLS options to console producer and consumer
+ ([1300](https://github.com/Shopify/sarama/pull/1300)).
+- Reduce client close bookkeeping
+ ([1297](https://github.com/Shopify/sarama/pull/1297)).
+- Satisfy error interface in create responses
+ ([1154](https://github.com/Shopify/sarama/pull/1154)).
+- Please the lint gods
+ ([1346](https://github.com/Shopify/sarama/pull/1346)).
+
+Bug Fixes:
+- Fix multi consumer group instance crash
+ ([1338](https://github.com/Shopify/sarama/pull/1338)).
+- Update lz4 to latest version
+ ([1347](https://github.com/Shopify/sarama/pull/1347)).
+- Retry ErrNotCoordinatorForConsumer in new consumergroup session
+ ([1231](https://github.com/Shopify/sarama/pull/1231)).
+- Fix cleanup error handler
+ ([1332](https://github.com/Shopify/sarama/pull/1332)).
+- Fix race condition in PartitionConsumer
+ ([1156](https://github.com/Shopify/sarama/pull/1156)).
+
+#### Version 1.21.0 (2019-02-24)
+
+New Features:
+- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest
+ ([1236](https://github.com/Shopify/sarama/pull/1236)).
+- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests
+ ([1178](https://github.com/Shopify/sarama/pull/1178)).
+- Implement SASL/OAUTHBEARER
+ ([1240](https://github.com/Shopify/sarama/pull/1240)).
+
+Improvements:
+- Add Go mod support
+ ([1282](https://github.com/Shopify/sarama/pull/1282)).
+- Add error codes 73—76
+ ([1239](https://github.com/Shopify/sarama/pull/1239)).
+- Add retry backoff function
+ ([1160](https://github.com/Shopify/sarama/pull/1160)).
+- Maintain metadata in the producer even when retries are disabled
+ ([1189](https://github.com/Shopify/sarama/pull/1189)).
+- Include ReplicaAssignment in ListTopics
+ ([1274](https://github.com/Shopify/sarama/pull/1274)).
+- Add producer performance tool
+ ([1222](https://github.com/Shopify/sarama/pull/1222)).
+- Add support for LogAppend timestamps
+ ([1258](https://github.com/Shopify/sarama/pull/1258)).
+
+Bug Fixes:
+- Fix potential deadlock when a heartbeat request fails
+ ([1286](https://github.com/Shopify/sarama/pull/1286)).
+- Fix consuming compacted topic
+ ([1227](https://github.com/Shopify/sarama/pull/1227)).
+- Set correct Kafka version for DescribeConfigsRequest v1
+ ([1277](https://github.com/Shopify/sarama/pull/1277)).
+- Update kafka test version
+ ([1273](https://github.com/Shopify/sarama/pull/1273)).
+
+#### Version 1.20.1 (2019-01-10)
+
+New Features:
+- Add optional replica id in offset request
+ ([1100](https://github.com/Shopify/sarama/pull/1100)).
+
+Improvements:
+- Implement DescribeConfigs Request + Response v1 & v2
+ ([1230](https://github.com/Shopify/sarama/pull/1230)).
+- Reuse compression objects
+ ([1185](https://github.com/Shopify/sarama/pull/1185)).
+- Switch from png to svg for GoDoc link in README
+ ([1243](https://github.com/Shopify/sarama/pull/1243)).
+- Fix typo in deprecation notice for FetchResponseBlock.Records
+ ([1242](https://github.com/Shopify/sarama/pull/1242)).
+- Fix typos in consumer metadata response file
+ ([1244](https://github.com/Shopify/sarama/pull/1244)).
+
+Bug Fixes:
+- Revert to individual msg retries for non-idempotent
+ ([1203](https://github.com/Shopify/sarama/pull/1203)).
+- Respect MaxMessageBytes limit for uncompressed messages
+ ([1141](https://github.com/Shopify/sarama/pull/1141)).
+
+#### Version 1.20.0 (2018-12-10)
+
+New Features:
+ - Add support for zstd compression
+ ([#1170](https://github.com/Shopify/sarama/pull/1170)).
+ - Add support for Idempotent Producer
+ ([#1152](https://github.com/Shopify/sarama/pull/1152)).
+ - Add support for Kafka 2.1.0
+   ([#1229](https://github.com/Shopify/sarama/pull/1229)).
+ - Add support for OffsetCommit request/response pairs versions v1 to v5
+   ([#1201](https://github.com/Shopify/sarama/pull/1201)).
+ - Add support for OffsetFetch request/response pair up to version v5
+   ([#1198](https://github.com/Shopify/sarama/pull/1198)).
+
+Improvements:
+ - Export broker's Rack setting
+ ([#1173](https://github.com/Shopify/sarama/pull/1173)).
+ - Always use latest patch version of Go on CI
+ ([#1202](https://github.com/Shopify/sarama/pull/1202)).
+ - Add error codes 61 to 72
+ ([#1195](https://github.com/Shopify/sarama/pull/1195)).
+
+Bug Fixes:
+ - Fix build without cgo
+ ([#1182](https://github.com/Shopify/sarama/pull/1182)).
+ - Fix go vet suggestion in consumer group file
+ ([#1209](https://github.com/Shopify/sarama/pull/1209)).
+ - Fix typos in code and comments
+ ([#1228](https://github.com/Shopify/sarama/pull/1228)).
+
+#### Version 1.19.0 (2018-09-27)
+
+New Features:
+ - Implement a higher-level consumer group
+ ([#1099](https://github.com/Shopify/sarama/pull/1099)).
+
+Improvements:
+ - Add support for Go 1.11
+ ([#1176](https://github.com/Shopify/sarama/pull/1176)).
+
+Bug Fixes:
+ - Fix encoding of `MetadataResponse` with version 2 and higher
+ ([#1174](https://github.com/Shopify/sarama/pull/1174)).
+ - Fix race condition in mock async producer
+ ([#1174](https://github.com/Shopify/sarama/pull/1174)).
+
+#### Version 1.18.0 (2018-09-07)
+
+New Features:
+ - Make `Partitioner.RequiresConsistency` vary per-message
+ ([#1112](https://github.com/Shopify/sarama/pull/1112)).
+ - Add customizable partitioner
+ ([#1118](https://github.com/Shopify/sarama/pull/1118)).
+ - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`,
+ `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL`
+ ([#1055](https://github.com/Shopify/sarama/pull/1055)).
+
+Improvements:
+ - Add support for Kafka 2.0.0
+ ([#1149](https://github.com/Shopify/sarama/pull/1149)).
+ - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts
+ ([#1123](https://github.com/Shopify/sarama/pull/1123)).
+ - Simpler offset management
+ ([#1127](https://github.com/Shopify/sarama/pull/1127)).
+
+Bug Fixes:
+ - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka
+ ([#1110](https://github.com/Shopify/sarama/pull/1110)).
+ - Fix consumer block when response did not contain all the
+ expected topic/partition blocks
+ ([#1086](https://github.com/Shopify/sarama/pull/1086)).
+ - Fix consumer block when response contains only control messages
+ ([#1115](https://github.com/Shopify/sarama/pull/1115)).
+ - Add timeout config for ClusterAdmin requests
+ ([#1142](https://github.com/Shopify/sarama/pull/1142)).
+ - Add version check when producing message with headers
+ ([#1117](https://github.com/Shopify/sarama/pull/1117)).
+ - Fix `MetadataRequest` for empty list of topics
+ ([#1132](https://github.com/Shopify/sarama/pull/1132)).
+ - Fix producer topic metadata on-demand fetch when topic error happens in metadata response
+ ([#1125](https://github.com/Shopify/sarama/pull/1125)).
+
+#### Version 1.17.0 (2018-05-30)
+
+New Features:
+ - Add support for gzip compression levels
+ ([#1044](https://github.com/Shopify/sarama/pull/1044)).
+ - Add support for Metadata request/response pairs versions v1 to v5
+ ([#1047](https://github.com/Shopify/sarama/pull/1047),
+ [#1069](https://github.com/Shopify/sarama/pull/1069)).
+ - Add versioning to JoinGroup request/response pairs
+ ([#1098](https://github.com/Shopify/sarama/pull/1098))
+ - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs
+ ([#1065](https://github.com/Shopify/sarama/pull/1065),
+ [#1096](https://github.com/Shopify/sarama/pull/1096),
+ [#1027](https://github.com/Shopify/sarama/pull/1027)).
+ - Add `Controller()` method to Client interface
+ ([#1063](https://github.com/Shopify/sarama/pull/1063)).
+
+Improvements:
+ - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp
+ ([#1010](https://github.com/Shopify/sarama/pull/1010)).
+ - Expose missing protocol parts: `msgSet` and `recordBatch`
+ ([#1049](https://github.com/Shopify/sarama/pull/1049)).
+ - Add support for v1 DeleteTopics Request
+ ([#1052](https://github.com/Shopify/sarama/pull/1052)).
+ - Add support for Go 1.10
+ ([#1064](https://github.com/Shopify/sarama/pull/1064)).
+ - Claim support for Kafka 1.1.0
+ ([#1073](https://github.com/Shopify/sarama/pull/1073)).
+
+Bug Fixes:
+ - Fix FindCoordinatorResponse.encode to allow nil Coordinator
+ ([#1050](https://github.com/Shopify/sarama/pull/1050),
+ [#1051](https://github.com/Shopify/sarama/pull/1051)).
+ - Clear all metadata when we have the latest topic info
+ ([#1033](https://github.com/Shopify/sarama/pull/1033)).
+ - Make `PartitionConsumer.Close` idempotent
+ ([#1092](https://github.com/Shopify/sarama/pull/1092)).
+
+#### Version 1.16.0 (2018-02-12)
+
+New Features:
+ - Add support for the Create/Delete Topics request/response pairs
+ ([#1007](https://github.com/Shopify/sarama/pull/1007),
+ [#1008](https://github.com/Shopify/sarama/pull/1008)).
+ - Add support for the Describe/Create/Delete ACL request/response pairs
+ ([#1009](https://github.com/Shopify/sarama/pull/1009)).
+ - Add support for the five transaction-related request/response pairs
+ ([#1016](https://github.com/Shopify/sarama/pull/1016)).
+
+Improvements:
+ - Permit setting version on mock producer responses
+ ([#999](https://github.com/Shopify/sarama/pull/999)).
+ - Add `NewMockBrokerListener` helper for testing TLS connections
+ ([#1019](https://github.com/Shopify/sarama/pull/1019)).
+ - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB
+ which results in much higher throughput in most cases
+ ([#1024](https://github.com/Shopify/sarama/pull/1024)).
+ - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to
+ reduce CPU and memory usage when processing many partitions
+ ([#1028](https://github.com/Shopify/sarama/pull/1028)).
+ - Assign relative offsets to messages in the producer to save the brokers a
+ recompression pass
+ ([#1002](https://github.com/Shopify/sarama/pull/1002),
+ [#1015](https://github.com/Shopify/sarama/pull/1015)).
+
+Bug Fixes:
+ - Fix producing uncompressed batches with the new protocol format
+ ([#1032](https://github.com/Shopify/sarama/issues/1032)).
+ - Fix consuming compacted topics with the new protocol format
+ ([#1005](https://github.com/Shopify/sarama/issues/1005)).
+ - Fix consuming topics with a mix of protocol formats
+ ([#1021](https://github.com/Shopify/sarama/issues/1021)).
+ - Fix consuming when the broker includes multiple batches in a single response
+ ([#1022](https://github.com/Shopify/sarama/issues/1022)).
+ - Fix detection of `PartialTrailingMessage` when the partial message was
+ truncated before the magic value indicating its version
+ ([#1030](https://github.com/Shopify/sarama/pull/1030)).
+ - Fix expectation-checking in the mock of `SyncProducer.SendMessages`
+ ([#1035](https://github.com/Shopify/sarama/pull/1035)).
+
+#### Version 1.15.0 (2017-12-08)
+
+New Features:
+ - Claim official support for Kafka 1.0, though it did already work
+ ([#984](https://github.com/Shopify/sarama/pull/984)).
+ - Helper methods for Kafka version numbers to/from strings
+ ([#989](https://github.com/Shopify/sarama/pull/989)).
+ - Implement CreatePartitions request/response
+ ([#985](https://github.com/Shopify/sarama/pull/985)).
+
+Improvements:
+ - Add error codes 45-60
+ ([#986](https://github.com/Shopify/sarama/issues/986)).
+
+Bug Fixes:
+ - Fix slow consuming for certain Kafka 0.11/1.0 configurations
+ ([#982](https://github.com/Shopify/sarama/pull/982)).
+ - Correctly determine when a FetchResponse contains the new message format
+ ([#990](https://github.com/Shopify/sarama/pull/990)).
+ - Fix producing with multiple headers
+ ([#996](https://github.com/Shopify/sarama/pull/996)).
+ - Fix handling of truncated record batches
+ ([#998](https://github.com/Shopify/sarama/pull/998)).
+ - Fix leaking metrics when closing brokers
+ ([#991](https://github.com/Shopify/sarama/pull/991)).
+
+#### Version 1.14.0 (2017-11-13)
+
+New Features:
+ - Add support for the new Kafka 0.11 record-batch format, including the wire
+ protocol and the necessary behavioural changes in the producer and consumer.
+ Transactions and idempotency are not yet supported, but producing and
+ consuming should work with all the existing bells and whistles (batching,
+ compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta
+ of Arista Networks for this work. Part of
+ ([#901](https://github.com/Shopify/sarama/issues/901)).
+
+Bug Fixes:
+ - Fix encoding of ProduceResponse versions in test
+ ([#970](https://github.com/Shopify/sarama/pull/970)).
+ - Return partial replicas list when we have it
+ ([#975](https://github.com/Shopify/sarama/pull/975)).
+
+#### Version 1.13.0 (2017-10-04)
+
+New Features:
+ - Support for FetchRequest version 3
+ ([#905](https://github.com/Shopify/sarama/pull/905)).
+ - Permit setting version on mock FetchResponses
+ ([#939](https://github.com/Shopify/sarama/pull/939)).
+ - Add a configuration option to support storing only minimal metadata for
+ extremely large clusters
+ ([#937](https://github.com/Shopify/sarama/pull/937)).
+ - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
+ ([#932](https://github.com/Shopify/sarama/pull/932)).
+
+Improvements:
+ - Provide the block-level timestamp when consuming compressed messages
+ ([#885](https://github.com/Shopify/sarama/issues/885)).
+ - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
+ by the broker, which can be meaningful
+ ([#930](https://github.com/Shopify/sarama/pull/930)).
+ - Use a `Ticker` to reduce consumer timer overhead at the cost of higher
+ variance in the actual timeout
+ ([#933](https://github.com/Shopify/sarama/pull/933)).
+
+Bug Fixes:
+ - Gracefully handle messages with negative timestamps
+ ([#907](https://github.com/Shopify/sarama/pull/907)).
+ - Raise a proper error when encountering an unknown message version
+ ([#940](https://github.com/Shopify/sarama/pull/940)).
+
+#### Version 1.12.0 (2017-05-08)
+
+New Features:
+ - Added support for the `ApiVersions` request and response pair, and Kafka
+ version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note
+ that you still need to specify the Kafka version in the Sarama configuration
+ for the time being.
+ - Added a `Brokers` method to the Client which returns the complete set of
+ active brokers ([#813](https://github.com/Shopify/sarama/pull/813)).
+ - Added an `InSyncReplicas` method to the Client which returns the set of all
+ in-sync broker IDs for the given partition, now that the Kafka versions for
+ which this was misleading are no longer in our supported set
+ ([#872](https://github.com/Shopify/sarama/pull/872)).
+ - Added a `NewCustomHashPartitioner` method which allows constructing a hash
+ partitioner with a custom hash method in case the default (FNV-1a) is not
+ suitable
+ ([#837](https://github.com/Shopify/sarama/pull/837),
+ [#841](https://github.com/Shopify/sarama/pull/841)).
+
+Improvements:
+ - Recognize more Kafka error codes
+ ([#859](https://github.com/Shopify/sarama/pull/859)).
+
+Bug Fixes:
+ - Fix an issue where decoding a malformed FetchRequest would not return the
+ correct error ([#818](https://github.com/Shopify/sarama/pull/818)).
+ - Respect ordering of group protocols in JoinGroupRequests. This fix is
+ transparent if you're using the `AddGroupProtocol` or
+ `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
+ the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols`
+ ([#812](https://github.com/Shopify/sarama/issues/812)).
+ - Fix an alignment-related issue with atomics on 32-bit architectures
+ ([#859](https://github.com/Shopify/sarama/pull/859)).
+
+#### Version 1.11.0 (2016-12-20)
+
+_Important:_ As of Sarama 1.11 it is necessary to set the config value of
+`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
+versions would silently override this value when instantiating a SyncProducer
+which led to unexpected values and data races.
+
+New Features:
+ - Metrics! Thanks to Sébastien Launay for all his work on this feature
+ ([#701](https://github.com/Shopify/sarama/pull/701),
+ [#746](https://github.com/Shopify/sarama/pull/746),
+ [#766](https://github.com/Shopify/sarama/pull/766)).
+ - Add support for LZ4 compression
+ ([#786](https://github.com/Shopify/sarama/pull/786)).
+ - Add support for ListOffsetRequest v1 and Kafka 0.10.1
+ ([#775](https://github.com/Shopify/sarama/pull/775)).
+ - Added a `HighWaterMarks` method to the Consumer which aggregates the
+ `HighWaterMarkOffset` values of its child topic/partitions
+ ([#769](https://github.com/Shopify/sarama/pull/769)).
+
+Bug Fixes:
+ - Fixed producing when using timestamps, compression and Kafka 0.10
+ ([#759](https://github.com/Shopify/sarama/pull/759)).
+ - Added missing decoder methods to DescribeGroups response
+ ([#756](https://github.com/Shopify/sarama/pull/756)).
+ - Fix producer shutdown when `Return.Errors` is disabled
+ ([#787](https://github.com/Shopify/sarama/pull/787)).
+ - Don't mutate configuration in SyncProducer
+ ([#790](https://github.com/Shopify/sarama/pull/790)).
+ - Fix crash on SASL initialization failure
+ ([#795](https://github.com/Shopify/sarama/pull/795)).
+
+#### Version 1.10.1 (2016-08-30)
+
+Bug Fixes:
+ - Fix the documentation for `HashPartitioner` which was incorrect
+ ([#717](https://github.com/Shopify/sarama/pull/717)).
+ - Permit client creation even when it is limited by ACLs
+ ([#722](https://github.com/Shopify/sarama/pull/722)).
+ - Several fixes to the consumer timer optimization code, regressions introduced
+ in v1.10.0. Go's timers are finicky
+ ([#730](https://github.com/Shopify/sarama/pull/730),
+ [#733](https://github.com/Shopify/sarama/pull/733),
+ [#734](https://github.com/Shopify/sarama/pull/734)).
+ - Handle consuming compressed relative offsets with Kafka 0.10
+ ([#735](https://github.com/Shopify/sarama/pull/735)).
+
+#### Version 1.10.0 (2016-08-02)
+
+_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
+Kafka you are running against (via the `config.Version` value) in order to use
+features that may not be compatible with old Kafka versions. If you don't
+specify this value it will default to 0.8.2 (the minimum supported), and trying
+to use more recent features (like the offset manager) will fail with an error.
+
+_Also:_ The offset-manager's behaviour has been changed to match the upstream
+java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and
+[#713](https://github.com/Shopify/sarama/pull/713)). If you use the
+offset-manager, please ensure that you are committing one *greater* than the
+last consumed message offset or else you may end up consuming duplicate
+messages.
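A hedged sketch of the offset-manager usage this note describes; the group name is a placeholder and the `client` and `msg` values are assumed to exist already:

```go
package example

import "github.com/Shopify/sarama"

// commitProgress records consumer progress through the offset manager.
func commitProgress(client sarama.Client, msg *sarama.ConsumerMessage) error {
	om, err := sarama.NewOffsetManagerFromClient("example-group", client)
	if err != nil {
		return err
	}
	defer om.Close()

	pom, err := om.ManagePartition(msg.Topic, msg.Partition)
	if err != nil {
		return err
	}

	// Commit one *greater* than the last consumed offset, i.e. the next
	// offset to read, as the note above recommends.
	pom.MarkOffset(msg.Offset+1, "")
	return pom.Close()
}
```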
+
+New Features:
+ - Support for Kafka 0.10
+ ([#672](https://github.com/Shopify/sarama/pull/672),
+ [#678](https://github.com/Shopify/sarama/pull/678),
+ [#681](https://github.com/Shopify/sarama/pull/681), and others).
+ - Support for configuring the target Kafka version
+ ([#676](https://github.com/Shopify/sarama/pull/676)).
+ - Batch producing support in the SyncProducer
+ ([#677](https://github.com/Shopify/sarama/pull/677)).
+ - Extend producer mock to allow setting expectations on message contents
+ ([#667](https://github.com/Shopify/sarama/pull/667)).
+
+Improvements:
+ - Support `nil` compressed messages for deleting in compacted topics
+ ([#634](https://github.com/Shopify/sarama/pull/634)).
+ - Pre-allocate decoding errors, greatly reducing heap usage and GC time against
+ misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)).
+ - Re-use consumer expiry timers, removing one allocation per consumed message
+ ([#707](https://github.com/Shopify/sarama/pull/707)).
+
+Bug Fixes:
+ - Actually default the client ID to "sarama" like we say we do
+ ([#664](https://github.com/Shopify/sarama/pull/664)).
+ - Fix a rare issue where `Client.Leader` could return the wrong error
+ ([#685](https://github.com/Shopify/sarama/pull/685)).
+ - Fix a possible tight loop in the consumer
+ ([#693](https://github.com/Shopify/sarama/pull/693)).
+ - Match upstream's offset-tracking behaviour
+ ([#705](https://github.com/Shopify/sarama/pull/705)).
+ - Report UnknownTopicOrPartition errors from the offset manager
+ ([#706](https://github.com/Shopify/sarama/pull/706)).
+ - Fix possible negative partition value from the HashPartitioner
+ ([#709](https://github.com/Shopify/sarama/pull/709)).
+
+#### Version 1.9.0 (2016-05-16)
+
+New Features:
+ - Add support for custom offset manager retention durations
+ ([#602](https://github.com/Shopify/sarama/pull/602)).
+ - Publish low-level mocks to enable testing of third-party producer/consumer
+ implementations ([#570](https://github.com/Shopify/sarama/pull/570)).
+ - Declare support for Golang 1.6
+ ([#611](https://github.com/Shopify/sarama/pull/611)).
+ - Support for SASL plain-text auth
+ ([#648](https://github.com/Shopify/sarama/pull/648)).
+
+Improvements:
+ - Simplified broker locking scheme slightly
+ ([#604](https://github.com/Shopify/sarama/pull/604)).
+ - Documentation cleanup
+ ([#605](https://github.com/Shopify/sarama/pull/605),
+ [#621](https://github.com/Shopify/sarama/pull/621),
+ [#654](https://github.com/Shopify/sarama/pull/654)).
+
+Bug Fixes:
+ - Fix race condition shutting down the OffsetManager
+ ([#658](https://github.com/Shopify/sarama/pull/658)).
+
+#### Version 1.8.0 (2016-02-01)
+
+New Features:
+ - Full support for Kafka 0.9:
+ - All protocol messages and fields
+ ([#586](https://github.com/Shopify/sarama/pull/586),
+ [#588](https://github.com/Shopify/sarama/pull/588),
+ [#590](https://github.com/Shopify/sarama/pull/590)).
+ - Verified that TLS support works
+ ([#581](https://github.com/Shopify/sarama/pull/581)).
+ - Fixed the OffsetManager compatibility
+ ([#585](https://github.com/Shopify/sarama/pull/585)).
+
+Improvements:
+ - Optimize for fewer system calls when reading from the network
+ ([#584](https://github.com/Shopify/sarama/pull/584)).
+ - Automatically retry `InvalidMessage` errors to match upstream behaviour
+ ([#589](https://github.com/Shopify/sarama/pull/589)).
+
+#### Version 1.7.0 (2015-12-11)
+
+New Features:
+ - Preliminary support for Kafka 0.9
+ ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several
+ caveats:
+ - Protocol-layer support is mostly in place
+ ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9
+     renamed some messages and fields, which we did not rename in order to
+     preserve API compatibility.
+ - The producer and consumer work against 0.9, but the offset manager does
+ not ([#573](https://github.com/Shopify/sarama/pull/573)).
+ - TLS support may or may not work
+ ([#581](https://github.com/Shopify/sarama/pull/581)).
+
+Improvements:
+ - Don't wait for request timeouts on dead brokers, greatly speeding recovery
+ when the TCP connection is left hanging
+ ([#548](https://github.com/Shopify/sarama/pull/548)).
+ - Refactored part of the producer. The new version provides a much more elegant
+ solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also
+ slightly more efficient, and much more precise in calculating batch sizes
+ when compression is used
+ ([#549](https://github.com/Shopify/sarama/pull/549),
+ [#550](https://github.com/Shopify/sarama/pull/550),
+ [#551](https://github.com/Shopify/sarama/pull/551)).
+
+Bug Fixes:
+ - Fix race condition in consumer test mock
+ ([#553](https://github.com/Shopify/sarama/pull/553)).
+
+#### Version 1.6.1 (2015-09-25)
+
+Bug Fixes:
+ - Fix panic that could occur if a user-supplied message value failed to encode
+ ([#449](https://github.com/Shopify/sarama/pull/449)).
+
+#### Version 1.6.0 (2015-09-04)
+
+New Features:
+ - Implementation of a consumer offset manager using the APIs introduced in
+ Kafka 0.8.2. The API is designed mainly for integration into a future
+ high-level consumer, not for direct use, although it is *possible* to use it
+ directly.
+ ([#461](https://github.com/Shopify/sarama/pull/461)).
+
+Improvements:
+ - CRC32 calculation is much faster on machines with SSE4.2 instructions,
+ removing a major hotspot from most profiles
+ ([#255](https://github.com/Shopify/sarama/pull/255)).
+
+Bug Fixes:
+ - Make protocol decoding more robust against some malformed packets generated
+ by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523),
+ [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways
+ ([#528](https://github.com/Shopify/sarama/pull/528)).
+ - Fix a potential race condition panic in the consumer on shutdown
+ ([#529](https://github.com/Shopify/sarama/pull/529)).
+
+#### Version 1.5.0 (2015-08-17)
+
+New Features:
+ - TLS-encrypted network connections are now supported. This feature is subject
+ to change when Kafka releases built-in TLS support, but for now this is
+ enough to work with TLS-terminating proxies
+ ([#154](https://github.com/Shopify/sarama/pull/154)).
+
+Improvements:
+ - The consumer will not block if a single partition is not drained by the user;
+ all other partitions will continue to consume normally
+ ([#485](https://github.com/Shopify/sarama/pull/485)).
+ - Formatting of error strings has been much improved
+ ([#495](https://github.com/Shopify/sarama/pull/495)).
+ - Internal refactoring of the producer for code cleanliness and to enable
+ future work ([#300](https://github.com/Shopify/sarama/pull/300)).
+
+Bug Fixes:
+ - Fix a potential deadlock in the consumer on shutdown
+ ([#475](https://github.com/Shopify/sarama/pull/475)).
+
+#### Version 1.4.3 (2015-07-21)
+
+Bug Fixes:
+ - Don't include the partitioner in the producer's "fetch partitions"
+ circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
+ - Don't retry messages until the broker is closed when abandoning a broker in
+ the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
+ - Update the import path for snappy-go, it has moved again and the API has
+ changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
+
+#### Version 1.4.2 (2015-05-27)
+
+Bug Fixes:
+ - Update the import path for snappy-go, it has moved from google code to github
+ ([#456](https://github.com/Shopify/sarama/pull/456)).
+
+#### Version 1.4.1 (2015-05-25)
+
+Improvements:
+ - Optimizations when decoding snappy messages, thanks to John Potocny
+ ([#446](https://github.com/Shopify/sarama/pull/446)).
+
+Bug Fixes:
+ - Fix hypothetical race conditions on producer shutdown
+ ([#450](https://github.com/Shopify/sarama/pull/450),
+ [#451](https://github.com/Shopify/sarama/pull/451)).
+
+#### Version 1.4.0 (2015-05-01)
+
+New Features:
+ - The consumer now implements `Topics()` and `Partitions()` methods to enable
+ users to dynamically choose what topics/partitions to consume without
+ instantiating a full client
+ ([#431](https://github.com/Shopify/sarama/pull/431)).
+ - The partition-consumer now exposes the high water mark offset value returned
+ by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)).
+ - Added a `kafka-console-consumer` tool capable of handling multiple
+ partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
+ ([#439](https://github.com/Shopify/sarama/pull/439),
+ [#442](https://github.com/Shopify/sarama/pull/442)).
+
+Improvements:
+ - The producer's logging during retry scenarios is more consistent, more
+ useful, and slightly less verbose
+ ([#429](https://github.com/Shopify/sarama/pull/429)).
+ - The client now shuffles its initial list of seed brokers in order to prevent
+ thundering herd on the first broker in the list
+ ([#441](https://github.com/Shopify/sarama/pull/441)).
+
+Bug Fixes:
+ - The producer now correctly manages its state if retries occur when it is
+ shutting down, fixing several instances of confusing behaviour and at least
+ one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
+ - The consumer now handles messages for different partitions asynchronously,
+ making it much more resilient to specific user code ordering
+ ([#325](https://github.com/Shopify/sarama/pull/325)).
+
+#### Version 1.3.0 (2015-04-16)
+
+New Features:
+ - The client now tracks consumer group coordinators using
+ ConsumerMetadataRequests similar to how it tracks partition leadership using
+ regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
+ This adds two methods to the client API:
+ - `Coordinator(consumerGroup string) (*Broker, error)`
+ - `RefreshCoordinator(consumerGroup string) error`
+
+Improvements:
+ - ConsumerMetadataResponses now automatically create a Broker object out of the
+ ID/address/port combination for the Coordinator; accessing the fields
+ individually has been deprecated
+ ([#413](https://github.com/Shopify/sarama/pull/413)).
+ - Much improved handling of `OffsetOutOfRange` errors in the consumer.
+ Consumers will fail to start if the provided offset is out of range
+ ([#418](https://github.com/Shopify/sarama/pull/418))
+ and they will automatically shut down if the offset falls out of range
+ ([#424](https://github.com/Shopify/sarama/pull/424)).
+ - Small performance improvement in encoding and decoding protocol messages
+ ([#427](https://github.com/Shopify/sarama/pull/427)).
+
+Bug Fixes:
+ - Fix a rare race condition in the client's background metadata refresher if
+ it happens to be activated while the client is being closed
+ ([#422](https://github.com/Shopify/sarama/pull/422)).
+
+#### Version 1.2.0 (2015-04-07)
+
+Improvements:
+ - The producer's behaviour when `Flush.Frequency` is set is now more intuitive
+ ([#389](https://github.com/Shopify/sarama/pull/389)).
+ - The producer is now somewhat more memory-efficient during and after retrying
+ messages due to an improved queue implementation
+ ([#396](https://github.com/Shopify/sarama/pull/396)).
+ - The consumer produces much more useful logging output when leadership
+ changes ([#385](https://github.com/Shopify/sarama/pull/385)).
+ - The client's `GetOffset` method will now automatically refresh metadata and
+ retry once in the event of stale information or similar
+ ([#394](https://github.com/Shopify/sarama/pull/394)).
+ - Broker connections now have support for using TCP keepalives
+ ([#407](https://github.com/Shopify/sarama/issues/407)).
+
+Bug Fixes:
+ - The OffsetCommitRequest message now correctly implements all three possible
+ API versions ([#390](https://github.com/Shopify/sarama/pull/390),
+ [#400](https://github.com/Shopify/sarama/pull/400)).
+
+#### Version 1.1.0 (2015-03-20)
+
+Improvements:
+ - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
+ broken topics don't choke throughput
+ ([#373](https://github.com/Shopify/sarama/pull/373)).
+
+Bug Fixes:
+ - Fix the producer's internal reference counting in certain unusual scenarios
+ ([#367](https://github.com/Shopify/sarama/pull/367)).
+ - Fix the consumer's internal reference counting in certain unusual scenarios
+ ([#369](https://github.com/Shopify/sarama/pull/369)).
+ - Fix a condition where the producer's internal control messages could have
+ gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
+ - Fix an issue where invalid partition lists would be cached when asking for
+   metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
+
+
+#### Version 1.0.0 (2015-03-17)
+
+Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
+
+- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
+- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
+- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
+- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
+- All the configuration values have been unified in the `Config` struct.
+- Much improved test suite.
diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/Shopify/sarama/LICENSE
new file mode 100644
index 0000000..d2bf435
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2013 Shopify
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile
new file mode 100644
index 0000000..9c8329e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/Makefile
@@ -0,0 +1,56 @@
+export GO111MODULE=on
+
+default: fmt vet errcheck test lint
+
+# Taken from https://github.com/codecov/example-go#caveat-multiple-files
+.PHONY: test
+test:
+ echo "mode: atomic" > coverage.txt
+ for d in `go list ./...`; do \
+ go test -p 1 -v -timeout 6m -race -coverprofile=profile.out -covermode=atomic $$d || exit 1; \
+ if [ -f profile.out ]; then \
+ tail +2 profile.out >> coverage.txt; \
+ rm profile.out; \
+ fi \
+ done
+
+GOLINT := $(shell command -v golint)
+
+.PHONY: lint
+lint:
+ifndef GOLINT
+ go get golang.org/x/lint/golint
+endif
+ go list ./... | xargs golint
+
+.PHONY: vet
+vet:
+ go vet ./...
+
+ERRCHECK := $(shell command -v errcheck)
+# See https://github.com/kisielk/errcheck/pull/141 for details on ignorepkg
+.PHONY: errcheck
+errcheck:
+ifndef ERRCHECK
+ go get github.com/kisielk/errcheck
+endif
+ errcheck -ignorepkg fmt github.com/Shopify/sarama/...
+
+.PHONY: fmt
+fmt:
+ @if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi
+
+.PHONY: install_dependencies
+install_dependencies: get
+
+.PHONY: get
+get:
+ go get -v ./...
+
+.PHONY: clean
+clean:
+ go clean ./...
+
+.PHONY: tidy
+tidy:
+ go mod tidy -v
diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md
new file mode 100644
index 0000000..0206fac
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/README.md
@@ -0,0 +1,36 @@
+# sarama
+
+[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.svg)](https://godoc.org/github.com/Shopify/sarama)
+[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
+[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama)
+
+Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
+
+## Getting started
+
+- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama).
+- Mocks for testing are available in the [mocks](./mocks) subpackage.
+- The [examples](./examples) directory contains more elaborate example applications.
+- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
+
+You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
+
+## Compatibility and API stability
+
+Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
+the two latest stable releases of Kafka and Go, and we provide a two month
+grace period for older releases. This means we currently officially support
+Go 1.11 through 1.13, and Kafka 2.1 through 2.3, although older releases are
+still likely to work.
+
+Sarama follows semantic versioning and provides API stability via the gopkg.in service.
+You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
+A changelog is available [here](CHANGELOG.md).
+
+## Contributing
+
+- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
+- Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details.
+- The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information.
+- For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
+- If you have any questions, just ask!
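As a quick orientation alongside the links above, a minimal consumer sketch; the broker address, topic, and partition are placeholders:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()

	// "localhost:9092" and "example-topic" are placeholder values.
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	pc, err := consumer.ConsumePartition("example-topic", 0, sarama.OffsetNewest)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Close()

	for msg := range pc.Messages() {
		log.Printf("partition=%d offset=%d value=%s", msg.Partition, msg.Offset, msg.Value)
	}
}
```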
diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile
new file mode 100644
index 0000000..f4b848a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/Vagrantfile
@@ -0,0 +1,20 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION = "2"
+
+# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
+MEMORY = 3072
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+ config.vm.box = "ubuntu/trusty64"
+
+ config.vm.provision :shell, path: "vagrant/provision.sh"
+
+ config.vm.network "private_network", ip: "192.168.100.67"
+
+ config.vm.provider "virtualbox" do |v|
+ v.memory = MEMORY
+ end
+end
diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/Shopify/sarama/acl_bindings.go
new file mode 100644
index 0000000..50b689d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_bindings.go
@@ -0,0 +1,138 @@
+package sarama
+
+//Resource holds information about acl resource type
+type Resource struct {
+ ResourceType AclResourceType
+ ResourceName string
+ ResourcePatternType AclResourcePatternType
+}
+
+func (r *Resource) encode(pe packetEncoder, version int16) error {
+ pe.putInt8(int8(r.ResourceType))
+
+ if err := pe.putString(r.ResourceName); err != nil {
+ return err
+ }
+
+ if version == 1 {
+ if r.ResourcePatternType == AclPatternUnknown {
+ Logger.Print("Cannot encode an unknown resource pattern type, using Literal instead")
+ r.ResourcePatternType = AclPatternLiteral
+ }
+ pe.putInt8(int8(r.ResourcePatternType))
+ }
+
+ return nil
+}
+
+func (r *Resource) decode(pd packetDecoder, version int16) (err error) {
+ resourceType, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ r.ResourceType = AclResourceType(resourceType)
+
+ if r.ResourceName, err = pd.getString(); err != nil {
+ return err
+ }
+ if version == 1 {
+ pattern, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ r.ResourcePatternType = AclResourcePatternType(pattern)
+ }
+
+ return nil
+}
+
+//Acl holds information about acl type
+type Acl struct {
+ Principal string
+ Host string
+ Operation AclOperation
+ PermissionType AclPermissionType
+}
+
+func (a *Acl) encode(pe packetEncoder) error {
+ if err := pe.putString(a.Principal); err != nil {
+ return err
+ }
+
+ if err := pe.putString(a.Host); err != nil {
+ return err
+ }
+
+ pe.putInt8(int8(a.Operation))
+ pe.putInt8(int8(a.PermissionType))
+
+ return nil
+}
+
+func (a *Acl) decode(pd packetDecoder, version int16) (err error) {
+ if a.Principal, err = pd.getString(); err != nil {
+ return err
+ }
+
+ if a.Host, err = pd.getString(); err != nil {
+ return err
+ }
+
+ operation, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ a.Operation = AclOperation(operation)
+
+ permissionType, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ a.PermissionType = AclPermissionType(permissionType)
+
+ return nil
+}
+
+//ResourceAcls is an acl resource type
+type ResourceAcls struct {
+ Resource
+ Acls []*Acl
+}
+
+func (r *ResourceAcls) encode(pe packetEncoder, version int16) error {
+ if err := r.Resource.encode(pe, version); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(r.Acls)); err != nil {
+ return err
+ }
+ for _, acl := range r.Acls {
+ if err := acl.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *ResourceAcls) decode(pd packetDecoder, version int16) error {
+ if err := r.Resource.decode(pd, version); err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Acls = make([]*Acl, n)
+ for i := 0; i < n; i++ {
+ r.Acls[i] = new(Acl)
+ if err := r.Acls[i].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go
new file mode 100644
index 0000000..da1cdef
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_create_request.go
@@ -0,0 +1,85 @@
+package sarama
+
+//CreateAclsRequest is an acl creation request
+type CreateAclsRequest struct {
+ Version int16
+ AclCreations []*AclCreation
+}
+
+func (c *CreateAclsRequest) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(c.AclCreations)); err != nil {
+ return err
+ }
+
+ for _, aclCreation := range c.AclCreations {
+ if err := aclCreation.encode(pe, c.Version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+ c.Version = version
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ c.AclCreations = make([]*AclCreation, n)
+
+ for i := 0; i < n; i++ {
+ c.AclCreations[i] = new(AclCreation)
+ if err := c.AclCreations[i].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *CreateAclsRequest) key() int16 {
+ return 30
+}
+
+func (c *CreateAclsRequest) version() int16 {
+ return c.Version
+}
+
+func (c *CreateAclsRequest) requiredVersion() KafkaVersion {
+ switch c.Version {
+ case 1:
+ return V2_0_0_0
+ default:
+ return V0_11_0_0
+ }
+}
+
+//AclCreation is a wrapper around Resource and Acl type
+type AclCreation struct {
+ Resource
+ Acl
+}
+
+func (a *AclCreation) encode(pe packetEncoder, version int16) error {
+ if err := a.Resource.encode(pe, version); err != nil {
+ return err
+ }
+ if err := a.Acl.encode(pe); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) {
+ if err := a.Resource.decode(pd, version); err != nil {
+ return err
+ }
+ if err := a.Acl.decode(pd, version); err != nil {
+ return err
+ }
+
+ return nil
+}
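For orientation, a CreateAclsRequest is normally issued indirectly through the `ClusterAdmin` API. A minimal sketch, assuming a local broker and placeholder principal and topic values (the request version actually sent follows from `config.Version`, per `requiredVersion` above):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	// A Kafka 2.x config.Version lets the admin client use version 1 of the request.
	config.Version = sarama.V2_0_0_0

	// "localhost:9092", "User:example" and "example-topic" are placeholders.
	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	err = admin.CreateACL(
		sarama.Resource{
			ResourceType:        sarama.AclResourceTopic,
			ResourceName:        "example-topic",
			ResourcePatternType: sarama.AclPatternLiteral,
		},
		sarama.Acl{
			Principal:      "User:example",
			Host:           "*",
			Operation:      sarama.AclOperationWrite,
			PermissionType: sarama.AclPermissionAllow,
		},
	)
	if err != nil {
		log.Fatal(err)
	}
}
```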
diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go
new file mode 100644
index 0000000..f5a5e9a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_create_response.go
@@ -0,0 +1,90 @@
+package sarama
+
+import "time"
+
+//CreateAclsResponse is an acl creation response type
+type CreateAclsResponse struct {
+ ThrottleTime time.Duration
+ AclCreationResponses []*AclCreationResponse
+}
+
+func (c *CreateAclsResponse) encode(pe packetEncoder) error {
+ pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
+
+ if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil {
+ return err
+ }
+
+ for _, aclCreationResponse := range c.AclCreationResponses {
+ if err := aclCreationResponse.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+ throttleTime, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ c.AclCreationResponses = make([]*AclCreationResponse, n)
+ for i := 0; i < n; i++ {
+ c.AclCreationResponses[i] = new(AclCreationResponse)
+ if err := c.AclCreationResponses[i].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *CreateAclsResponse) key() int16 {
+ return 30
+}
+
+func (c *CreateAclsResponse) version() int16 {
+ return 0
+}
+
+func (c *CreateAclsResponse) requiredVersion() KafkaVersion {
+ return V0_11_0_0
+}
+
+//AclCreationResponse is an acl creation response type
+type AclCreationResponse struct {
+ Err KError
+ ErrMsg *string
+}
+
+func (a *AclCreationResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(a.Err))
+
+ if err := pe.putNullableString(a.ErrMsg); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ a.Err = KError(kerr)
+
+ if a.ErrMsg, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go
new file mode 100644
index 0000000..15908ea
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_delete_request.go
@@ -0,0 +1,58 @@
+package sarama
+
+//DeleteAclsRequest is a delete acl request
+type DeleteAclsRequest struct {
+ Version int
+ Filters []*AclFilter
+}
+
+func (d *DeleteAclsRequest) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(d.Filters)); err != nil {
+ return err
+ }
+
+ for _, filter := range d.Filters {
+ filter.Version = d.Version
+ if err := filter.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+ d.Version = int(version)
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ d.Filters = make([]*AclFilter, n)
+ for i := 0; i < n; i++ {
+ d.Filters[i] = new(AclFilter)
+ d.Filters[i].Version = int(version)
+ if err := d.Filters[i].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *DeleteAclsRequest) key() int16 {
+ return 31
+}
+
+func (d *DeleteAclsRequest) version() int16 {
+ return int16(d.Version)
+}
+
+func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
+ switch d.Version {
+ case 1:
+ return V2_0_0_0
+ default:
+ return V0_11_0_0
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go
new file mode 100644
index 0000000..6529565
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_delete_response.go
@@ -0,0 +1,159 @@
+package sarama
+
+import "time"
+
+//DeleteAclsResponse is a delete acl response
+type DeleteAclsResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ FilterResponses []*FilterResponse
+}
+
+func (d *DeleteAclsResponse) encode(pe packetEncoder) error {
+ pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
+
+ if err := pe.putArrayLength(len(d.FilterResponses)); err != nil {
+ return err
+ }
+
+ for _, filterResponse := range d.FilterResponses {
+ if err := filterResponse.encode(pe, d.Version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+ throttleTime, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ d.FilterResponses = make([]*FilterResponse, n)
+
+ for i := 0; i < n; i++ {
+ d.FilterResponses[i] = new(FilterResponse)
+ if err := d.FilterResponses[i].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *DeleteAclsResponse) key() int16 {
+ return 31
+}
+
+func (d *DeleteAclsResponse) version() int16 {
+ return int16(d.Version)
+}
+
+func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
+ return V0_11_0_0
+}
+
+//FilterResponse is a filter response type
+type FilterResponse struct {
+ Err KError
+ ErrMsg *string
+ MatchingAcls []*MatchingAcl
+}
+
+func (f *FilterResponse) encode(pe packetEncoder, version int16) error {
+ pe.putInt16(int16(f.Err))
+ if err := pe.putNullableString(f.ErrMsg); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil {
+ return err
+ }
+ for _, matchingAcl := range f.MatchingAcls {
+ if err := matchingAcl.encode(pe, version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ f.Err = KError(kerr)
+
+ if f.ErrMsg, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ f.MatchingAcls = make([]*MatchingAcl, n)
+ for i := 0; i < n; i++ {
+ f.MatchingAcls[i] = new(MatchingAcl)
+ if err := f.MatchingAcls[i].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+//MatchingAcl is a matching acl type
+type MatchingAcl struct {
+ Err KError
+ ErrMsg *string
+ Resource
+ Acl
+}
+
+func (m *MatchingAcl) encode(pe packetEncoder, version int16) error {
+ pe.putInt16(int16(m.Err))
+ if err := pe.putNullableString(m.ErrMsg); err != nil {
+ return err
+ }
+
+ if err := m.Resource.encode(pe, version); err != nil {
+ return err
+ }
+
+ if err := m.Acl.encode(pe); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ m.Err = KError(kerr)
+
+ if m.ErrMsg, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ if err := m.Resource.decode(pd, version); err != nil {
+ return err
+ }
+
+ if err := m.Acl.decode(pd, version); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go
new file mode 100644
index 0000000..5222d46
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_describe_request.go
@@ -0,0 +1,35 @@
+package sarama
+
+//DescribeAclsRequest is a describe acl request type
+type DescribeAclsRequest struct {
+ Version int
+ AclFilter
+}
+
+func (d *DescribeAclsRequest) encode(pe packetEncoder) error {
+ d.AclFilter.Version = d.Version
+ return d.AclFilter.encode(pe)
+}
+
+func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+ d.Version = int(version)
+ d.AclFilter.Version = int(version)
+ return d.AclFilter.decode(pd, version)
+}
+
+func (d *DescribeAclsRequest) key() int16 {
+ return 29
+}
+
+func (d *DescribeAclsRequest) version() int16 {
+ return int16(d.Version)
+}
+
+func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
+ switch d.Version {
+ case 1:
+ return V2_0_0_0
+ default:
+ return V0_11_0_0
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go
new file mode 100644
index 0000000..12126e5
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_describe_response.go
@@ -0,0 +1,87 @@
+package sarama
+
+import "time"
+
+//DescribeAclsResponse is a describe acl response type
+type DescribeAclsResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ Err KError
+ ErrMsg *string
+ ResourceAcls []*ResourceAcls
+}
+
+func (d *DescribeAclsResponse) encode(pe packetEncoder) error {
+ pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
+ pe.putInt16(int16(d.Err))
+
+ if err := pe.putNullableString(d.ErrMsg); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil {
+ return err
+ }
+
+ for _, resourceAcl := range d.ResourceAcls {
+ if err := resourceAcl.encode(pe, d.Version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+ throttleTime, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ d.Err = KError(kerr)
+
+ errmsg, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ if errmsg != "" {
+ d.ErrMsg = &errmsg
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ d.ResourceAcls = make([]*ResourceAcls, n)
+
+ for i := 0; i < n; i++ {
+ d.ResourceAcls[i] = new(ResourceAcls)
+ if err := d.ResourceAcls[i].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *DescribeAclsResponse) key() int16 {
+ return 29
+}
+
+func (d *DescribeAclsResponse) version() int16 {
+ return int16(d.Version)
+}
+
+func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
+ switch d.Version {
+ case 1:
+ return V2_0_0_0
+ default:
+ return V0_11_0_0
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/Shopify/sarama/acl_filter.go
new file mode 100644
index 0000000..fad5558
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_filter.go
@@ -0,0 +1,78 @@
+package sarama
+
+//AclFilter is a filter for matching acls when describing or deleting them
+type AclFilter struct {
+ Version int
+ ResourceType AclResourceType
+ ResourceName *string
+ ResourcePatternTypeFilter AclResourcePatternType
+ Principal *string
+ Host *string
+ Operation AclOperation
+ PermissionType AclPermissionType
+}
+
+func (a *AclFilter) encode(pe packetEncoder) error {
+ pe.putInt8(int8(a.ResourceType))
+ if err := pe.putNullableString(a.ResourceName); err != nil {
+ return err
+ }
+
+ if a.Version == 1 {
+ pe.putInt8(int8(a.ResourcePatternTypeFilter))
+ }
+
+ if err := pe.putNullableString(a.Principal); err != nil {
+ return err
+ }
+ if err := pe.putNullableString(a.Host); err != nil {
+ return err
+ }
+ pe.putInt8(int8(a.Operation))
+ pe.putInt8(int8(a.PermissionType))
+
+ return nil
+}
+
+func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) {
+ resourceType, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ a.ResourceType = AclResourceType(resourceType)
+
+ if a.ResourceName, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ if a.Version == 1 {
+ pattern, err := pd.getInt8()
+
+ if err != nil {
+ return err
+ }
+
+ a.ResourcePatternTypeFilter = AclResourcePatternType(pattern)
+ }
+
+ if a.Principal, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ if a.Host, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ operation, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ a.Operation = AclOperation(operation)
+
+ permissionType, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ a.PermissionType = AclPermissionType(permissionType)
+
+ return nil
+}
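+
+// Illustrative sketch (editor's addition, not part of upstream Sarama): a filter
+// matching every ACL on one topic, as might be passed to ClusterAdmin.ListAcls.
+// The topic name and the admin variable are assumptions made for the example;
+// Version is filled in from the enclosing request before encoding, so it can be
+// left at its zero value, and ResourcePatternTypeFilter only applies when the
+// request is sent at version 1 (Kafka 2.0+).
+//
+//	topic := "my-topic"
+//	filter := AclFilter{
+//		ResourceType:              AclResourceTopic,
+//		ResourceName:              &topic,
+//		ResourcePatternTypeFilter: AclPatternAny,
+//		Operation:                 AclOperationAny,
+//		PermissionType:            AclPermissionAny,
+//	}
+//	acls, err := admin.ListAcls(filter)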
diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go
new file mode 100644
index 0000000..c10ad7b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_types.go
@@ -0,0 +1,55 @@
+package sarama
+
+type (
+ AclOperation int
+
+ AclPermissionType int
+
+ AclResourceType int
+
+ AclResourcePatternType int
+)
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
+const (
+ AclOperationUnknown AclOperation = iota
+ AclOperationAny
+ AclOperationAll
+ AclOperationRead
+ AclOperationWrite
+ AclOperationCreate
+ AclOperationDelete
+ AclOperationAlter
+ AclOperationDescribe
+ AclOperationClusterAction
+ AclOperationDescribeConfigs
+ AclOperationAlterConfigs
+ AclOperationIdempotentWrite
+)
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
+const (
+ AclPermissionUnknown AclPermissionType = iota
+ AclPermissionAny
+ AclPermissionDeny
+ AclPermissionAllow
+)
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
+const (
+ AclResourceUnknown AclResourceType = iota
+ AclResourceAny
+ AclResourceTopic
+ AclResourceGroup
+ AclResourceCluster
+ AclResourceTransactionalID
+)
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java
+const (
+ AclPatternUnknown AclResourcePatternType = iota
+ AclPatternAny
+ AclPatternMatch
+ AclPatternLiteral
+ AclPatternPrefixed
+)
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
new file mode 100644
index 0000000..fc227ab
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
@@ -0,0 +1,53 @@
+package sarama
+
+//AddOffsetsToTxnRequest is a request type for adding offsets to a transaction
+type AddOffsetsToTxnRequest struct {
+ TransactionalID string
+ ProducerID int64
+ ProducerEpoch int16
+ GroupID string
+}
+
+func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(a.TransactionalID); err != nil {
+ return err
+ }
+
+ pe.putInt64(a.ProducerID)
+
+ pe.putInt16(a.ProducerEpoch)
+
+ if err := pe.putString(a.GroupID); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+ if a.TransactionalID, err = pd.getString(); err != nil {
+ return err
+ }
+ if a.ProducerID, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+ return err
+ }
+ if a.GroupID, err = pd.getString(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (a *AddOffsetsToTxnRequest) key() int16 {
+ return 25
+}
+
+func (a *AddOffsetsToTxnRequest) version() int16 {
+ return 0
+}
+
+func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
+ return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
new file mode 100644
index 0000000..c88c1f8
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
@@ -0,0 +1,45 @@
+package sarama
+
+import (
+ "time"
+)
+
+//AddOffsetsToTxnResponse is a response type for adding offsets to txns
+type AddOffsetsToTxnResponse struct {
+ ThrottleTime time.Duration
+ Err KError
+}
+
+func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error {
+ pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+ pe.putInt16(int16(a.Err))
+ return nil
+}
+
+func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+ throttleTime, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ a.Err = KError(kerr)
+
+ return nil
+}
+
+func (a *AddOffsetsToTxnResponse) key() int16 {
+ return 25
+}
+
+func (a *AddOffsetsToTxnResponse) version() int16 {
+ return 0
+}
+
+func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
+ return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
new file mode 100644
index 0000000..8d4b42e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
@@ -0,0 +1,77 @@
+package sarama
+
+//AddPartitionsToTxnRequest is a request type for adding partitions to a transaction
+type AddPartitionsToTxnRequest struct {
+ TransactionalID string
+ ProducerID int64
+ ProducerEpoch int16
+ TopicPartitions map[string][]int32
+}
+
+func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(a.TransactionalID); err != nil {
+ return err
+ }
+ pe.putInt64(a.ProducerID)
+ pe.putInt16(a.ProducerEpoch)
+
+ if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil {
+ return err
+ }
+ for topic, partitions := range a.TopicPartitions {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putInt32Array(partitions); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+ if a.TransactionalID, err = pd.getString(); err != nil {
+ return err
+ }
+ if a.ProducerID, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ a.TopicPartitions = make(map[string][]int32)
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ partitions, err := pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+
+ a.TopicPartitions[topic] = partitions
+ }
+
+ return nil
+}
+
+func (a *AddPartitionsToTxnRequest) key() int16 {
+ return 24
+}
+
+func (a *AddPartitionsToTxnRequest) version() int16 {
+ return 0
+}
+
+func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
+ return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
new file mode 100644
index 0000000..eb4f23e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
@@ -0,0 +1,110 @@
+package sarama
+
+import (
+ "time"
+)
+
+//AddPartitionsToTxnResponse is a response type for adding partitions to a transaction, carrying per-partition errors
+type AddPartitionsToTxnResponse struct {
+ ThrottleTime time.Duration
+ Errors map[string][]*PartitionError
+}
+
+func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error {
+ pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+ if err := pe.putArrayLength(len(a.Errors)); err != nil {
+ return err
+ }
+
+ for topic, e := range a.Errors {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(e)); err != nil {
+ return err
+ }
+ for _, partitionError := range e {
+ if err := partitionError.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+ throttleTime, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ a.Errors = make(map[string][]*PartitionError)
+
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ m, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ a.Errors[topic] = make([]*PartitionError, m)
+
+ for j := 0; j < m; j++ {
+ a.Errors[topic][j] = new(PartitionError)
+ if err := a.Errors[topic][j].decode(pd, version); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (a *AddPartitionsToTxnResponse) key() int16 {
+ return 24
+}
+
+func (a *AddPartitionsToTxnResponse) version() int16 {
+ return 0
+}
+
+func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
+ return V0_11_0_0
+}
+
+//PartitionError is a partition error type
+type PartitionError struct {
+ Partition int32
+ Err KError
+}
+
+func (p *PartitionError) encode(pe packetEncoder) error {
+ pe.putInt32(p.Partition)
+ pe.putInt16(int16(p.Err))
+ return nil
+}
+
+func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) {
+ if p.Partition, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ p.Err = KError(kerr)
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/Shopify/sarama/admin.go
new file mode 100644
index 0000000..6c9b1e9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/admin.go
@@ -0,0 +1,690 @@
+package sarama
+
+import (
+ "errors"
+ "math/rand"
+ "sync"
+)
+
+// ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics,
+// brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0.
+// Methods with stricter requirements will specify the minimum broker version required.
+// You MUST call Close() on a client to avoid leaks
+type ClusterAdmin interface {
+ // Creates a new topic. This operation is supported by brokers with version 0.10.1.0 or higher.
+ // It may take several seconds after CreateTopic returns success for all the brokers
+ // to become aware that the topic has been created. During this time, listTopics
+ // may not return information about the new topic. The validateOnly option is supported from version 0.10.2.0.
+ CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error
+
+ // List the topics available in the cluster with the default options.
+ ListTopics() (map[string]TopicDetail, error)
+
+ // Describe some topics in the cluster.
+ DescribeTopics(topics []string) (metadata []*TopicMetadata, err error)
+
+ // Delete a topic. It may take several seconds after DeleteTopic returns success
+ // for all the brokers to become aware that the topic is gone.
+ // During this time, listTopics may continue to return information about the deleted topic.
+ // If delete.topic.enable is false on the brokers, deleteTopic will mark
+ // the topic for deletion, but not actually delete it.
+ // This operation is supported by brokers with version 0.10.1.0 or higher.
+ DeleteTopic(topic string) error
+
+ // Increase the number of partitions of the topics according to the corresponding values.
+ // If partitions are increased for a topic that has a key, the partition logic or ordering of
+ // the messages will be affected. It may take several seconds after this method returns
+ // success for all the brokers to become aware that the partitions have been created.
+ // During this time, ClusterAdmin#describeTopics may not return information about the
+ // new partitions. This operation is supported by brokers with version 1.0.0 or higher.
+ CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error
+
+ // Delete records whose offset is smaller than the given offset of the corresponding partition.
+ // This operation is supported by brokers with version 0.11.0.0 or higher.
+ DeleteRecords(topic string, partitionOffsets map[int32]int64) error
+
+ // Get the configuration for the specified resources.
+ // The returned configuration includes default values; entries where Default is true
+ // can be used to distinguish them from user-supplied values.
+ // Config entries where ReadOnly is true cannot be updated.
+ // The value of config entries where Sensitive is true is always nil so
+ // sensitive information is not disclosed.
+ // This operation is supported by brokers with version 0.11.0.0 or higher.
+ DescribeConfig(resource ConfigResource) ([]ConfigEntry, error)
+
+ // Update the configuration for the specified resources with the default options.
+ // This operation is supported by brokers with version 0.11.0.0 or higher.
+ // The resources with their configs (topic is the only resource type with configs
+ // that can be updated currently). Updates are not transactional, so they may succeed
+ // for some resources while failing for others. The configs for a particular resource are updated atomically.
+ AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error
+
+ // Creates access control lists (ACLs) which are bound to specific resources.
+ // This operation is not transactional, so it may succeed for some ACLs while failing for others.
+ // If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
+ // no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher.
+ CreateACL(resource Resource, acl Acl) error
+
+ // Lists access control lists (ACLs) according to the supplied filter.
+ // It may take some time for changes made by createAcls or deleteAcls to be reflected in the output of ListAcls.
+ // This operation is supported by brokers with version 0.11.0.0 or higher.
+ ListAcls(filter AclFilter) ([]ResourceAcls, error)
+
+ // Deletes access control lists (ACLs) according to the supplied filters.
+ // This operation is not transactional, so it may succeed for some ACLs while failing for others.
+ // This operation is supported by brokers with version 0.11.0.0 or higher.
+ DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error)
+
+ // List the consumer groups available in the cluster.
+ ListConsumerGroups() (map[string]string, error)
+
+ // Describe the given consumer groups.
+ DescribeConsumerGroups(groups []string) ([]*GroupDescription, error)
+
+ // List the consumer group offsets available in the cluster.
+ ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error)
+
+ // Delete a consumer group.
+ DeleteConsumerGroup(group string) error
+
+ // Get information about the nodes in the cluster
+ DescribeCluster() (brokers []*Broker, controllerID int32, err error)
+
+ // Close shuts down the admin and closes underlying client.
+ Close() error
+}
+
+type clusterAdmin struct {
+ client Client
+ conf *Config
+}
+
+// NewClusterAdmin creates a new ClusterAdmin using the given broker addresses and configuration.
+func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) {
+ client, err := NewClient(addrs, conf)
+ if err != nil {
+ return nil, err
+ }
+ return NewClusterAdminFromClient(client)
+}
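+
+// Illustrative sketch (editor's addition, not part of upstream Sarama): a minimal
+// use of NewClusterAdmin to create a topic. The broker address, topic name, and
+// version choice are assumptions made for the example, and error handling is elided.
+//
+//	cfg := NewConfig()
+//	cfg.Version = V1_0_0_0
+//	admin, err := NewClusterAdmin([]string{"localhost:9092"}, cfg)
+//	if err != nil { /* handle error */ }
+//	defer admin.Close()
+//	err = admin.CreateTopic("my-topic", &TopicDetail{
+//		NumPartitions:     1,
+//		ReplicationFactor: 1,
+//	}, false)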
+
+// NewClusterAdminFromClient creates a new ClusterAdmin using the given client.
+// Note that underlying client will also be closed on admin's Close() call.
+func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) {
+ //make sure we can retrieve the controller
+ _, err := client.Controller()
+ if err != nil {
+ return nil, err
+ }
+
+ ca := &clusterAdmin{
+ client: client,
+ conf: client.Config(),
+ }
+ return ca, nil
+}
+
+func (ca *clusterAdmin) Close() error {
+ return ca.client.Close()
+}
+
+func (ca *clusterAdmin) Controller() (*Broker, error) {
+ return ca.client.Controller()
+}
+
+func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
+
+ if topic == "" {
+ return ErrInvalidTopic
+ }
+
+ if detail == nil {
+ return errors.New("you must specify topic details")
+ }
+
+ topicDetails := make(map[string]*TopicDetail)
+ topicDetails[topic] = detail
+
+ request := &CreateTopicsRequest{
+ TopicDetails: topicDetails,
+ ValidateOnly: validateOnly,
+ Timeout: ca.conf.Admin.Timeout,
+ }
+
+ if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+ request.Version = 1
+ }
+ if ca.conf.Version.IsAtLeast(V1_0_0_0) {
+ request.Version = 2
+ }
+
+ b, err := ca.Controller()
+ if err != nil {
+ return err
+ }
+
+ rsp, err := b.CreateTopics(request)
+ if err != nil {
+ return err
+ }
+
+ topicErr, ok := rsp.TopicErrors[topic]
+ if !ok {
+ return ErrIncompleteResponse
+ }
+
+ if topicErr.Err != ErrNoError {
+ return topicErr
+ }
+
+ return nil
+}
+
+func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) {
+ controller, err := ca.Controller()
+ if err != nil {
+ return nil, err
+ }
+
+ request := &MetadataRequest{
+ Topics: topics,
+ AllowAutoTopicCreation: false,
+ }
+
+ if ca.conf.Version.IsAtLeast(V1_0_0_0) {
+ request.Version = 5
+ } else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+ request.Version = 4
+ }
+
+ response, err := controller.GetMetadata(request)
+ if err != nil {
+ return nil, err
+ }
+ return response.Topics, nil
+}
+
+func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) {
+ controller, err := ca.Controller()
+ if err != nil {
+ return nil, int32(0), err
+ }
+
+ request := &MetadataRequest{
+ Topics: []string{},
+ }
+
+ if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+ request.Version = 1
+ }
+
+ response, err := controller.GetMetadata(request)
+ if err != nil {
+ return nil, int32(0), err
+ }
+
+ return response.Brokers, response.ControllerID, nil
+}
+
+func (ca *clusterAdmin) findAnyBroker() (*Broker, error) {
+ brokers := ca.client.Brokers()
+ if len(brokers) > 0 {
+ index := rand.Intn(len(brokers))
+ return brokers[index], nil
+ }
+ return nil, errors.New("no available broker")
+}
+
+func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) {
+ // In order to build TopicDetails we need to first get the list of all
+ // topics using a MetadataRequest and then get their configs using a
+ // DescribeConfigsRequest request. To avoid sending many requests to the
+ // broker, we use a single DescribeConfigsRequest.
+
+ // Send the all-topic MetadataRequest
+ b, err := ca.findAnyBroker()
+ if err != nil {
+ return nil, err
+ }
+ _ = b.Open(ca.client.Config())
+
+ metadataReq := &MetadataRequest{}
+ metadataResp, err := b.GetMetadata(metadataReq)
+ if err != nil {
+ return nil, err
+ }
+
+ topicsDetailsMap := make(map[string]TopicDetail)
+
+ var describeConfigsResources []*ConfigResource
+
+ for _, topic := range metadataResp.Topics {
+ topicDetails := TopicDetail{
+ NumPartitions: int32(len(topic.Partitions)),
+ }
+ if len(topic.Partitions) > 0 {
+ topicDetails.ReplicaAssignment = map[int32][]int32{}
+ for _, partition := range topic.Partitions {
+ topicDetails.ReplicaAssignment[partition.ID] = partition.Replicas
+ }
+ topicDetails.ReplicationFactor = int16(len(topic.Partitions[0].Replicas))
+ }
+ topicsDetailsMap[topic.Name] = topicDetails
+
+ // we populate the resources we want to describe from the MetadataResponse
+ topicResource := ConfigResource{
+ Type: TopicResource,
+ Name: topic.Name,
+ }
+ describeConfigsResources = append(describeConfigsResources, &topicResource)
+ }
+
+ // Send the DescribeConfigsRequest
+ describeConfigsReq := &DescribeConfigsRequest{
+ Resources: describeConfigsResources,
+ }
+ describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, resource := range describeConfigsResp.Resources {
+ topicDetails := topicsDetailsMap[resource.Name]
+ topicDetails.ConfigEntries = make(map[string]*string)
+
+ for _, entry := range resource.Configs {
+ // only include non-default non-sensitive config
+ // (don't actually think topic config will ever be sensitive)
+ if entry.Default || entry.Sensitive {
+ continue
+ }
+ topicDetails.ConfigEntries[entry.Name] = &entry.Value
+ }
+
+ topicsDetailsMap[resource.Name] = topicDetails
+ }
+
+ return topicsDetailsMap, nil
+}
+
+func (ca *clusterAdmin) DeleteTopic(topic string) error {
+
+ if topic == "" {
+ return ErrInvalidTopic
+ }
+
+ request := &DeleteTopicsRequest{
+ Topics: []string{topic},
+ Timeout: ca.conf.Admin.Timeout,
+ }
+
+ if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+ request.Version = 1
+ }
+
+ b, err := ca.Controller()
+ if err != nil {
+ return err
+ }
+
+ rsp, err := b.DeleteTopics(request)
+ if err != nil {
+ return err
+ }
+
+ topicErr, ok := rsp.TopicErrorCodes[topic]
+ if !ok {
+ return ErrIncompleteResponse
+ }
+
+ if topicErr != ErrNoError {
+ return topicErr
+ }
+ return nil
+}
+
+func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error {
+ if topic == "" {
+ return ErrInvalidTopic
+ }
+
+ topicPartitions := make(map[string]*TopicPartition)
+ topicPartitions[topic] = &TopicPartition{Count: count, Assignment: assignment}
+
+ request := &CreatePartitionsRequest{
+ TopicPartitions: topicPartitions,
+ Timeout: ca.conf.Admin.Timeout,
+ }
+
+ b, err := ca.Controller()
+ if err != nil {
+ return err
+ }
+
+ rsp, err := b.CreatePartitions(request)
+ if err != nil {
+ return err
+ }
+
+ topicErr, ok := rsp.TopicPartitionErrors[topic]
+ if !ok {
+ return ErrIncompleteResponse
+ }
+
+ if topicErr.Err != ErrNoError {
+ return topicErr
+ }
+
+ return nil
+}
+
+func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error {
+
+ if topic == "" {
+ return ErrInvalidTopic
+ }
+ partitionPerBroker := make(map[*Broker][]int32)
+ for partition := range partitionOffsets {
+ broker, err := ca.client.Leader(topic, partition)
+ if err != nil {
+ return err
+ }
+ if _, ok := partitionPerBroker[broker]; ok {
+ partitionPerBroker[broker] = append(partitionPerBroker[broker], partition)
+ } else {
+ partitionPerBroker[broker] = []int32{partition}
+ }
+ }
+ errs := make([]error, 0)
+ for broker, partitions := range partitionPerBroker {
+ topics := make(map[string]*DeleteRecordsRequestTopic)
+ recordsToDelete := make(map[int32]int64)
+ for _, p := range partitions {
+ recordsToDelete[p] = partitionOffsets[p]
+ }
+ topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: recordsToDelete}
+ request := &DeleteRecordsRequest{
+ Topics: topics,
+ Timeout: ca.conf.Admin.Timeout,
+ }
+
+ rsp, err := broker.DeleteRecords(request)
+ if err != nil {
+ errs = append(errs, err)
+ } else {
+ deleteRecordsResponseTopic, ok := rsp.Topics[topic]
+ if !ok {
+ errs = append(errs, ErrIncompleteResponse)
+ } else {
+ for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions {
+ if deleteRecordsResponsePartition.Err != ErrNoError {
+ errs = append(errs, errors.New(deleteRecordsResponsePartition.Err.Error()))
+ }
+ }
+ }
+ }
+ }
+ if len(errs) > 0 {
+ return ErrDeleteRecords{MultiError{&errs}}
+ }
+ //todo: since we are dealing with a couple of partitions it would be good if we returned a slice of errors
+ //for each partition instead of a single error
+ return nil
+}
+
+func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
+
+ var entries []ConfigEntry
+ var resources []*ConfigResource
+ resources = append(resources, &resource)
+
+ request := &DescribeConfigsRequest{
+ Resources: resources,
+ }
+
+ b, err := ca.Controller()
+ if err != nil {
+ return nil, err
+ }
+
+ rsp, err := b.DescribeConfigs(request)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, rspResource := range rsp.Resources {
+ if rspResource.Name == resource.Name {
+ if rspResource.ErrorMsg != "" {
+ return nil, errors.New(rspResource.ErrorMsg)
+ }
+ for _, cfgEntry := range rspResource.Configs {
+ entries = append(entries, *cfgEntry)
+ }
+ }
+ }
+ return entries, nil
+}
+
+func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error {
+
+ var resources []*AlterConfigsResource
+ resources = append(resources, &AlterConfigsResource{
+ Type: resourceType,
+ Name: name,
+ ConfigEntries: entries,
+ })
+
+ request := &AlterConfigsRequest{
+ Resources: resources,
+ ValidateOnly: validateOnly,
+ }
+
+ b, err := ca.Controller()
+ if err != nil {
+ return err
+ }
+
+ rsp, err := b.AlterConfigs(request)
+ if err != nil {
+ return err
+ }
+
+ for _, rspResource := range rsp.Resources {
+ if rspResource.Name == name {
+ if rspResource.ErrorMsg != "" {
+ return errors.New(rspResource.ErrorMsg)
+ }
+ }
+ }
+ return nil
+}
+
+func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error {
+ var acls []*AclCreation
+ acls = append(acls, &AclCreation{resource, acl})
+ request := &CreateAclsRequest{AclCreations: acls}
+
+ if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+ request.Version = 1
+ }
+
+ b, err := ca.Controller()
+ if err != nil {
+ return err
+ }
+
+ _, err = b.CreateAcls(request)
+ return err
+}
+
+func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) {
+
+ request := &DescribeAclsRequest{AclFilter: filter}
+
+ if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+ request.Version = 1
+ }
+
+ b, err := ca.Controller()
+ if err != nil {
+ return nil, err
+ }
+
+ rsp, err := b.DescribeAcls(request)
+ if err != nil {
+ return nil, err
+ }
+
+ var lAcls []ResourceAcls
+ for _, rAcl := range rsp.ResourceAcls {
+ lAcls = append(lAcls, *rAcl)
+ }
+ return lAcls, nil
+}
+
+func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) {
+ var filters []*AclFilter
+ filters = append(filters, &filter)
+ request := &DeleteAclsRequest{Filters: filters}
+
+ if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+ request.Version = 1
+ }
+
+ b, err := ca.Controller()
+ if err != nil {
+ return nil, err
+ }
+
+ rsp, err := b.DeleteAcls(request)
+ if err != nil {
+ return nil, err
+ }
+
+ var mAcls []MatchingAcl
+ for _, fr := range rsp.FilterResponses {
+ for _, mACL := range fr.MatchingAcls {
+ mAcls = append(mAcls, *mACL)
+ }
+
+ }
+ return mAcls, nil
+}
+
+func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) {
+ groupsPerBroker := make(map[*Broker][]string)
+
+ for _, group := range groups {
+ controller, err := ca.client.Coordinator(group)
+ if err != nil {
+ return nil, err
+ }
+ groupsPerBroker[controller] = append(groupsPerBroker[controller], group)
+
+ }
+
+ for broker, brokerGroups := range groupsPerBroker {
+ response, err := broker.DescribeGroups(&DescribeGroupsRequest{
+ Groups: brokerGroups,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ result = append(result, response.Groups...)
+ }
+ return result, nil
+}
+
+func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err error) {
+ allGroups = make(map[string]string)
+
+ // Query brokers in parallel, since we have to query *all* brokers
+ brokers := ca.client.Brokers()
+ groupMaps := make(chan map[string]string, len(brokers))
+ errors := make(chan error, len(brokers))
+ wg := sync.WaitGroup{}
+
+ for _, b := range brokers {
+ wg.Add(1)
+ go func(b *Broker, conf *Config) {
+ defer wg.Done()
+ _ = b.Open(conf) // Ensure that broker is opened
+
+ response, err := b.ListGroups(&ListGroupsRequest{})
+ if err != nil {
+ errors <- err
+ return
+ }
+
+ groups := make(map[string]string)
+ for group, typ := range response.Groups {
+ groups[group] = typ
+ }
+
+ groupMaps <- groups
+
+ }(b, ca.conf)
+ }
+
+ wg.Wait()
+ close(groupMaps)
+ close(errors)
+
+ for groupMap := range groupMaps {
+ for group, protocolType := range groupMap {
+ allGroups[group] = protocolType
+ }
+ }
+
+ // Intentionally return only the first error for simplicity
+ err = <-errors
+ return
+}
+
+func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) {
+ coordinator, err := ca.client.Coordinator(group)
+ if err != nil {
+ return nil, err
+ }
+
+ request := &OffsetFetchRequest{
+ ConsumerGroup: group,
+ partitions: topicPartitions,
+ }
+
+ if ca.conf.Version.IsAtLeast(V0_10_2_0) {
+ request.Version = 2
+ } else if ca.conf.Version.IsAtLeast(V0_8_2_2) {
+ request.Version = 1
+ }
+
+ return coordinator.FetchOffset(request)
+}
+
+func (ca *clusterAdmin) DeleteConsumerGroup(group string) error {
+ coordinator, err := ca.client.Coordinator(group)
+ if err != nil {
+ return err
+ }
+
+ request := &DeleteGroupsRequest{
+ Groups: []string{group},
+ }
+
+ resp, err := coordinator.DeleteGroups(request)
+ if err != nil {
+ return err
+ }
+
+ groupErr, ok := resp.GroupErrorCodes[group]
+ if !ok {
+ return ErrIncompleteResponse
+ }
+
+ if groupErr != ErrNoError {
+ return groupErr
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go
new file mode 100644
index 0000000..26c275b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_configs_request.go
@@ -0,0 +1,122 @@
+package sarama
+
+//AlterConfigsRequest is an alter config request type
+type AlterConfigsRequest struct {
+ Resources []*AlterConfigsResource
+ ValidateOnly bool
+}
+
+//AlterConfigsResource is an alter config resource type
+type AlterConfigsResource struct {
+ Type ConfigResourceType
+ Name string
+ ConfigEntries map[string]*string
+}
+
+func (a *AlterConfigsRequest) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(a.Resources)); err != nil {
+ return err
+ }
+
+ for _, r := range a.Resources {
+ if err := r.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ pe.putBool(a.ValidateOnly)
+ return nil
+}
+
+func (a *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
+ resourceCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ a.Resources = make([]*AlterConfigsResource, resourceCount)
+ for i := range a.Resources {
+ r := &AlterConfigsResource{}
+ err = r.decode(pd, version)
+ if err != nil {
+ return err
+ }
+ a.Resources[i] = r
+ }
+
+ validateOnly, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+
+ a.ValidateOnly = validateOnly
+
+ return nil
+}
+
+func (a *AlterConfigsResource) encode(pe packetEncoder) error {
+ pe.putInt8(int8(a.Type))
+
+ if err := pe.putString(a.Name); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
+ return err
+ }
+ for configKey, configValue := range a.ConfigEntries {
+ if err := pe.putString(configKey); err != nil {
+ return err
+ }
+ if err := pe.putNullableString(configValue); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (a *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
+ t, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ a.Type = ConfigResourceType(t)
+
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ a.Name = name
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if n > 0 {
+ a.ConfigEntries = make(map[string]*string, n)
+ for i := 0; i < n; i++ {
+ configKey, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ if a.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
+ return err
+ }
+ }
+ }
+ return err
+}
+
+func (a *AlterConfigsRequest) key() int16 {
+ return 33
+}
+
+func (a *AlterConfigsRequest) version() int16 {
+ return 0
+}
+
+func (a *AlterConfigsRequest) requiredVersion() KafkaVersion {
+ return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go
new file mode 100644
index 0000000..3893663
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_configs_response.go
@@ -0,0 +1,97 @@
+package sarama
+
+import "time"
+
+//AlterConfigsResponse is a response type for alter config
+type AlterConfigsResponse struct {
+ ThrottleTime time.Duration
+ Resources []*AlterConfigsResourceResponse
+}
+
+//AlterConfigsResourceResponse is a response type for alter config resource
+type AlterConfigsResourceResponse struct {
+ ErrorCode int16
+ ErrorMsg string
+ Type ConfigResourceType
+ Name string
+}
+
+func (a *AlterConfigsResponse) encode(pe packetEncoder) error {
+ pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+
+ if err := pe.putArrayLength(len(a.Resources)); err != nil {
+ return err
+ }
+
+ for i := range a.Resources {
+ pe.putInt16(a.Resources[i].ErrorCode)
+ err := pe.putString(a.Resources[i].ErrorMsg)
+ if err != nil {
+ return err
+ }
+ pe.putInt8(int8(a.Resources[i].Type))
+ err = pe.putString(a.Resources[i].Name)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
+ throttleTime, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+ responseCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
+
+ for i := range a.Resources {
+ a.Resources[i] = new(AlterConfigsResourceResponse)
+
+ errCode, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ a.Resources[i].ErrorCode = errCode
+
+ e, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ a.Resources[i].ErrorMsg = e
+
+ t, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ a.Resources[i].Type = ConfigResourceType(t)
+
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ a.Resources[i].Name = name
+ }
+
+ return nil
+}
+
+func (a *AlterConfigsResponse) key() int16 {
+ return 32
+}
+
+func (a *AlterConfigsResponse) version() int16 {
+ return 0
+}
+
+func (a *AlterConfigsResponse) requiredVersion() KafkaVersion {
+ return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go
new file mode 100644
index 0000000..b33167c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/api_versions_request.go
@@ -0,0 +1,25 @@
+package sarama
+
+//ApiVersionsRequest is an api versions request type; it carries no fields
+type ApiVersionsRequest struct {
+}
+
+func (a *ApiVersionsRequest) encode(pe packetEncoder) error {
+ return nil
+}
+
+func (a *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
+ return nil
+}
+
+func (a *ApiVersionsRequest) key() int16 {
+ return 18
+}
+
+func (a *ApiVersionsRequest) version() int16 {
+ return 0
+}
+
+func (a *ApiVersionsRequest) requiredVersion() KafkaVersion {
+ return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go
new file mode 100644
index 0000000..bb1f0b3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/api_versions_response.go
@@ -0,0 +1,89 @@
+package sarama
+
+//ApiVersionsResponseBlock is an api version response block type
+type ApiVersionsResponseBlock struct {
+ ApiKey int16
+ MinVersion int16
+ MaxVersion int16
+}
+
+func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
+ pe.putInt16(b.ApiKey)
+ pe.putInt16(b.MinVersion)
+ pe.putInt16(b.MaxVersion)
+ return nil
+}
+
+func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
+ var err error
+
+ if b.ApiKey, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ if b.MinVersion, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ if b.MaxVersion, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+//ApiVersionsResponse is an api version response type
+type ApiVersionsResponse struct {
+ Err KError
+ ApiVersions []*ApiVersionsResponseBlock
+}
+
+func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
+ return err
+ }
+ for _, apiVersion := range r.ApiVersions {
+ if err := apiVersion.encode(pe); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
+ for i := 0; i < numBlocks; i++ {
+ block := new(ApiVersionsResponseBlock)
+ if err := block.decode(pd); err != nil {
+ return err
+ }
+ r.ApiVersions[i] = block
+ }
+
+ return nil
+}
+
+func (r *ApiVersionsResponse) key() int16 {
+ return 18
+}
+
+func (r *ApiVersionsResponse) version() int16 {
+ return 0
+}
+
+func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
+ return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go
new file mode 100644
index 0000000..9b15cd1
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/async_producer.go
@@ -0,0 +1,1116 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/eapache/go-resiliency/breaker"
+ "github.com/eapache/queue"
+)
+
+// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
+// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
+// and parses responses for errors. You must read from the Errors() channel or the
+// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
+// leaks: it will not be garbage-collected automatically when it passes out of
+// scope.
+type AsyncProducer interface {
+
+ // AsyncClose triggers a shutdown of the producer. The shutdown has completed
+ // when both the Errors and Successes channels have been closed. When calling
+ // AsyncClose, you *must* continue to read from those channels in order to
+ // drain the results of any messages in flight.
+ AsyncClose()
+
+ // Close shuts down the producer and waits for any buffered messages to be
+ // flushed. You must call this function before a producer object passes out of
+ // scope, as it may otherwise leak memory. You must call this before calling
+ // Close on the underlying client.
+ Close() error
+
+ // Input is the input channel for the user to write messages to that they
+ // wish to send.
+ Input() chan<- *ProducerMessage
+
+ // Successes is the success output channel back to the user when Return.Successes is
+ // enabled. If Return.Successes is true, you MUST read from this channel or the
+ // Producer will deadlock. It is suggested that you send and read messages
+ // together in a single select statement.
+ Successes() <-chan *ProducerMessage
+
+ // Errors is the error output channel back to the user. You MUST read from this
+ // channel or the Producer will deadlock when the channel is full. Alternatively,
+ // you can set Producer.Return.Errors in your config to false, which prevents
+ // errors to be returned.
+ Errors() <-chan *ProducerError
+}
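+
+// Illustrative sketch (editor's addition, not part of upstream Sarama): the
+// send-and-read pattern described above, with Return.Successes enabled. The
+// broker address and topic name are assumptions made for the example; call
+// AsyncClose and drain both channels when shutting down.
+//
+//	cfg := NewConfig()
+//	cfg.Producer.Return.Successes = true
+//	producer, err := NewAsyncProducer([]string{"localhost:9092"}, cfg)
+//	if err != nil { /* handle error */ }
+//	for {
+//		select {
+//		case producer.Input() <- &ProducerMessage{Topic: "my-topic", Value: StringEncoder("hello")}:
+//		case s := <-producer.Successes():
+//			Logger.Printf("delivered to partition %d at offset %d\n", s.Partition, s.Offset)
+//		case e := <-producer.Errors():
+//			Logger.Println(e)
+//		}
+//	}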
+
+// transactionManager keeps the state necessary to ensure idempotent production
+type transactionManager struct {
+ producerID int64
+ producerEpoch int16
+ sequenceNumbers map[string]int32
+ mutex sync.Mutex
+}
+
+const (
+ noProducerID = -1
+ noProducerEpoch = -1
+)
+
+func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) int32 {
+ key := fmt.Sprintf("%s-%d", topic, partition)
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ sequence := t.sequenceNumbers[key]
+ t.sequenceNumbers[key] = sequence + 1
+ return sequence
+}
+
+func newTransactionManager(conf *Config, client Client) (*transactionManager, error) {
+ txnmgr := &transactionManager{
+ producerID: noProducerID,
+ producerEpoch: noProducerEpoch,
+ }
+
+ if conf.Producer.Idempotent {
+ initProducerIDResponse, err := client.InitProducerID()
+ if err != nil {
+ return nil, err
+ }
+ txnmgr.producerID = initProducerIDResponse.ProducerID
+ txnmgr.producerEpoch = initProducerIDResponse.ProducerEpoch
+ txnmgr.sequenceNumbers = make(map[string]int32)
+ txnmgr.mutex = sync.Mutex{}
+
+ Logger.Printf("Obtained a ProducerId: %d and ProducerEpoch: %d\n", txnmgr.producerID, txnmgr.producerEpoch)
+ }
+
+ return txnmgr, nil
+}
+
+type asyncProducer struct {
+ client Client
+ conf *Config
+
+ errors chan *ProducerError
+ input, successes, retries chan *ProducerMessage
+ inFlight sync.WaitGroup
+
+ brokers map[*Broker]*brokerProducer
+ brokerRefs map[*brokerProducer]int
+ brokerLock sync.Mutex
+
+ txnmgr *transactionManager
+}
+
+// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
+func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
+ client, err := NewClient(addrs, conf)
+ if err != nil {
+ return nil, err
+ }
+ return newAsyncProducer(client)
+}
+
+// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
+ // For a client passed in by the caller, ensure we don't
+ // call Close() on it.
+ cli := &nopCloserClient{client}
+ return newAsyncProducer(cli)
+}
+
+func newAsyncProducer(client Client) (AsyncProducer, error) {
+ // Check that we are not dealing with a closed Client before processing any other arguments
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ txnmgr, err := newTransactionManager(client.Config(), client)
+ if err != nil {
+ return nil, err
+ }
+
+ p := &asyncProducer{
+ client: client,
+ conf: client.Config(),
+ errors: make(chan *ProducerError),
+ input: make(chan *ProducerMessage),
+ successes: make(chan *ProducerMessage),
+ retries: make(chan *ProducerMessage),
+ brokers: make(map[*Broker]*brokerProducer),
+ brokerRefs: make(map[*brokerProducer]int),
+ txnmgr: txnmgr,
+ }
+
+ // launch our singleton dispatchers
+ go withRecover(p.dispatcher)
+ go withRecover(p.retryHandler)
+
+ return p, nil
+}
+
+type flagSet int8
+
+const (
+ syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer
+ fin // final message from partitionProducer to brokerProducer and back
+ shutdown // start the shutdown process
+)
+
+// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
+type ProducerMessage struct {
+ Topic string // The Kafka topic for this message.
+ // The partitioning key for this message. Pre-existing Encoders include
+ // StringEncoder and ByteEncoder.
+ Key Encoder
+ // The actual message to store in Kafka. Pre-existing Encoders include
+ // StringEncoder and ByteEncoder.
+ Value Encoder
+
+ // The headers are key-value pairs that are transparently passed
+ // by Kafka between producers and consumers.
+ Headers []RecordHeader
+
+ // This field is used to hold arbitrary data you wish to include so it
+ // will be available when receiving on the Successes and Errors channels.
+ // Sarama completely ignores this field; it is only to be used for
+ // pass-through data.
+ Metadata interface{}
+
+ // Below this point are filled in by the producer as the message is processed
+
+ // Offset is the offset of the message stored on the broker. This is only
+ // guaranteed to be defined if the message was successfully delivered and
+ // RequiredAcks is not NoResponse.
+ Offset int64
+ // Partition is the partition that the message was sent to. This is only
+ // guaranteed to be defined if the message was successfully delivered.
+ Partition int32
+ // Timestamp can vary in behaviour depending on broker configuration, being
+ // in either one of the CreateTime or LogAppendTime modes (default CreateTime),
+ // and requiring version at least 0.10.0.
+ //
+ // When configured to CreateTime, the timestamp is specified by the producer
+ // either by explicitly setting this field, or when the message is added
+ // to a produce set.
+ //
+ // When configured to LogAppendTime, the timestamp is assigned to the message
+ // by the broker. This is only guaranteed to be defined if the message was
+ // successfully delivered and RequiredAcks is not NoResponse.
+ Timestamp time.Time
+
+ retries int
+ flags flagSet
+ expectation chan *ProducerError
+ sequenceNumber int32
+}
+
+const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
+
+func (m *ProducerMessage) byteSize(version int) int {
+ var size int
+ if version >= 2 {
+ size = maximumRecordOverhead
+ for _, h := range m.Headers {
+ size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
+ }
+ } else {
+ size = producerMessageOverhead
+ }
+ if m.Key != nil {
+ size += m.Key.Length()
+ }
+ if m.Value != nil {
+ size += m.Value.Length()
+ }
+ return size
+}
+
+func (m *ProducerMessage) clear() {
+ m.flags = 0
+ m.retries = 0
+}
+
+// ProducerError is the type of error generated when the producer fails to deliver a message.
+// It contains the original ProducerMessage as well as the actual error value.
+type ProducerError struct {
+ Msg *ProducerMessage
+ Err error
+}
+
+func (pe ProducerError) Error() string {
+ return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
+}
+
+// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
+// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
+// when closing a producer.
+type ProducerErrors []*ProducerError
+
+func (pe ProducerErrors) Error() string {
+ return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
+}
+
+func (p *asyncProducer) Errors() <-chan *ProducerError {
+ return p.errors
+}
+
+func (p *asyncProducer) Successes() <-chan *ProducerMessage {
+ return p.successes
+}
+
+func (p *asyncProducer) Input() chan<- *ProducerMessage {
+ return p.input
+}
+
+func (p *asyncProducer) Close() error {
+ p.AsyncClose()
+
+ if p.conf.Producer.Return.Successes {
+ go withRecover(func() {
+ for range p.successes {
+ }
+ })
+ }
+
+ var errors ProducerErrors
+ if p.conf.Producer.Return.Errors {
+ for event := range p.errors {
+ errors = append(errors, event)
+ }
+ } else {
+ <-p.errors
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+func (p *asyncProducer) AsyncClose() {
+ go withRecover(p.shutdown)
+}
+
+// singleton
+// dispatches messages by topic
+func (p *asyncProducer) dispatcher() {
+ handlers := make(map[string]chan<- *ProducerMessage)
+ shuttingDown := false
+
+ for msg := range p.input {
+ if msg == nil {
+ Logger.Println("Something tried to send a nil message, it was ignored.")
+ continue
+ }
+
+ if msg.flags&shutdown != 0 {
+ shuttingDown = true
+ p.inFlight.Done()
+ continue
+ } else if msg.retries == 0 {
+ if shuttingDown {
+ // we can't just call returnError here because that decrements the wait group,
+ // which hasn't been incremented yet for this message, and shouldn't be
+ pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
+ if p.conf.Producer.Return.Errors {
+ p.errors <- pErr
+ } else {
+ Logger.Println(pErr)
+ }
+ continue
+ }
+ p.inFlight.Add(1)
+ }
+
+ version := 1
+ if p.conf.Version.IsAtLeast(V0_11_0_0) {
+ version = 2
+ } else if msg.Headers != nil {
+ p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11"))
+ continue
+ }
+ if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes {
+ p.returnError(msg, ErrMessageSizeTooLarge)
+ continue
+ }
+
+ handler := handlers[msg.Topic]
+ if handler == nil {
+ handler = p.newTopicProducer(msg.Topic)
+ handlers[msg.Topic] = handler
+ }
+
+ handler <- msg
+ }
+
+ for _, handler := range handlers {
+ close(handler)
+ }
+}
+
+// one per topic
+// partitions messages, then dispatches them by partition
+type topicProducer struct {
+ parent *asyncProducer
+ topic string
+ input <-chan *ProducerMessage
+
+ breaker *breaker.Breaker
+ handlers map[int32]chan<- *ProducerMessage
+ partitioner Partitioner
+}
+
+func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
+ input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+ tp := &topicProducer{
+ parent: p,
+ topic: topic,
+ input: input,
+ breaker: breaker.New(3, 1, 10*time.Second),
+ handlers: make(map[int32]chan<- *ProducerMessage),
+ partitioner: p.conf.Producer.Partitioner(topic),
+ }
+ go withRecover(tp.dispatch)
+ return input
+}
+
+func (tp *topicProducer) dispatch() {
+ for msg := range tp.input {
+ if msg.retries == 0 {
+ if err := tp.partitionMessage(msg); err != nil {
+ tp.parent.returnError(msg, err)
+ continue
+ }
+ }
+ // All messages being retried (sent or not) have already had their retry count updated
+ if tp.parent.conf.Producer.Idempotent && msg.retries == 0 {
+ msg.sequenceNumber = tp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition)
+ }
+
+ handler := tp.handlers[msg.Partition]
+ if handler == nil {
+ handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
+ tp.handlers[msg.Partition] = handler
+ }
+
+ handler <- msg
+ }
+
+ for _, handler := range tp.handlers {
+ close(handler)
+ }
+}
+
+func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
+ var partitions []int32
+
+ err := tp.breaker.Run(func() (err error) {
+ var requiresConsistency = false
+ if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok {
+ requiresConsistency = ep.MessageRequiresConsistency(msg)
+ } else {
+ requiresConsistency = tp.partitioner.RequiresConsistency()
+ }
+
+ if requiresConsistency {
+ partitions, err = tp.parent.client.Partitions(msg.Topic)
+ } else {
+ partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
+ }
+ return
+ })
+
+ if err != nil {
+ return err
+ }
+
+ numPartitions := int32(len(partitions))
+
+ if numPartitions == 0 {
+ return ErrLeaderNotAvailable
+ }
+
+ choice, err := tp.partitioner.Partition(msg, numPartitions)
+
+ if err != nil {
+ return err
+ } else if choice < 0 || choice >= numPartitions {
+ return ErrInvalidPartition
+ }
+
+ msg.Partition = partitions[choice]
+
+ return nil
+}
+
+// one per partition per topic
+// dispatches messages to the appropriate broker
+// also responsible for maintaining message order during retries
+type partitionProducer struct {
+ parent *asyncProducer
+ topic string
+ partition int32
+ input <-chan *ProducerMessage
+
+ leader *Broker
+ breaker *breaker.Breaker
+ brokerProducer *brokerProducer
+
+ // highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
+ // all other messages get buffered in retryState[msg.retries].buf to preserve ordering
+ // retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
+ // therefore whether our buffer is complete and safe to flush)
+ highWatermark int
+ retryState []partitionRetryState
+}
+
+type partitionRetryState struct {
+ buf []*ProducerMessage
+ expectChaser bool
+}
+
+func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
+ input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+ pp := &partitionProducer{
+ parent: p,
+ topic: topic,
+ partition: partition,
+ input: input,
+
+ breaker: breaker.New(3, 1, 10*time.Second),
+ retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
+ }
+ go withRecover(pp.dispatch)
+ return input
+}
+
+func (pp *partitionProducer) backoff(retries int) {
+ var backoff time.Duration
+ if pp.parent.conf.Producer.Retry.BackoffFunc != nil {
+ maxRetries := pp.parent.conf.Producer.Retry.Max
+ backoff = pp.parent.conf.Producer.Retry.BackoffFunc(retries, maxRetries)
+ } else {
+ backoff = pp.parent.conf.Producer.Retry.Backoff
+ }
+ if backoff > 0 {
+ time.Sleep(backoff)
+ }
+}
+
+func (pp *partitionProducer) dispatch() {
+ // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
+ // on the first message
+ pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
+ if pp.leader != nil {
+ pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
+ pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+ pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+ }
+
+ defer func() {
+ if pp.brokerProducer != nil {
+ pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+ }
+ }()
+
+ for msg := range pp.input {
+ if pp.brokerProducer != nil && pp.brokerProducer.abandoned != nil {
+ select {
+ case <-pp.brokerProducer.abandoned:
+				// the abandoned channel has been closed, which means our current broker selection is out of date
+ Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+ pp.brokerProducer = nil
+ time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
+ default:
+ // producer connection is still open.
+ }
+ }
+
+ if msg.retries > pp.highWatermark {
+ // a new, higher, retry level; handle it and then back off
+ pp.newHighWatermark(msg.retries)
+ pp.backoff(msg.retries)
+ } else if pp.highWatermark > 0 {
+ // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
+ if msg.retries < pp.highWatermark {
+				// in fact this message is not even of the current retry level, so buffer it for now (unless it's just a fin)
+ if msg.flags&fin == fin {
+ pp.retryState[msg.retries].expectChaser = false
+ pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+ } else {
+ pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
+ }
+ continue
+ } else if msg.flags&fin == fin {
+ // this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
+ // meaning this retry level is done and we can go down (at least) one level and flush that
+ pp.retryState[pp.highWatermark].expectChaser = false
+ pp.flushRetryBuffers()
+ pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+ continue
+ }
+ }
+
+ // if we made it this far then the current msg contains real data, and can be sent to the next goroutine
+ // without breaking any of our ordering guarantees
+
+ if pp.brokerProducer == nil {
+ if err := pp.updateLeader(); err != nil {
+ pp.parent.returnError(msg, err)
+ pp.backoff(msg.retries)
+ continue
+ }
+ Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ }
+
+ pp.brokerProducer.input <- msg
+ }
+}
+
+func (pp *partitionProducer) newHighWatermark(hwm int) {
+ Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
+ pp.highWatermark = hwm
+
+ // send off a fin so that we know when everything "in between" has made it
+ // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
+ pp.retryState[pp.highWatermark].expectChaser = true
+ pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
+ pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
+
+ // a new HWM means that our current broker selection is out of date
+ Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+ pp.brokerProducer = nil
+}
+
+func (pp *partitionProducer) flushRetryBuffers() {
+ Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+ for {
+ pp.highWatermark--
+
+ if pp.brokerProducer == nil {
+ if err := pp.updateLeader(); err != nil {
+ pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
+ goto flushDone
+ }
+ Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ }
+
+ for _, msg := range pp.retryState[pp.highWatermark].buf {
+ pp.brokerProducer.input <- msg
+ }
+
+ flushDone:
+ pp.retryState[pp.highWatermark].buf = nil
+ if pp.retryState[pp.highWatermark].expectChaser {
+ Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+ break
+ } else if pp.highWatermark == 0 {
+ Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
+ break
+ }
+ }
+}
+
+func (pp *partitionProducer) updateLeader() error {
+ return pp.breaker.Run(func() (err error) {
+ if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
+ return err
+ }
+
+ if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
+ return err
+ }
+
+ pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
+ pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+ pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+
+ return nil
+ })
+}
+
+// one per broker; also constructs an associated flusher
+func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer {
+ var (
+ input = make(chan *ProducerMessage)
+ bridge = make(chan *produceSet)
+ responses = make(chan *brokerProducerResponse)
+ )
+
+ bp := &brokerProducer{
+ parent: p,
+ broker: broker,
+ input: input,
+ output: bridge,
+ responses: responses,
+ stopchan: make(chan struct{}),
+ buffer: newProduceSet(p),
+ currentRetries: make(map[string]map[int32]error),
+ }
+ go withRecover(bp.run)
+
+ // minimal bridge to make the network response `select`able
+ go withRecover(func() {
+ for set := range bridge {
+ request := set.buildRequest()
+
+ response, err := broker.Produce(request)
+
+ responses <- &brokerProducerResponse{
+ set: set,
+ err: err,
+ res: response,
+ }
+ }
+ close(responses)
+ })
+
+ if p.conf.Producer.Retry.Max <= 0 {
+ bp.abandoned = make(chan struct{})
+ }
+
+ return bp
+}
+
+type brokerProducerResponse struct {
+ set *produceSet
+ err error
+ res *ProduceResponse
+}
+
+// groups messages together into appropriately-sized batches for sending to the broker
+// handles state related to retries etc
+type brokerProducer struct {
+ parent *asyncProducer
+ broker *Broker
+
+ input chan *ProducerMessage
+ output chan<- *produceSet
+ responses <-chan *brokerProducerResponse
+ abandoned chan struct{}
+ stopchan chan struct{}
+
+ buffer *produceSet
+ timer <-chan time.Time
+ timerFired bool
+
+ closing error
+ currentRetries map[string]map[int32]error
+}
+
+func (bp *brokerProducer) run() {
+ var output chan<- *produceSet
+ Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
+
+ for {
+ select {
+ case msg, ok := <-bp.input:
+ if !ok {
+ Logger.Printf("producer/broker/%d input chan closed\n", bp.broker.ID())
+ bp.shutdown()
+ return
+ }
+
+ if msg == nil {
+ continue
+ }
+
+ if msg.flags&syn == syn {
+ Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
+ bp.broker.ID(), msg.Topic, msg.Partition)
+ if bp.currentRetries[msg.Topic] == nil {
+ bp.currentRetries[msg.Topic] = make(map[int32]error)
+ }
+ bp.currentRetries[msg.Topic][msg.Partition] = nil
+ bp.parent.inFlight.Done()
+ continue
+ }
+
+ if reason := bp.needsRetry(msg); reason != nil {
+ bp.parent.retryMessage(msg, reason)
+
+ if bp.closing == nil && msg.flags&fin == fin {
+ // we were retrying this partition but we can start processing again
+ delete(bp.currentRetries[msg.Topic], msg.Partition)
+ Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
+ bp.broker.ID(), msg.Topic, msg.Partition)
+ }
+
+ continue
+ }
+
+ if bp.buffer.wouldOverflow(msg) {
+ if err := bp.waitForSpace(msg); err != nil {
+ bp.parent.retryMessage(msg, err)
+ continue
+ }
+ }
+
+ if err := bp.buffer.add(msg); err != nil {
+ bp.parent.returnError(msg, err)
+ continue
+ }
+
+ if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
+ bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
+ }
+ case <-bp.timer:
+ bp.timerFired = true
+ case output <- bp.buffer:
+ bp.rollOver()
+ case response, ok := <-bp.responses:
+ if ok {
+ bp.handleResponse(response)
+ }
+ case <-bp.stopchan:
+ Logger.Printf(
+ "producer/broker/%d run loop asked to stop\n", bp.broker.ID())
+ return
+ }
+
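+		// a send on a nil channel blocks forever, so leaving `output` nil keeps the
+		// `output <- bp.buffer` case of the select disabled until the buffer is
+		// actually worth flushing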
+ if bp.timerFired || bp.buffer.readyToFlush() {
+ output = bp.output
+ } else {
+ output = nil
+ }
+ }
+}
+
+func (bp *brokerProducer) shutdown() {
+ for !bp.buffer.empty() {
+ select {
+ case response := <-bp.responses:
+ bp.handleResponse(response)
+ case bp.output <- bp.buffer:
+ bp.rollOver()
+ }
+ }
+ close(bp.output)
+ for response := range bp.responses {
+ bp.handleResponse(response)
+ }
+ close(bp.stopchan)
+ Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
+}
+
+func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
+ if bp.closing != nil {
+ return bp.closing
+ }
+
+ return bp.currentRetries[msg.Topic][msg.Partition]
+}
+
+func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
+ Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
+
+ for {
+ select {
+ case response := <-bp.responses:
+ bp.handleResponse(response)
+ // handling a response can change our state, so re-check some things
+ if reason := bp.needsRetry(msg); reason != nil {
+ return reason
+ } else if !bp.buffer.wouldOverflow(msg) {
+ return nil
+ }
+ case bp.output <- bp.buffer:
+ bp.rollOver()
+ return nil
+ }
+ }
+}
+
+func (bp *brokerProducer) rollOver() {
+ bp.timer = nil
+ bp.timerFired = false
+ bp.buffer = newProduceSet(bp.parent)
+}
+
+func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
+ if response.err != nil {
+ bp.handleError(response.set, response.err)
+ } else {
+ bp.handleSuccess(response.set, response.res)
+ }
+
+ if bp.buffer.empty() {
+ bp.rollOver() // this can happen if the response invalidated our buffer
+ }
+}
+
+func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
+ // we iterate through the blocks in the request set, not the response, so that we notice
+ // if the response is missing a block completely
+ var retryTopics []string
+ sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+ if response == nil {
+ // this only happens when RequiredAcks is NoResponse, so we have to assume success
+ bp.parent.returnSuccesses(pSet.msgs)
+ return
+ }
+
+ block := response.GetBlock(topic, partition)
+ if block == nil {
+ bp.parent.returnErrors(pSet.msgs, ErrIncompleteResponse)
+ return
+ }
+
+ switch block.Err {
+ // Success
+ case ErrNoError:
+ if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
+ for _, msg := range pSet.msgs {
+ msg.Timestamp = block.Timestamp
+ }
+ }
+ for i, msg := range pSet.msgs {
+ msg.Offset = block.Offset + int64(i)
+ }
+ bp.parent.returnSuccesses(pSet.msgs)
+ // Duplicate
+ case ErrDuplicateSequenceNumber:
+ bp.parent.returnSuccesses(pSet.msgs)
+ // Retriable errors
+ case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
+ ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
+ if bp.parent.conf.Producer.Retry.Max <= 0 {
+ bp.parent.abandonBrokerConnection(bp.broker)
+ bp.parent.returnErrors(pSet.msgs, block.Err)
+ } else {
+ retryTopics = append(retryTopics, topic)
+ }
+ // Other non-retriable errors
+ default:
+ if bp.parent.conf.Producer.Retry.Max <= 0 {
+ bp.parent.abandonBrokerConnection(bp.broker)
+ }
+ bp.parent.returnErrors(pSet.msgs, block.Err)
+ }
+ })
+
+ if len(retryTopics) > 0 {
+ if bp.parent.conf.Producer.Idempotent {
+ err := bp.parent.client.RefreshMetadata(retryTopics...)
+ if err != nil {
+ Logger.Printf("Failed refreshing metadata because of %v\n", err)
+ }
+ }
+
+ sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+ block := response.GetBlock(topic, partition)
+ if block == nil {
+ // handled in the previous "eachPartition" loop
+ return
+ }
+
+ switch block.Err {
+ case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
+ ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
+ Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
+ bp.broker.ID(), topic, partition, block.Err)
+ if bp.currentRetries[topic] == nil {
+ bp.currentRetries[topic] = make(map[int32]error)
+ }
+ bp.currentRetries[topic][partition] = block.Err
+ if bp.parent.conf.Producer.Idempotent {
+ go bp.parent.retryBatch(topic, partition, pSet, block.Err)
+ } else {
+ bp.parent.retryMessages(pSet.msgs, block.Err)
+ }
+ // dropping the following messages has the side effect of incrementing their retry count
+ bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
+ }
+ })
+ }
+}
+
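+// retryBatch re-submits an entire partitionSet to the partition's (possibly new)
+// leader. It is only invoked for idempotent producers (see handleSuccess above),
+// where messages are retried as a whole batch so that their already-assigned
+// sequence numbers are preserved in order.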
+func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitionSet, kerr KError) {
+ Logger.Printf("Retrying batch for %v-%d because of %s\n", topic, partition, kerr)
+ produceSet := newProduceSet(p)
+ produceSet.msgs[topic] = make(map[int32]*partitionSet)
+ produceSet.msgs[topic][partition] = pSet
+ produceSet.bufferBytes += pSet.bufferBytes
+ produceSet.bufferCount += len(pSet.msgs)
+ for _, msg := range pSet.msgs {
+ if msg.retries >= p.conf.Producer.Retry.Max {
+ p.returnError(msg, kerr)
+ return
+ }
+ msg.retries++
+ }
+
+ // it's expected that a metadata refresh has been requested prior to calling retryBatch
+ leader, err := p.client.Leader(topic, partition)
+ if err != nil {
+ Logger.Printf("Failed retrying batch for %v-%d because of %v while looking up for new leader\n", topic, partition, err)
+ for _, msg := range pSet.msgs {
+ p.returnError(msg, kerr)
+ }
+ return
+ }
+ bp := p.getBrokerProducer(leader)
+ bp.output <- produceSet
+}
+
+func (bp *brokerProducer) handleError(sent *produceSet, err error) {
+ switch err.(type) {
+ case PacketEncodingError:
+ sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+ bp.parent.returnErrors(pSet.msgs, err)
+ })
+ default:
+ Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
+ bp.parent.abandonBrokerConnection(bp.broker)
+ _ = bp.broker.Close()
+ bp.closing = err
+ sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+ bp.parent.retryMessages(pSet.msgs, err)
+ })
+ bp.buffer.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+ bp.parent.retryMessages(pSet.msgs, err)
+ })
+ bp.rollOver()
+ }
+}
+
+// singleton
+// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
+// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
+func (p *asyncProducer) retryHandler() {
+ var msg *ProducerMessage
+ buf := queue.New()
+
+ for {
+ if buf.Length() == 0 {
+ msg = <-p.retries
+ } else {
+ select {
+ case msg = <-p.retries:
+ case p.input <- buf.Peek().(*ProducerMessage):
+ buf.Remove()
+ continue
+ }
+ }
+
+ if msg == nil {
+ return
+ }
+
+ buf.Add(msg)
+ }
+}
+
+// utility functions
+
+func (p *asyncProducer) shutdown() {
+ Logger.Println("Producer shutting down.")
+ p.inFlight.Add(1)
+ p.input <- &ProducerMessage{flags: shutdown}
+
+ p.inFlight.Wait()
+
+ err := p.client.Close()
+ if err != nil {
+ Logger.Println("producer/shutdown failed to close the embedded client:", err)
+ }
+
+ close(p.input)
+ close(p.retries)
+ close(p.errors)
+ close(p.successes)
+}
+
+func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
+ msg.clear()
+ pErr := &ProducerError{Msg: msg, Err: err}
+ if p.conf.Producer.Return.Errors {
+ p.errors <- pErr
+ } else {
+ Logger.Println(pErr)
+ }
+ p.inFlight.Done()
+}
+
+func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
+ for _, msg := range batch {
+ p.returnError(msg, err)
+ }
+}
+
+func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
+ for _, msg := range batch {
+ if p.conf.Producer.Return.Successes {
+ msg.clear()
+ p.successes <- msg
+ }
+ p.inFlight.Done()
+ }
+}
+
+func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
+ if msg.retries >= p.conf.Producer.Retry.Max {
+ p.returnError(msg, err)
+ } else {
+ msg.retries++
+ p.retries <- msg
+ }
+}
+
+func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
+ for _, msg := range batch {
+ p.retryMessage(msg, err)
+ }
+}
+
+func (p *asyncProducer) getBrokerProducer(broker *Broker) *brokerProducer {
+ p.brokerLock.Lock()
+ defer p.brokerLock.Unlock()
+
+ bp := p.brokers[broker]
+
+ if bp == nil {
+ bp = p.newBrokerProducer(broker)
+ p.brokers[broker] = bp
+ p.brokerRefs[bp] = 0
+ }
+
+ p.brokerRefs[bp]++
+
+ return bp
+}
+
+func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp *brokerProducer) {
+ p.brokerLock.Lock()
+ defer p.brokerLock.Unlock()
+
+ p.brokerRefs[bp]--
+ if p.brokerRefs[bp] == 0 {
+ close(bp.input)
+ delete(p.brokerRefs, bp)
+
+ if p.brokers[broker] == bp {
+ delete(p.brokers, broker)
+ }
+ }
+}
+
+func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
+ p.brokerLock.Lock()
+ defer p.brokerLock.Unlock()
+
+ bc, ok := p.brokers[broker]
+ if ok && bc.abandoned != nil {
+ close(bc.abandoned)
+ }
+
+ delete(p.brokers, broker)
+}
diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/Shopify/sarama/balance_strategy.go
new file mode 100644
index 0000000..67c4d96
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/balance_strategy.go
@@ -0,0 +1,1053 @@
+package sarama
+
+import (
+ "container/heap"
+ "math"
+ "sort"
+ "strings"
+)
+
+const (
+ // RangeBalanceStrategyName identifies strategies that use the range partition assignment strategy
+ RangeBalanceStrategyName = "range"
+
+ // RoundRobinBalanceStrategyName identifies strategies that use the round-robin partition assignment strategy
+ RoundRobinBalanceStrategyName = "roundrobin"
+
+ // StickyBalanceStrategyName identifies strategies that use the sticky-partition assignment strategy
+ StickyBalanceStrategyName = "sticky"
+
+ defaultGeneration = -1
+)
+
+// BalanceStrategyPlan is the result of any BalanceStrategy.Plan attempt.
+// It contains an allocation of topic/partitions by memberID in the form of
+// a `memberID -> topic -> partitions` map.
+type BalanceStrategyPlan map[string]map[string][]int32
+
+// Add assigns a number of partitions of a topic to a member.
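+// For example, plan.Add("member-1", "events", 0, 1) records partitions 0 and 1
+// of topic "events" for member "member-1" (names purely illustrative).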
+func (p BalanceStrategyPlan) Add(memberID, topic string, partitions ...int32) {
+ if len(partitions) == 0 {
+ return
+ }
+ if _, ok := p[memberID]; !ok {
+ p[memberID] = make(map[string][]int32, 1)
+ }
+ p[memberID][topic] = append(p[memberID][topic], partitions...)
+}
+
+// --------------------------------------------------------------------
+
+// BalanceStrategy is used to balance topics and partitions
+// across members of a consumer group
+type BalanceStrategy interface {
+ // Name uniquely identifies the strategy.
+ Name() string
+
+ // Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions`
+ // and returns a distribution plan.
+ Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error)
+}
+
+// --------------------------------------------------------------------
+
+// BalanceStrategyRange is the default and assigns partitions as ranges to consumer group members.
+// Example with one topic T with six partitions (0..5) and two members (M1, M2):
+// M1: {T: [0, 1, 2]}
+// M2: {T: [3, 4, 5]}
+var BalanceStrategyRange = &balanceStrategy{
+ name: RangeBalanceStrategyName,
+ coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
+ step := float64(len(partitions)) / float64(len(memberIDs))
+
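+		// e.g. 7 partitions over 3 members: step ~ 2.33 and the half-up rounding
+		// below yields the ranges [0:2], [2:5], [5:7] (sizes 2, 3, 2)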
+ for i, memberID := range memberIDs {
+ pos := float64(i)
+ min := int(math.Floor(pos*step + 0.5))
+ max := int(math.Floor((pos+1)*step + 0.5))
+ plan.Add(memberID, topic, partitions[min:max]...)
+ }
+ },
+}
+
+// BalanceStrategyRoundRobin assigns partitions to members in alternating order.
+// Example with topic T with six partitions (0..5) and two members (M1, M2):
+// M1: {T: [0, 2, 4]}
+// M2: {T: [1, 3, 5]}
+var BalanceStrategyRoundRobin = &balanceStrategy{
+ name: RoundRobinBalanceStrategyName,
+ coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
+ for i, part := range partitions {
+ memberID := memberIDs[i%len(memberIDs)]
+ plan.Add(memberID, topic, part)
+ }
+ },
+}
+
+// BalanceStrategySticky assigns partitions to members with an attempt to preserve earlier assignments
+// while maintaining a balanced partition distribution.
+// Example with topic T with six partitions (0..5) and two members (M1, M2):
+// M1: {T: [0, 2, 4]}
+// M2: {T: [1, 3, 5]}
+//
+// On reassignment with an additional consumer, you might get an assignment plan like:
+// M1: {T: [0, 2]}
+// M2: {T: [1, 3]}
+// M3: {T: [4, 5]}
+//
+var BalanceStrategySticky = &stickyBalanceStrategy{}
+
+// --------------------------------------------------------------------
+
+type balanceStrategy struct {
+ name string
+ coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32)
+}
+
+// Name implements BalanceStrategy.
+func (s *balanceStrategy) Name() string { return s.name }
+
+// Plan implements BalanceStrategy.
+func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+ // Build members by topic map
+ mbt := make(map[string][]string)
+ for memberID, meta := range members {
+ for _, topic := range meta.Topics {
+ mbt[topic] = append(mbt[topic], memberID)
+ }
+ }
+
+ // Sort members for each topic
+ for topic, memberIDs := range mbt {
+ sort.Sort(&balanceStrategySortable{
+ topic: topic,
+ memberIDs: memberIDs,
+ })
+ }
+
+ // Assemble plan
+ plan := make(BalanceStrategyPlan, len(members))
+ for topic, memberIDs := range mbt {
+ s.coreFn(plan, memberIDs, topic, topics[topic])
+ }
+ return plan, nil
+}
+
+type balanceStrategySortable struct {
+ topic string
+ memberIDs []string
+}
+
+func (p balanceStrategySortable) Len() int { return len(p.memberIDs) }
+func (p balanceStrategySortable) Swap(i, j int) {
+ p.memberIDs[i], p.memberIDs[j] = p.memberIDs[j], p.memberIDs[i]
+}
+func (p balanceStrategySortable) Less(i, j int) bool {
+ return balanceStrategyHashValue(p.topic, p.memberIDs[i]) < balanceStrategyHashValue(p.topic, p.memberIDs[j])
+}
+
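+// balanceStrategyHashValue computes a 32-bit FNV-1a hash over the concatenation
+// of the given strings; sorting members by hash(topic, memberID) varies which
+// member comes first from topic to topic instead of always favouring the same one.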
+func balanceStrategyHashValue(vv ...string) uint32 {
+ h := uint32(2166136261)
+ for _, s := range vv {
+ for _, c := range s {
+ h ^= uint32(c)
+ h *= 16777619
+ }
+ }
+ return h
+}
+
+type stickyBalanceStrategy struct {
+ movements partitionMovements
+}
+
+// Name implements BalanceStrategy.
+func (s *stickyBalanceStrategy) Name() string { return StickyBalanceStrategyName }
+
+// Plan implements BalanceStrategy.
+func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+ // track partition movements during generation of the partition assignment plan
+ s.movements = partitionMovements{
+ Movements: make(map[topicPartitionAssignment]consumerPair),
+ PartitionMovementsByTopic: make(map[string]map[consumerPair]map[topicPartitionAssignment]bool),
+ }
+
+ // prepopulate the current assignment state from userdata on the consumer group members
+ currentAssignment, prevAssignment, err := prepopulateCurrentAssignments(members)
+ if err != nil {
+ return nil, err
+ }
+
+ // determine if we're dealing with a completely fresh assignment, or if there's existing assignment state
+ isFreshAssignment := false
+ if len(currentAssignment) == 0 {
+ isFreshAssignment = true
+ }
+
+ // create a mapping of all current topic partitions and the consumers that can be assigned to them
+ partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string)
+ for topic, partitions := range topics {
+ for _, partition := range partitions {
+ partition2AllPotentialConsumers[topicPartitionAssignment{Topic: topic, Partition: partition}] = []string{}
+ }
+ }
+
+ // create a mapping of all consumers to all potential topic partitions that can be assigned to them
+ // also, populate the mapping of partitions to potential consumers
+ consumer2AllPotentialPartitions := make(map[string][]topicPartitionAssignment, len(members))
+ for memberID, meta := range members {
+ consumer2AllPotentialPartitions[memberID] = make([]topicPartitionAssignment, 0)
+ for _, topicSubscription := range meta.Topics {
+ // only evaluate topic subscriptions that are present in the supplied topics map
+ if _, found := topics[topicSubscription]; found {
+ for _, partition := range topics[topicSubscription] {
+ topicPartition := topicPartitionAssignment{Topic: topicSubscription, Partition: partition}
+ consumer2AllPotentialPartitions[memberID] = append(consumer2AllPotentialPartitions[memberID], topicPartition)
+ partition2AllPotentialConsumers[topicPartition] = append(partition2AllPotentialConsumers[topicPartition], memberID)
+ }
+ }
+ }
+
+ // add this consumer to currentAssignment (with an empty topic partition assignment) if it does not already exist
+ if _, exists := currentAssignment[memberID]; !exists {
+ currentAssignment[memberID] = make([]topicPartitionAssignment, 0)
+ }
+ }
+
+ // create a mapping of each partition to its current consumer, where possible
+ currentPartitionConsumers := make(map[topicPartitionAssignment]string, len(currentAssignment))
+ unvisitedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
+ for partition := range partition2AllPotentialConsumers {
+ unvisitedPartitions[partition] = true
+ }
+ var unassignedPartitions []topicPartitionAssignment
+ for memberID, partitions := range currentAssignment {
+ var keepPartitions []topicPartitionAssignment
+ for _, partition := range partitions {
+ // If this partition no longer exists at all, likely due to the
+ // topic being deleted, we remove the partition from the member.
+ if _, exists := partition2AllPotentialConsumers[partition]; !exists {
+ continue
+ }
+ delete(unvisitedPartitions, partition)
+ currentPartitionConsumers[partition] = memberID
+
+ if !strsContains(members[memberID].Topics, partition.Topic) {
+ unassignedPartitions = append(unassignedPartitions, partition)
+ continue
+ }
+ keepPartitions = append(keepPartitions, partition)
+ }
+ currentAssignment[memberID] = keepPartitions
+ }
+ for unvisited := range unvisitedPartitions {
+ unassignedPartitions = append(unassignedPartitions, unvisited)
+ }
+
+ // sort the topic partitions in order of priority for reassignment
+ sortedPartitions := sortPartitions(currentAssignment, prevAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions)
+
+ // at this point we have preserved all valid topic partition to consumer assignments and removed
+ // all invalid topic partitions and invalid consumers. Now we need to assign unassignedPartitions
+ // to consumers so that the topic partition assignments are as balanced as possible.
+
+ // an ascending sorted set of consumers based on how many topic partitions are already assigned to them
+ sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
+ s.balance(currentAssignment, prevAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumers)
+
+ // Assemble plan
+ plan := make(BalanceStrategyPlan, len(currentAssignment))
+ for memberID, assignments := range currentAssignment {
+ if len(assignments) == 0 {
+ plan[memberID] = make(map[string][]int32, 0)
+ } else {
+ for _, assignment := range assignments {
+ plan.Add(memberID, assignment.Topic, assignment.Partition)
+ }
+ }
+ }
+ return plan, nil
+}
+
+func strsContains(s []string, value string) bool {
+ for _, entry := range s {
+ if entry == value {
+ return true
+ }
+ }
+ return false
+}
+
+// Balance assignments across consumers for maximum fairness and stickiness.
+func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) {
+ initializing := false
+ if len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 {
+ initializing = true
+ }
+
+ // assign all unassigned partitions
+ for _, partition := range unassignedPartitions {
+ // skip if there is no potential consumer for the partition
+ if len(partition2AllPotentialConsumers[partition]) == 0 {
+ continue
+ }
+ sortedCurrentSubscriptions = assignPartition(partition, sortedCurrentSubscriptions, currentAssignment, consumer2AllPotentialPartitions, currentPartitionConsumer)
+ }
+
+ // narrow down the reassignment scope to only those partitions that can actually be reassigned
+ for partition := range partition2AllPotentialConsumers {
+ if !canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
+ sortedPartitions = removeTopicPartitionFromMemberAssignments(sortedPartitions, partition)
+ }
+ }
+
+ // narrow down the reassignment scope to only those consumers that are subject to reassignment
+ fixedAssignments := make(map[string][]topicPartitionAssignment)
+ for memberID := range consumer2AllPotentialPartitions {
+ if !canConsumerParticipateInReassignment(memberID, currentAssignment, consumer2AllPotentialPartitions, partition2AllPotentialConsumers) {
+ fixedAssignments[memberID] = currentAssignment[memberID]
+ delete(currentAssignment, memberID)
+ sortedCurrentSubscriptions = sortMemberIDsByPartitionAssignments(currentAssignment)
+ }
+ }
+
+ // create a deep copy of the current assignment so we can revert to it if we do not get a more balanced assignment later
+ preBalanceAssignment := deepCopyAssignment(currentAssignment)
+ preBalancePartitionConsumers := make(map[topicPartitionAssignment]string, len(currentPartitionConsumer))
+ for k, v := range currentPartitionConsumer {
+ preBalancePartitionConsumers[k] = v
+ }
+
+ reassignmentPerformed := s.performReassignments(sortedPartitions, currentAssignment, prevAssignment, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer)
+
+ // if we are not preserving existing assignments and we have made changes to the current assignment
+ // make sure we are getting a more balanced assignment; otherwise, revert to previous assignment
+ if !initializing && reassignmentPerformed && getBalanceScore(currentAssignment) >= getBalanceScore(preBalanceAssignment) {
+ currentAssignment = deepCopyAssignment(preBalanceAssignment)
+ currentPartitionConsumer = make(map[topicPartitionAssignment]string, len(preBalancePartitionConsumers))
+ for k, v := range preBalancePartitionConsumers {
+ currentPartitionConsumer[k] = v
+ }
+ }
+
+ // add the fixed assignments (those that could not change) back
+ for consumer, assignments := range fixedAssignments {
+ currentAssignment[consumer] = assignments
+ }
+}
+
+// Calculate the balance score of the given assignment, as the sum of assigned partitions size difference of all consumer pairs.
+// A perfectly balanced assignment (with all consumers getting the same number of partitions) has a balance score of 0.
+// Lower balance score indicates a more balanced assignment.
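+// For example, assignment sizes {3, 1, 2} score |3-1| + |3-2| + |1-2| = 4.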
+func getBalanceScore(assignment map[string][]topicPartitionAssignment) int {
+ consumer2AssignmentSize := make(map[string]int, len(assignment))
+ for memberID, partitions := range assignment {
+ consumer2AssignmentSize[memberID] = len(partitions)
+ }
+
+ var score float64
+ for memberID, consumerAssignmentSize := range consumer2AssignmentSize {
+ delete(consumer2AssignmentSize, memberID)
+ for _, otherConsumerAssignmentSize := range consumer2AssignmentSize {
+ score += math.Abs(float64(consumerAssignmentSize - otherConsumerAssignmentSize))
+ }
+ }
+ return int(score)
+}
+
+// Determine whether the current assignment plan is balanced.
+func isBalanced(currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, allSubscriptions map[string][]topicPartitionAssignment) bool {
+ sortedCurrentSubscriptions = sortMemberIDsByPartitionAssignments(currentAssignment)
+ min := len(currentAssignment[sortedCurrentSubscriptions[0]])
+ max := len(currentAssignment[sortedCurrentSubscriptions[len(sortedCurrentSubscriptions)-1]])
+ if min >= max-1 {
+ // if minimum and maximum numbers of partitions assigned to consumers differ by at most one return true
+ return true
+ }
+
+ // create a mapping from partitions to the consumer assigned to them
+ allPartitions := make(map[topicPartitionAssignment]string)
+ for memberID, partitions := range currentAssignment {
+ for _, partition := range partitions {
+ if _, exists := allPartitions[partition]; exists {
+			Logger.Printf("Topic %s Partition %d is assigned to more than one consumer", partition.Topic, partition.Partition)
+ }
+ allPartitions[partition] = memberID
+ }
+ }
+
+	// for each consumer that does not yet hold all the topic partitions it could get, make sure that none of the
+	// topic partitions it could hold but currently does not can be moved to it (because that would break the balance)
+ for _, memberID := range sortedCurrentSubscriptions {
+ consumerPartitions := currentAssignment[memberID]
+ consumerPartitionCount := len(consumerPartitions)
+
+ // skip if this consumer already has all the topic partitions it can get
+ if consumerPartitionCount == len(allSubscriptions[memberID]) {
+ continue
+ }
+
+ // otherwise make sure it cannot get any more
+ potentialTopicPartitions := allSubscriptions[memberID]
+ for _, partition := range potentialTopicPartitions {
+ if !memberAssignmentsIncludeTopicPartition(currentAssignment[memberID], partition) {
+ otherConsumer := allPartitions[partition]
+ otherConsumerPartitionCount := len(currentAssignment[otherConsumer])
+ if consumerPartitionCount < otherConsumerPartitionCount {
+ return false
+ }
+ }
+ }
+ }
+ return true
+}
+
+// Reassign all topic partitions that need reassignment until balanced.
+func (s *stickyBalanceStrategy) performReassignments(reassignablePartitions []topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) bool {
+ reassignmentPerformed := false
+ modified := false
+
+ // repeat reassignment until no partition can be moved to improve the balance
+ for {
+ modified = false
+		// reassign all reassignable partitions (starting with the partition that has the fewest potential consumers)
+ // until the full list is processed or a balance is achieved
+ for _, partition := range reassignablePartitions {
+ if isBalanced(currentAssignment, sortedCurrentSubscriptions, consumer2AllPotentialPartitions) {
+ break
+ }
+
+ // the partition must have at least two consumers
+ if len(partition2AllPotentialConsumers[partition]) <= 1 {
+				Logger.Printf("Expected more than one potential consumer for topic %s partition %d", partition.Topic, partition.Partition)
+ }
+
+ // the partition must have a consumer
+ consumer := currentPartitionConsumer[partition]
+ if consumer == "" {
+ Logger.Printf("Expected topic %s partition %d to be assigned to a consumer", partition.Topic, partition.Partition)
+ }
+
+ if _, exists := prevAssignment[partition]; exists {
+ if len(currentAssignment[consumer]) > (len(currentAssignment[prevAssignment[partition].MemberID]) + 1) {
+ sortedCurrentSubscriptions = s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, prevAssignment[partition].MemberID)
+ reassignmentPerformed = true
+ modified = true
+ continue
+ }
+ }
+
+ // check if a better-suited consumer exists for the partition; if so, reassign it
+ for _, otherConsumer := range partition2AllPotentialConsumers[partition] {
+ if len(currentAssignment[consumer]) > (len(currentAssignment[otherConsumer]) + 1) {
+ sortedCurrentSubscriptions = s.reassignPartitionToNewConsumer(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, consumer2AllPotentialPartitions)
+ reassignmentPerformed = true
+ modified = true
+ break
+ }
+ }
+ }
+ if !modified {
+ return reassignmentPerformed
+ }
+ }
+}
+
+// Identify a new consumer for a topic partition and reassign it.
+func (s *stickyBalanceStrategy) reassignPartitionToNewConsumer(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []string {
+ for _, anotherConsumer := range sortedCurrentSubscriptions {
+ if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[anotherConsumer], partition) {
+ return s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, anotherConsumer)
+ }
+ }
+ return sortedCurrentSubscriptions
+}
+
+// Reassign a specific partition to a new consumer
+func (s *stickyBalanceStrategy) reassignPartition(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, newConsumer string) []string {
+ consumer := currentPartitionConsumer[partition]
+ // find the correct partition movement considering the stickiness requirement
+ partitionToBeMoved := s.movements.getTheActualPartitionToBeMoved(partition, consumer, newConsumer)
+ return s.processPartitionMovement(partitionToBeMoved, newConsumer, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer)
+}
+
+// Track the movement of a topic partition after assignment
+func (s *stickyBalanceStrategy) processPartitionMovement(partition topicPartitionAssignment, newConsumer string, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
+ oldConsumer := currentPartitionConsumer[partition]
+ s.movements.movePartition(partition, oldConsumer, newConsumer)
+
+ currentAssignment[oldConsumer] = removeTopicPartitionFromMemberAssignments(currentAssignment[oldConsumer], partition)
+ currentAssignment[newConsumer] = append(currentAssignment[newConsumer], partition)
+ currentPartitionConsumer[partition] = newConsumer
+ return sortMemberIDsByPartitionAssignments(currentAssignment)
+}
+
+// Determine whether a specific consumer should be considered for topic partition assignment.
+func canConsumerParticipateInReassignment(memberID string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
+ currentPartitions := currentAssignment[memberID]
+ currentAssignmentSize := len(currentPartitions)
+ maxAssignmentSize := len(consumer2AllPotentialPartitions[memberID])
+ if currentAssignmentSize > maxAssignmentSize {
+ Logger.Printf("The consumer %s is assigned more partitions than the maximum possible", memberID)
+ }
+ if currentAssignmentSize < maxAssignmentSize {
+ // if a consumer is not assigned all its potential partitions it is subject to reassignment
+ return true
+ }
+ for _, partition := range currentPartitions {
+ if canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
+ return true
+ }
+ }
+ return false
+}
+
+// Only consider reassigning those topic partitions that have two or more potential consumers.
+func canTopicPartitionParticipateInReassignment(partition topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
+ return len(partition2AllPotentialConsumers[partition]) >= 2
+}
+
+// The assignment should improve the overall balance of the partition assignments to consumers.
+func assignPartition(partition topicPartitionAssignment, sortedCurrentSubscriptions []string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
+ for _, memberID := range sortedCurrentSubscriptions {
+ if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[memberID], partition) {
+ currentAssignment[memberID] = append(currentAssignment[memberID], partition)
+ currentPartitionConsumer[partition] = memberID
+ break
+ }
+ }
+ return sortMemberIDsByPartitionAssignments(currentAssignment)
+}
+
+// Deserialize topic partition assignment data to aid with creation of a sticky assignment.
+func deserializeTopicPartitionAssignment(userDataBytes []byte) (StickyAssignorUserData, error) {
+ userDataV1 := &StickyAssignorUserDataV1{}
+ if err := decode(userDataBytes, userDataV1); err != nil {
+ userDataV0 := &StickyAssignorUserDataV0{}
+ if err := decode(userDataBytes, userDataV0); err != nil {
+ return nil, err
+ }
+ return userDataV0, nil
+ }
+ return userDataV1, nil
+}
+
+// filterAssignedPartitions returns a map of consumer group members to their list of previously-assigned topic partitions, limited
+// to those topic partitions currently reported by the Kafka cluster.
+func filterAssignedPartitions(currentAssignment map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) map[string][]topicPartitionAssignment {
+ assignments := deepCopyAssignment(currentAssignment)
+ for memberID, partitions := range assignments {
+ // perform in-place filtering
+ i := 0
+ for _, partition := range partitions {
+ if _, exists := partition2AllPotentialConsumers[partition]; exists {
+ partitions[i] = partition
+ i++
+ }
+ }
+ assignments[memberID] = partitions[:i]
+ }
+ return assignments
+}
+
+func removeTopicPartitionFromMemberAssignments(assignments []topicPartitionAssignment, topic topicPartitionAssignment) []topicPartitionAssignment {
+ for i, assignment := range assignments {
+ if assignment == topic {
+ return append(assignments[:i], assignments[i+1:]...)
+ }
+ }
+ return assignments
+}
+
+func memberAssignmentsIncludeTopicPartition(assignments []topicPartitionAssignment, topic topicPartitionAssignment) bool {
+ for _, assignment := range assignments {
+ if assignment == topic {
+ return true
+ }
+ }
+ return false
+}
+
+func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, partitionsWithADifferentPreviousAssignment map[topicPartitionAssignment]consumerGenerationPair, isFreshAssignment bool, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []topicPartitionAssignment {
+ unassignedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
+ for partition := range partition2AllPotentialConsumers {
+ unassignedPartitions[partition] = true
+ }
+
+ sortedPartitions := make([]topicPartitionAssignment, 0)
+ if !isFreshAssignment && areSubscriptionsIdentical(partition2AllPotentialConsumers, consumer2AllPotentialPartitions) {
+		// if this is a reassignment and the subscriptions are identical (all consumers can consume from all topics)
+		// then we just list partitions in a round-robin fashion, going from the consumers with the
+		// most assigned partitions to those with the least
+ assignments := filterAssignedPartitions(currentAssignment, partition2AllPotentialConsumers)
+
+ // use priority-queue to evaluate consumer group members in descending-order based on
+ // the number of topic partition assignments (i.e. consumers with most assignments first)
+ pq := make(assignmentPriorityQueue, len(assignments))
+ i := 0
+ for consumerID, consumerAssignments := range assignments {
+ pq[i] = &consumerGroupMember{
+ id: consumerID,
+ assignments: consumerAssignments,
+ }
+ i++
+ }
+ heap.Init(&pq)
+
+ for {
+ // loop until no consumer-group members remain
+ if pq.Len() == 0 {
+ break
+ }
+ member := pq[0]
+
+ // partitions that were assigned to a different consumer last time
+ var prevPartitionIndex int
+ for i, partition := range member.assignments {
+ if _, exists := partitionsWithADifferentPreviousAssignment[partition]; exists {
+ prevPartitionIndex = i
+ break
+ }
+ }
+
+ if len(member.assignments) > 0 {
+ partition := member.assignments[prevPartitionIndex]
+ sortedPartitions = append(sortedPartitions, partition)
+ delete(unassignedPartitions, partition)
+ if prevPartitionIndex == 0 {
+ member.assignments = member.assignments[1:]
+ } else {
+ member.assignments = append(member.assignments[:prevPartitionIndex], member.assignments[prevPartitionIndex+1:]...)
+ }
+ heap.Fix(&pq, 0)
+ } else {
+ heap.Pop(&pq)
+ }
+ }
+
+ for partition := range unassignedPartitions {
+ sortedPartitions = append(sortedPartitions, partition)
+ }
+ } else {
+ // an ascending sorted set of topic partitions based on how many consumers can potentially use them
+ sortedPartitions = sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers)
+ }
+ return sortedPartitions
+}
+
+func sortMemberIDsByPartitionAssignments(assignments map[string][]topicPartitionAssignment) []string {
+ // sort the members by the number of partition assignments in ascending order
+ sortedMemberIDs := make([]string, 0, len(assignments))
+ for memberID := range assignments {
+ sortedMemberIDs = append(sortedMemberIDs, memberID)
+ }
+ sort.SliceStable(sortedMemberIDs, func(i, j int) bool {
+ ret := len(assignments[sortedMemberIDs[i]]) - len(assignments[sortedMemberIDs[j]])
+ if ret == 0 {
+ return sortedMemberIDs[i] < sortedMemberIDs[j]
+ }
+ return len(assignments[sortedMemberIDs[i]]) < len(assignments[sortedMemberIDs[j]])
+ })
+ return sortedMemberIDs
+}
+
+func sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers map[topicPartitionAssignment][]string) []topicPartitionAssignment {
+	// sort the partitions by the number of potential consumers in ascending order
+ sortedPartionIDs := make([]topicPartitionAssignment, len(partition2AllPotentialConsumers))
+ i := 0
+ for partition := range partition2AllPotentialConsumers {
+ sortedPartionIDs[i] = partition
+ i++
+ }
+ sort.Slice(sortedPartionIDs, func(i, j int) bool {
+ if len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) == len(partition2AllPotentialConsumers[sortedPartionIDs[j]]) {
+ ret := strings.Compare(sortedPartionIDs[i].Topic, sortedPartionIDs[j].Topic)
+ if ret == 0 {
+ return sortedPartionIDs[i].Partition < sortedPartionIDs[j].Partition
+ }
+ return ret < 0
+ }
+ return len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) < len(partition2AllPotentialConsumers[sortedPartionIDs[j]])
+ })
+ return sortedPartionIDs
+}
+
+func deepCopyPartitions(src []topicPartitionAssignment) []topicPartitionAssignment {
+ dst := make([]topicPartitionAssignment, len(src))
+ for i, partition := range src {
+ dst[i] = partition
+ }
+ return dst
+}
+
+func deepCopyAssignment(assignment map[string][]topicPartitionAssignment) map[string][]topicPartitionAssignment {
+ copy := make(map[string][]topicPartitionAssignment, len(assignment))
+ for memberID, subscriptions := range assignment {
+ copy[memberID] = append(subscriptions[:0:0], subscriptions...)
+ }
+ return copy
+}
+
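+// areSubscriptionsIdentical reports whether every partition can be consumed by the
+// same set of members and every member can consume the same set of partitions
+// (which is the case when all group members subscribe to exactly the same topics).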
+func areSubscriptionsIdentical(partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) bool {
+ curMembers := make(map[string]int)
+ for _, cur := range partition2AllPotentialConsumers {
+ if len(curMembers) == 0 {
+ for _, curMembersElem := range cur {
+ curMembers[curMembersElem]++
+ }
+ continue
+ }
+
+ if len(curMembers) != len(cur) {
+ return false
+ }
+
+ yMap := make(map[string]int)
+ for _, yElem := range cur {
+ yMap[yElem]++
+ }
+
+ for curMembersMapKey, curMembersMapVal := range curMembers {
+ if yMap[curMembersMapKey] != curMembersMapVal {
+ return false
+ }
+ }
+ }
+
+ curPartitions := make(map[topicPartitionAssignment]int)
+ for _, cur := range consumer2AllPotentialPartitions {
+ if len(curPartitions) == 0 {
+ for _, curPartitionElem := range cur {
+ curPartitions[curPartitionElem]++
+ }
+ continue
+ }
+
+ if len(curPartitions) != len(cur) {
+ return false
+ }
+
+ yMap := make(map[topicPartitionAssignment]int)
+ for _, yElem := range cur {
+ yMap[yElem]++
+ }
+
+ for curMembersMapKey, curMembersMapVal := range curPartitions {
+ if yMap[curMembersMapKey] != curMembersMapVal {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// We need to process subscriptions' user data with each consumer's reported generation in mind:
+// higher generations overwrite lower generations in case of a conflict.
+// Note that a conflict can only exist if the user data is for different generations.
+func prepopulateCurrentAssignments(members map[string]ConsumerGroupMemberMetadata) (map[string][]topicPartitionAssignment, map[topicPartitionAssignment]consumerGenerationPair, error) {
+ currentAssignment := make(map[string][]topicPartitionAssignment)
+ prevAssignment := make(map[topicPartitionAssignment]consumerGenerationPair)
+
+ // for each partition we create a sorted map of its consumers by generation
+ sortedPartitionConsumersByGeneration := make(map[topicPartitionAssignment]map[int]string)
+ for memberID, meta := range members {
+ consumerUserData, err := deserializeTopicPartitionAssignment(meta.UserData)
+ if err != nil {
+ return nil, nil, err
+ }
+ for _, partition := range consumerUserData.partitions() {
+ if consumers, exists := sortedPartitionConsumersByGeneration[partition]; exists {
+ if consumerUserData.hasGeneration() {
+ if _, generationExists := consumers[consumerUserData.generation()]; generationExists {
+ // same partition is assigned to two consumers during the same rebalance.
+ // log a warning and skip this record
+ Logger.Printf("Topic %s Partition %d is assigned to multiple consumers following sticky assignment generation %d", partition.Topic, partition.Partition, consumerUserData.generation())
+ continue
+ } else {
+ consumers[consumerUserData.generation()] = memberID
+ }
+ } else {
+ consumers[defaultGeneration] = memberID
+ }
+ } else {
+ generation := defaultGeneration
+ if consumerUserData.hasGeneration() {
+ generation = consumerUserData.generation()
+ }
+ sortedPartitionConsumersByGeneration[partition] = map[int]string{generation: memberID}
+ }
+ }
+ }
+
+ // prevAssignment holds the prior ConsumerGenerationPair (before current) of each partition
+ // current and previous consumers are the last two consumers of each partition in the above sorted map
+ for partition, consumers := range sortedPartitionConsumersByGeneration {
+ // sort consumers by generation in decreasing order
+ var generations []int
+ for generation := range consumers {
+ generations = append(generations, generation)
+ }
+ sort.Sort(sort.Reverse(sort.IntSlice(generations)))
+
+ consumer := consumers[generations[0]]
+ if _, exists := currentAssignment[consumer]; !exists {
+ currentAssignment[consumer] = []topicPartitionAssignment{partition}
+ } else {
+ currentAssignment[consumer] = append(currentAssignment[consumer], partition)
+ }
+
+ // check for previous assignment, if any
+ if len(generations) > 1 {
+ prevAssignment[partition] = consumerGenerationPair{
+ MemberID: consumers[generations[1]],
+ Generation: generations[1],
+ }
+ }
+ }
+ return currentAssignment, prevAssignment, nil
+}
+
+type consumerGenerationPair struct {
+ MemberID string
+ Generation int
+}
+
+// consumerPair represents a pair of Kafka consumer ids involved in a partition reassignment.
+type consumerPair struct {
+ SrcMemberID string
+ DstMemberID string
+}
+
+// partitionMovements maintains some data structures to simplify lookup of partition movements among consumers.
+type partitionMovements struct {
+ PartitionMovementsByTopic map[string]map[consumerPair]map[topicPartitionAssignment]bool
+ Movements map[topicPartitionAssignment]consumerPair
+}
+
+func (p *partitionMovements) removeMovementRecordOfPartition(partition topicPartitionAssignment) consumerPair {
+ pair := p.Movements[partition]
+ delete(p.Movements, partition)
+
+ partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+ delete(partitionMovementsForThisTopic[pair], partition)
+ if len(partitionMovementsForThisTopic[pair]) == 0 {
+ delete(partitionMovementsForThisTopic, pair)
+ }
+ if len(p.PartitionMovementsByTopic[partition.Topic]) == 0 {
+ delete(p.PartitionMovementsByTopic, partition.Topic)
+ }
+ return pair
+}
+
+func (p *partitionMovements) addPartitionMovementRecord(partition topicPartitionAssignment, pair consumerPair) {
+ p.Movements[partition] = pair
+ if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
+ p.PartitionMovementsByTopic[partition.Topic] = make(map[consumerPair]map[topicPartitionAssignment]bool)
+ }
+ partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+ if _, exists := partitionMovementsForThisTopic[pair]; !exists {
+ partitionMovementsForThisTopic[pair] = make(map[topicPartitionAssignment]bool)
+ }
+ partitionMovementsForThisTopic[pair][partition] = true
+}
+
+func (p *partitionMovements) movePartition(partition topicPartitionAssignment, oldConsumer, newConsumer string) {
+ pair := consumerPair{
+ SrcMemberID: oldConsumer,
+ DstMemberID: newConsumer,
+ }
+ if _, exists := p.Movements[partition]; exists {
+ // this partition has previously moved
+ existingPair := p.removeMovementRecordOfPartition(partition)
+ if existingPair.DstMemberID != oldConsumer {
+ Logger.Printf("Existing pair DstMemberID %s was not equal to the oldConsumer ID %s", existingPair.DstMemberID, oldConsumer)
+ }
+ if existingPair.SrcMemberID != newConsumer {
+ // the partition is not moving back to its previous consumer
+ p.addPartitionMovementRecord(partition, consumerPair{
+ SrcMemberID: existingPair.SrcMemberID,
+ DstMemberID: newConsumer,
+ })
+ }
+ } else {
+ p.addPartitionMovementRecord(partition, pair)
+ }
+}
+
+func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicPartitionAssignment, oldConsumer, newConsumer string) topicPartitionAssignment {
+ if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
+ return partition
+ }
+ if _, exists := p.Movements[partition]; exists {
+ // this partition has previously moved
+ if oldConsumer != p.Movements[partition].DstMemberID {
+ Logger.Printf("Partition movement DstMemberID %s was not equal to the oldConsumer ID %s", p.Movements[partition].DstMemberID, oldConsumer)
+ }
+ oldConsumer = p.Movements[partition].SrcMemberID
+ }
+
+ partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+ reversePair := consumerPair{
+ SrcMemberID: newConsumer,
+ DstMemberID: oldConsumer,
+ }
+ if _, exists := partitionMovementsForThisTopic[reversePair]; !exists {
+ return partition
+ }
+ var reversePairPartition topicPartitionAssignment
+ for otherPartition := range partitionMovementsForThisTopic[reversePair] {
+ reversePairPartition = otherPartition
+ }
+ return reversePairPartition
+}
+
+func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) {
+ if src == dst {
+ return currentPath, false
+ }
+ if len(pairs) == 0 {
+ return currentPath, false
+ }
+ for _, pair := range pairs {
+ if src == pair.SrcMemberID && dst == pair.DstMemberID {
+ currentPath = append(currentPath, src, dst)
+ return currentPath, true
+ }
+ }
+
+ for _, pair := range pairs {
+ if pair.SrcMemberID == src {
+ // create a deep copy of the pairs, excluding the current pair
+ reducedSet := make([]consumerPair, len(pairs)-1)
+ i := 0
+ for _, p := range pairs {
+ if p != pair {
+					reducedSet[i] = p
+ i++
+ }
+ }
+
+ currentPath = append(currentPath, pair.SrcMemberID)
+ return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath)
+ }
+ }
+ return currentPath, false
+}
+
+func (p *partitionMovements) in(cycle []string, cycles [][]string) bool {
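+	// superCycle is cycle[:len-1] followed by cycle itself, so every rotation of
+	// `cycle` appears in it as a contiguous sub-list; e.g. [A B A] expands to
+	// [A B A B A], which also contains the rotation [B A B]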
+ superCycle := make([]string, len(cycle)-1)
+ for i := 0; i < len(cycle)-1; i++ {
+ superCycle[i] = cycle[i]
+ }
+ for _, c := range cycle {
+ superCycle = append(superCycle, c)
+ }
+ for _, foundCycle := range cycles {
+ if len(foundCycle) == len(cycle) && indexOfSubList(superCycle, foundCycle) != -1 {
+ return true
+ }
+ }
+ return false
+}
+
+func (p *partitionMovements) hasCycles(pairs []consumerPair) bool {
+ cycles := make([][]string, 0)
+ for _, pair := range pairs {
+ // create a deep copy of the pairs, excluding the current pair
+ reducedPairs := make([]consumerPair, len(pairs)-1)
+ i := 0
+		for _, cp := range pairs {
+			if cp != pair {
+				reducedPairs[i] = cp
+				i++
+			}
+		}
+ if path, linked := p.isLinked(pair.DstMemberID, pair.SrcMemberID, reducedPairs, []string{pair.SrcMemberID}); linked {
+ if !p.in(path, cycles) {
+ cycles = append(cycles, path)
+ Logger.Printf("A cycle of length %d was found: %v", len(path)-1, path)
+ }
+ }
+ }
+
+	// For now we only want to make sure there are no partition movements of the same topic between a pair of consumers.
+	// The odds of finding a cycle among more than two consumers seem so low (according to various randomized
+	// tests with the given sticky algorithm) that it is not worth the added complexity of handling those cases.
+ for _, cycle := range cycles {
+ if len(cycle) == 3 {
+ return true
+ }
+ }
+ return false
+}
+
+func (p *partitionMovements) isSticky() bool {
+ for topic, movements := range p.PartitionMovementsByTopic {
+ movementPairs := make([]consumerPair, len(movements))
+ i := 0
+ for pair := range movements {
+ movementPairs[i] = pair
+ i++
+ }
+ if p.hasCycles(movementPairs) {
+ Logger.Printf("Stickiness is violated for topic %s", topic)
+ Logger.Printf("Partition movements for this topic occurred among the following consumer pairs: %v", movements)
+ return false
+ }
+ }
+ return true
+}
+
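+// indexOfSubList returns the starting index of the first occurrence of target as a
+// contiguous sub-list of source, or -1 if target does not occur (analogous to Java's
+// Collections.indexOfSubList).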
+func indexOfSubList(source []string, target []string) int {
+ targetSize := len(target)
+ maxCandidate := len(source) - targetSize
+nextCand:
+ for candidate := 0; candidate <= maxCandidate; candidate++ {
+ j := candidate
+ for i := 0; i < targetSize; i++ {
+ if target[i] != source[j] {
+ // Element mismatch, try next cand
+ continue nextCand
+ }
+ j++
+ }
+ // All elements of candidate matched target
+ return candidate
+ }
+ return -1
+}
+
+type consumerGroupMember struct {
+ id string
+ assignments []topicPartitionAssignment
+}
+
+// assignmentPriorityQueue is a priority-queue of consumer group members that is sorted
+// in descending order (most assignments to least assignments).
+type assignmentPriorityQueue []*consumerGroupMember
+
+func (pq assignmentPriorityQueue) Len() int { return len(pq) }
+
+func (pq assignmentPriorityQueue) Less(i, j int) bool {
+	// order assignment priority queue in descending order using assignment-count/member-id
+ if len(pq[i].assignments) == len(pq[j].assignments) {
+ return strings.Compare(pq[i].id, pq[j].id) > 0
+ }
+ return len(pq[i].assignments) > len(pq[j].assignments)
+}
+
+func (pq assignmentPriorityQueue) Swap(i, j int) {
+ pq[i], pq[j] = pq[j], pq[i]
+}
+
+func (pq *assignmentPriorityQueue) Push(x interface{}) {
+ member := x.(*consumerGroupMember)
+ *pq = append(*pq, member)
+}
+
+func (pq *assignmentPriorityQueue) Pop() interface{} {
+ old := *pq
+ n := len(old)
+ member := old[n-1]
+ *pq = old[0 : n-1]
+ return member
+}
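+
+// assignmentPriorityQueue satisfies heap.Interface, so it is meant to be driven
+// through the standard container/heap package. A minimal illustrative sketch
+// (not part of the upstream API):
+//
+//	pq := assignmentPriorityQueue{&consumerGroupMember{id: "m1"}, &consumerGroupMember{id: "m2"}}
+//	heap.Init(&pq)
+//	busiest := heap.Pop(&pq).(*consumerGroupMember) // member with the most assignments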
diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go
new file mode 100644
index 0000000..8146749
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/broker.go
@@ -0,0 +1,1354 @@
+package sarama
+
+import (
+ "crypto/tls"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "net"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ metrics "github.com/rcrowley/go-metrics"
+)
+
+// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
+type Broker struct {
+ conf *Config
+ rack *string
+
+ id int32
+ addr string
+ correlationID int32
+ conn net.Conn
+ connErr error
+ lock sync.Mutex
+ opened int32
+ responses chan responsePromise
+ done chan bool
+
+ registeredMetrics []string
+
+ incomingByteRate metrics.Meter
+ requestRate metrics.Meter
+ requestSize metrics.Histogram
+ requestLatency metrics.Histogram
+ outgoingByteRate metrics.Meter
+ responseRate metrics.Meter
+ responseSize metrics.Histogram
+ brokerIncomingByteRate metrics.Meter
+ brokerRequestRate metrics.Meter
+ brokerRequestSize metrics.Histogram
+ brokerRequestLatency metrics.Histogram
+ brokerOutgoingByteRate metrics.Meter
+ brokerResponseRate metrics.Meter
+ brokerResponseSize metrics.Histogram
+
+ kerberosAuthenticator GSSAPIKerberosAuth
+}
+
+// SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker
+type SASLMechanism string
+
+const (
+ // SASLTypeOAuth represents the SASL/OAUTHBEARER mechanism (Kafka 2.0.0+)
+ SASLTypeOAuth = "OAUTHBEARER"
+ // SASLTypePlaintext represents the SASL/PLAIN mechanism
+ SASLTypePlaintext = "PLAIN"
+ // SASLTypeSCRAMSHA256 represents the SCRAM-SHA-256 mechanism.
+ SASLTypeSCRAMSHA256 = "SCRAM-SHA-256"
+ // SASLTypeSCRAMSHA512 represents the SCRAM-SHA-512 mechanism.
+ SASLTypeSCRAMSHA512 = "SCRAM-SHA-512"
+ SASLTypeGSSAPI = "GSSAPI"
+ // SASLHandshakeV0 is v0 of the Kafka SASL handshake protocol. Client and
+ // server negotiate SASL auth using opaque packets.
+ SASLHandshakeV0 = int16(0)
+ // SASLHandshakeV1 is v1 of the Kafka SASL handshake protocol. Client and
+ // server negotiate SASL by wrapping tokens with Kafka protocol headers.
+ SASLHandshakeV1 = int16(1)
+ // SASLExtKeyAuth is the reserved extension key name sent as part of the
+	// SASL/OAUTHBEARER initial client response
+ SASLExtKeyAuth = "auth"
+)
+
+// AccessToken contains an access token used to authenticate a
+// SASL/OAUTHBEARER client along with associated metadata.
+type AccessToken struct {
+ // Token is the access token payload.
+ Token string
+	// Extensions is an optional map of arbitrary key-value pairs that can be
+ // sent with the SASL/OAUTHBEARER initial client response. These values are
+ // ignored by the SASL server if they are unexpected. This feature is only
+ // supported by Kafka >= 2.1.0.
+ Extensions map[string]string
+}
+
+// AccessTokenProvider is the interface that encapsulates how implementors
+// can generate access tokens for Kafka broker authentication.
+type AccessTokenProvider interface {
+ // Token returns an access token. The implementation should ensure token
+ // reuse so that multiple calls at connect time do not create multiple
+ // tokens. The implementation should also periodically refresh the token in
+ // order to guarantee that each call returns an unexpired token. This
+ // method should not block indefinitely--a timeout error should be returned
+ // after a short period of inactivity so that the broker connection logic
+ // can log debugging information and retry.
+ Token() (*AccessToken, error)
+}
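+
+// A minimal illustrative provider (not part of Sarama) that always returns the same
+// token might look like:
+//
+//	type staticTokenProvider struct{ token string }
+//
+//	func (p staticTokenProvider) Token() (*sarama.AccessToken, error) {
+//		return &sarama.AccessToken{Token: p.token}, nil
+//	}
+//
+// A production implementation should cache the token and refresh it as described above.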
+
+// SCRAMClient is an interface to a SCRAM
+// client implementation.
+type SCRAMClient interface {
+ // Begin prepares the client for the SCRAM exchange
+ // with the server with a user name and a password
+ Begin(userName, password, authzID string) error
+ // Step steps client through the SCRAM exchange. It is
+ // called repeatedly until it errors or `Done` returns true.
+ Step(challenge string) (response string, err error)
+ // Done should return true when the SCRAM conversation
+ // is over.
+ Done() bool
+}
+
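+// responsePromise tracks a single in-flight request: the responseReceiver goroutine
+// delivers either the raw response bytes on packets or a failure on errors, while
+// requestTime and correlationID are used for latency metrics and for matching the
+// response to its request.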
+type responsePromise struct {
+ requestTime time.Time
+ correlationID int32
+ packets chan []byte
+ errors chan error
+}
+
+// NewBroker creates and returns a Broker targeting the given host:port address.
+// This does not attempt to actually connect; you have to call Open() for that.
+func NewBroker(addr string) *Broker {
+ return &Broker{id: -1, addr: addr}
+}
+
+// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
+// waiting for the connection to complete. This means that any subsequent operations on the broker will
+// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
+// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
+// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
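+//
+// An illustrative use from client code (the address is a placeholder):
+//
+//	broker := sarama.NewBroker("localhost:9092")
+//	if err := broker.Open(sarama.NewConfig()); err != nil {
+//		// ErrAlreadyConnected or a configuration error
+//	}
+//	connected, err := broker.Connected() // blocks until the dial attempt completes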
+func (b *Broker) Open(conf *Config) error {
+ if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
+ return ErrAlreadyConnected
+ }
+
+ if conf == nil {
+ conf = NewConfig()
+ }
+
+ err := conf.Validate()
+ if err != nil {
+ return err
+ }
+
+ b.lock.Lock()
+
+ go withRecover(func() {
+ defer b.lock.Unlock()
+
+ dialer := net.Dialer{
+ Timeout: conf.Net.DialTimeout,
+ KeepAlive: conf.Net.KeepAlive,
+ LocalAddr: conf.Net.LocalAddr,
+ }
+
+ if conf.Net.TLS.Enable {
+ b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
+ } else if conf.Net.Proxy.Enable {
+ b.conn, b.connErr = conf.Net.Proxy.Dialer.Dial("tcp", b.addr)
+ } else {
+ b.conn, b.connErr = dialer.Dial("tcp", b.addr)
+ }
+ if b.connErr != nil {
+ Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
+ b.conn = nil
+ atomic.StoreInt32(&b.opened, 0)
+ return
+ }
+ b.conn = newBufConn(b.conn)
+
+ b.conf = conf
+
+ // Create or reuse the global metrics shared between brokers
+ b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
+ b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
+ b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
+ b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
+ b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
+ b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
+ b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
+ // Do not gather metrics for seeded broker (only used during bootstrap) because they share
+ // the same id (-1) and are already exposed through the global metrics above
+ if b.id >= 0 {
+ b.registerMetrics()
+ }
+
+ if conf.Net.SASL.Enable {
+
+ b.connErr = b.authenticateViaSASL()
+
+ if b.connErr != nil {
+ err = b.conn.Close()
+ if err == nil {
+ Logger.Printf("Closed connection to broker %s\n", b.addr)
+ } else {
+ Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
+ }
+ b.conn = nil
+ atomic.StoreInt32(&b.opened, 0)
+ return
+ }
+ }
+
+ b.done = make(chan bool)
+ b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
+
+ if b.id >= 0 {
+ Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
+ } else {
+ Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
+ }
+ go withRecover(b.responseReceiver)
+ })
+
+ return nil
+}
+
+// Connected returns true if the broker is connected and false otherwise. If the broker is not
+// connected but it had tried to connect, the error from that connection attempt is also returned.
+func (b *Broker) Connected() (bool, error) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ return b.conn != nil, b.connErr
+}
+
+//Close closes the broker resources
+func (b *Broker) Close() error {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if b.conn == nil {
+ return ErrNotConnected
+ }
+
+ close(b.responses)
+ <-b.done
+
+ err := b.conn.Close()
+
+ b.conn = nil
+ b.connErr = nil
+ b.done = nil
+ b.responses = nil
+
+ b.unregisterMetrics()
+
+ if err == nil {
+ Logger.Printf("Closed connection to broker %s\n", b.addr)
+ } else {
+ Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
+ }
+
+ atomic.StoreInt32(&b.opened, 0)
+
+ return err
+}
+
+// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
+func (b *Broker) ID() int32 {
+ return b.id
+}
+
+// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
+func (b *Broker) Addr() string {
+ return b.addr
+}
+
+// Rack returns the broker's rack as retrieved from Kafka's metadata or the
+// empty string if it is not known. The returned value corresponds to the
+// broker's broker.rack configuration setting. Requires protocol version to be
+// at least v0.10.0.0.
+func (b *Broker) Rack() string {
+ if b.rack == nil {
+ return ""
+ }
+ return *b.rack
+}
+
+// GetMetadata sends a metadata request and returns a metadata response or error
+func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
+ response := new(MetadataResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// GetConsumerMetadata sends a consumer metadata request and returns a consumer metadata response or error
+func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
+ response := new(ConsumerMetadataResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// FindCoordinator sends a find coordinator request and returns a response or error
+func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
+ response := new(FindCoordinatorResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// GetAvailableOffsets returns an offset response or error
+func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
+ response := new(OffsetResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+//Produce returns a produce response or error
+func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
+ var (
+ response *ProduceResponse
+ err error
+ )
+
+ if request.RequiredAcks == NoResponse {
+ err = b.sendAndReceive(request, nil)
+ } else {
+ response = new(ProduceResponse)
+ err = b.sendAndReceive(request, response)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+//Fetch returns a FetchResponse or error
+func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
+ response := new(FetchResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// CommitOffset returns an offset commit response or error
+func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
+ response := new(OffsetCommitResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+//FetchOffset returns an offset fetch response or error
+func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
+ response := new(OffsetFetchResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+//JoinGroup returns a join group response or error
+func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
+ response := new(JoinGroupResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+//SyncGroup returns a sync group response or error
+func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
+ response := new(SyncGroupResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// LeaveGroup returns a leave group response or error
+func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
+ response := new(LeaveGroupResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+//Heartbeat returns a heartbeat response or error
+func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
+ response := new(HeartbeatResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// ListGroups returns a list groups response or error
+func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
+ response := new(ListGroupsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// DescribeGroups returns a describe groups response or error
+func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
+ response := new(DescribeGroupsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// ApiVersions returns an API versions response or error
+func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
+ response := new(ApiVersionsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// CreateTopics sends a create topics request and returns a create topics response or error
+func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
+ response := new(CreateTopicsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// DeleteTopics sends a delete topics request and returns a delete topics response or error
+func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
+ response := new(DeleteTopicsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// CreatePartitions sends a create partitions request and returns a create
+// partitions response or error
+func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
+ response := new(CreatePartitionsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// DeleteRecords sends a request to delete records and returns a delete records
+// response or error
+func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) {
+ response := new(DeleteRecordsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+//DescribeAcls sends a describe acl request and returns a response or error
+func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
+ response := new(DescribeAclsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+//CreateAcls sends a create acl request and returns a response or error
+func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
+ response := new(CreateAclsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+//DeleteAcls sends a delete acl request and returns a response or error
+func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
+ response := new(DeleteAclsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+//InitProducerID sends an init producer request and returns a response or error
+func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
+ response := new(InitProducerIDResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// AddPartitionsToTxn sends a request to add partitions to a txn and returns
+// a response or error
+func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
+ response := new(AddPartitionsToTxnResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+//AddOffsetsToTxn sends a request to add offsets to txn and returns a response
+//or error
+func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
+ response := new(AddOffsetsToTxnResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+//EndTxn sends a request to end txn and returns a response or error
+func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
+ response := new(EndTxnResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+//TxnOffsetCommit sends a request to commit transaction offsets and returns
+//a response or error
+func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
+ response := new(TxnOffsetCommitResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+//DescribeConfigs sends a request to describe config and returns a response or
+//error
+func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
+ response := new(DescribeConfigsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// AlterConfigs sends a request to alter config and returns a response or error
+func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
+ response := new(AlterConfigsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+//DeleteGroups sends a request to delete groups and returns a response or error
+func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) {
+ response := new(DeleteGroupsResponse)
+
+ if err := b.sendAndReceive(request, response); err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+//DescribeLogDirs sends a request to get the broker's log dir paths and sizes
+func (b *Broker) DescribeLogDirs(request *DescribeLogDirsRequest) (*DescribeLogDirsResponse, error) {
+ response := new(DescribeLogDirsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// readFull ensures the conn ReadDeadline has been set up before making a
+// call to io.ReadFull
+func (b *Broker) readFull(buf []byte) (n int, err error) {
+ if err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)); err != nil {
+ return 0, err
+ }
+
+ return io.ReadFull(b.conn, buf)
+}
+
+// write ensures the conn WriteDeadline has been set up before making a
+// call to conn.Write
+func (b *Broker) write(buf []byte) (n int, err error) {
+ if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil {
+ return 0, err
+ }
+
+ return b.conn.Write(buf)
+}
+
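+// send encodes and writes the given request while holding the broker lock. When
+// promiseResponse is true it registers a responsePromise on b.responses so that the
+// responseReceiver goroutine can deliver the reply; when false (e.g. produce requests
+// with RequiredAcks == NoResponse) it only records the request latency and returns a
+// nil promise.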
+func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if b.conn == nil {
+ if b.connErr != nil {
+ return nil, b.connErr
+ }
+ return nil, ErrNotConnected
+ }
+
+ if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
+ return nil, ErrUnsupportedVersion
+ }
+
+ req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+ buf, err := encode(req, b.conf.MetricRegistry)
+ if err != nil {
+ return nil, err
+ }
+
+ requestTime := time.Now()
+ bytes, err := b.write(buf)
+ b.updateOutgoingCommunicationMetrics(bytes)
+ if err != nil {
+ return nil, err
+ }
+ b.correlationID++
+
+ if !promiseResponse {
+ // Record request latency without the response
+ b.updateRequestLatencyMetrics(time.Since(requestTime))
+ return nil, nil
+ }
+
+ promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)}
+ b.responses <- promise
+
+ return &promise, nil
+}
+
+func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
+ promise, err := b.send(req, res != nil)
+ if err != nil {
+ return err
+ }
+
+ if promise == nil {
+ return nil
+ }
+
+ select {
+ case buf := <-promise.packets:
+ return versionedDecode(buf, res, req.version())
+ case err = <-promise.errors:
+ return err
+ }
+}
+
+func (b *Broker) decode(pd packetDecoder, version int16) (err error) {
+ b.id, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ host, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ port, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ if version >= 1 {
+ b.rack, err = pd.getNullableString()
+ if err != nil {
+ return err
+ }
+ }
+
+ b.addr = net.JoinHostPort(host, fmt.Sprint(port))
+ if _, _, err := net.SplitHostPort(b.addr); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (b *Broker) encode(pe packetEncoder, version int16) (err error) {
+ host, portstr, err := net.SplitHostPort(b.addr)
+ if err != nil {
+ return err
+ }
+
+ port, err := strconv.Atoi(portstr)
+ if err != nil {
+ return err
+ }
+
+ pe.putInt32(b.id)
+
+ err = pe.putString(host)
+ if err != nil {
+ return err
+ }
+
+ pe.putInt32(int32(port))
+
+ if version >= 1 {
+ err = pe.putNullableString(b.rack)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
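+// responseReceiver runs in its own goroutine (started from Open) and services the
+// b.responses channel: for each pending promise it reads the 8 byte response header
+// (4 byte length plus 4 byte correlation ID), verifies the correlation ID, then reads
+// the body and hands it to the waiting caller. After the first read or decode failure
+// the connection is treated as dead and every remaining promise receives that error.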
+func (b *Broker) responseReceiver() {
+ var dead error
+ header := make([]byte, 8)
+
+ for response := range b.responses {
+ if dead != nil {
+ response.errors <- dead
+ continue
+ }
+
+ bytesReadHeader, err := b.readFull(header)
+ requestLatency := time.Since(response.requestTime)
+ if err != nil {
+ b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+ dead = err
+ response.errors <- err
+ continue
+ }
+
+ decodedHeader := responseHeader{}
+ err = decode(header, &decodedHeader)
+ if err != nil {
+ b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+ dead = err
+ response.errors <- err
+ continue
+ }
+ if decodedHeader.correlationID != response.correlationID {
+ b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+ // TODO if decoded ID < cur ID, discard until we catch up
+ // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
+ dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
+ response.errors <- dead
+ continue
+ }
+
+ buf := make([]byte, decodedHeader.length-4)
+ bytesReadBody, err := b.readFull(buf)
+ b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
+ if err != nil {
+ dead = err
+ response.errors <- err
+ continue
+ }
+
+ response.packets <- buf
+ }
+ close(b.done)
+}
+
+func (b *Broker) authenticateViaSASL() error {
+ switch b.conf.Net.SASL.Mechanism {
+ case SASLTypeOAuth:
+ return b.sendAndReceiveSASLOAuth(b.conf.Net.SASL.TokenProvider)
+ case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
+ return b.sendAndReceiveSASLSCRAMv1()
+ case SASLTypeGSSAPI:
+ return b.sendAndReceiveKerberos()
+ default:
+ return b.sendAndReceiveSASLPlainAuth()
+ }
+}
+
+func (b *Broker) sendAndReceiveKerberos() error {
+ b.kerberosAuthenticator.Config = &b.conf.Net.SASL.GSSAPI
+ if b.kerberosAuthenticator.NewKerberosClientFunc == nil {
+ b.kerberosAuthenticator.NewKerberosClientFunc = NewKerberosClient
+ }
+ return b.kerberosAuthenticator.Authorize(b)
+}
+
+func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int16) error {
+ rb := &SaslHandshakeRequest{Mechanism: string(saslType), Version: version}
+
+ req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+ buf, err := encode(req, b.conf.MetricRegistry)
+ if err != nil {
+ return err
+ }
+
+ requestTime := time.Now()
+ bytes, err := b.write(buf)
+ b.updateOutgoingCommunicationMetrics(bytes)
+ if err != nil {
+ Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
+ return err
+ }
+ b.correlationID++
+
+ header := make([]byte, 8) // response header
+ _, err = b.readFull(header)
+ if err != nil {
+ Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
+ return err
+ }
+
+ length := binary.BigEndian.Uint32(header[:4])
+ payload := make([]byte, length-4)
+ n, err := b.readFull(payload)
+ if err != nil {
+ Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
+ return err
+ }
+
+ b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
+ res := &SaslHandshakeResponse{}
+
+ err = versionedDecode(payload, res, 0)
+ if err != nil {
+ Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
+ return err
+ }
+
+ if res.Err != ErrNoError {
+ Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
+ return res.Err
+ }
+
+ Logger.Print("Successful SASL handshake. Available mechanisms: ", res.EnabledMechanisms)
+ return nil
+}
+
+// Kafka 0.10.x supported SASL PLAIN/Kerberos via KAFKA-3149 (KIP-43).
+// Kafka 1.x.x onward added a SaslAuthenticate request/response message which
+// wraps the SASL flow in the Kafka protocol, which allows for returning
+// meaningful errors on authentication failure.
+//
+// In SASL Plain, Kafka expects the auth header to be in the following format
+// Message format (from https://tools.ietf.org/html/rfc4616):
+//
+// message = [authzid] UTF8NUL authcid UTF8NUL passwd
+// authcid = 1*SAFE ; MUST accept up to 255 octets
+// authzid = 1*SAFE ; MUST accept up to 255 octets
+// passwd = 1*SAFE ; MUST accept up to 255 octets
+// UTF8NUL = %x00 ; UTF-8 encoded NUL character
+//
+// SAFE = UTF1 / UTF2 / UTF3 / UTF4
+// ;; any UTF-8 encoded Unicode character except NUL
+//
+// With SASL v0 handshake and auth then:
+// When credentials are valid, Kafka returns a 4 byte array of null characters.
+// When credentials are invalid, Kafka closes the connection.
+//
+// With SASL v1 handshake and auth then:
+// When credentials are invalid, Kafka replies with a SaslAuthenticate response
+// containing an error code and message detailing the authentication failure.
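+//
+// For example, with an empty authzid, user "alice" and password "secret" the
+// SASL/PLAIN payload is "\x00alice\x00secret" (13 bytes). In the V0 flow it is sent
+// with a 4 byte big-endian length prefix; in the V1 flow it is carried inside a
+// SaslAuthenticateRequest.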
+func (b *Broker) sendAndReceiveSASLPlainAuth() error {
+	// default to V0 to allow for backward compatibility when SASL is enabled
+ // but not the handshake
+ if b.conf.Net.SASL.Handshake {
+
+ handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version)
+ if handshakeErr != nil {
+ Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
+ return handshakeErr
+ }
+ }
+
+ if b.conf.Net.SASL.Version == SASLHandshakeV1 {
+ return b.sendAndReceiveV1SASLPlainAuth()
+ }
+ return b.sendAndReceiveV0SASLPlainAuth()
+}
+
+// sendAndReceiveV0SASLPlainAuth performs the v0 SASL auth exchange, NOT wrapped in the Kafka protocol
+func (b *Broker) sendAndReceiveV0SASLPlainAuth() error {
+
+ length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
+ authBytes := make([]byte, length+4) //4 byte length header + auth data
+ binary.BigEndian.PutUint32(authBytes, uint32(length))
+ copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))
+
+ requestTime := time.Now()
+ bytesWritten, err := b.write(authBytes)
+ b.updateOutgoingCommunicationMetrics(bytesWritten)
+ if err != nil {
+ Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+ return err
+ }
+
+ header := make([]byte, 4)
+ n, err := b.readFull(header)
+ b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
+ // If the credentials are valid, we would get a 4 byte response filled with null characters.
+ // Otherwise, the broker closes the connection and we get an EOF
+ if err != nil {
+ Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+ return err
+ }
+
+ Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
+ return nil
+}
+
+// sendAndReceiveV1SASLPlainAuth performs the v1 SASL authentication exchange wrapped in the Kafka protocol
+func (b *Broker) sendAndReceiveV1SASLPlainAuth() error {
+ correlationID := b.correlationID
+
+ requestTime := time.Now()
+
+ bytesWritten, err := b.sendSASLPlainAuthClientResponse(correlationID)
+
+ b.updateOutgoingCommunicationMetrics(bytesWritten)
+
+ if err != nil {
+ Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+ return err
+ }
+
+ b.correlationID++
+
+ bytesRead, err := b.receiveSASLServerResponse(&SaslAuthenticateResponse{}, correlationID)
+ b.updateIncomingCommunicationMetrics(bytesRead, time.Since(requestTime))
+
+	// With v1 SASL we get an error message set in the response, which we can return
+ if err != nil {
+ Logger.Printf("Error returned from broker during SASL flow %s: %s\n", b.addr, err.Error())
+ return err
+ }
+
+ return nil
+}
+
+// sendAndReceiveSASLOAuth performs the authentication flow as described by KIP-255
+// https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=75968876
+func (b *Broker) sendAndReceiveSASLOAuth(provider AccessTokenProvider) error {
+ if err := b.sendAndReceiveSASLHandshake(SASLTypeOAuth, SASLHandshakeV1); err != nil {
+ return err
+ }
+
+ token, err := provider.Token()
+ if err != nil {
+ return err
+ }
+
+ message, err := buildClientFirstMessage(token)
+ if err != nil {
+ return err
+ }
+
+ challenged, err := b.sendClientMessage(message)
+ if err != nil {
+ return err
+ }
+
+ if challenged {
+ // Abort the token exchange. The broker returns the failure code.
+ _, err = b.sendClientMessage([]byte(`\x01`))
+ }
+
+ return err
+}
+
+// sendClientMessage sends a SASL/OAUTHBEARER client message and returns true
+// if the broker responds with a challenge, in which case the token is
+// rejected.
+func (b *Broker) sendClientMessage(message []byte) (bool, error) {
+
+ requestTime := time.Now()
+ correlationID := b.correlationID
+
+ bytesWritten, err := b.sendSASLOAuthBearerClientMessage(message, correlationID)
+ if err != nil {
+ return false, err
+ }
+
+ b.updateOutgoingCommunicationMetrics(bytesWritten)
+ b.correlationID++
+
+ res := &SaslAuthenticateResponse{}
+ bytesRead, err := b.receiveSASLServerResponse(res, correlationID)
+
+ requestLatency := time.Since(requestTime)
+ b.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
+
+ isChallenge := len(res.SaslAuthBytes) > 0
+
+ if isChallenge && err != nil {
+ Logger.Printf("Broker rejected authentication token: %s", res.SaslAuthBytes)
+ }
+
+ return isChallenge, err
+}
+
+func (b *Broker) sendAndReceiveSASLSCRAMv1() error {
+ if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV1); err != nil {
+ return err
+ }
+
+ scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc()
+ if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil {
+ return fmt.Errorf("failed to start SCRAM exchange with the server: %s", err.Error())
+ }
+
+ msg, err := scramClient.Step("")
+ if err != nil {
+ return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error())
+
+ }
+
+ for !scramClient.Done() {
+ requestTime := time.Now()
+ correlationID := b.correlationID
+ bytesWritten, err := b.sendSaslAuthenticateRequest(correlationID, []byte(msg))
+ if err != nil {
+ Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+ return err
+ }
+
+ b.updateOutgoingCommunicationMetrics(bytesWritten)
+ b.correlationID++
+ challenge, err := b.receiveSaslAuthenticateResponse(correlationID)
+ if err != nil {
+ Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+ return err
+ }
+
+ b.updateIncomingCommunicationMetrics(len(challenge), time.Since(requestTime))
+ msg, err = scramClient.Step(string(challenge))
+ if err != nil {
+ Logger.Println("SASL authentication failed", err)
+ return err
+ }
+ }
+
+ Logger.Println("SASL authentication succeeded")
+ return nil
+}
+
+func (b *Broker) sendSaslAuthenticateRequest(correlationID int32, msg []byte) (int, error) {
+ rb := &SaslAuthenticateRequest{msg}
+ req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
+ buf, err := encode(req, b.conf.MetricRegistry)
+ if err != nil {
+ return 0, err
+ }
+
+ return b.write(buf)
+}
+
+func (b *Broker) receiveSaslAuthenticateResponse(correlationID int32) ([]byte, error) {
+ buf := make([]byte, responseLengthSize+correlationIDSize)
+ _, err := b.readFull(buf)
+ if err != nil {
+ return nil, err
+ }
+
+ header := responseHeader{}
+ err = decode(buf, &header)
+ if err != nil {
+ return nil, err
+ }
+
+ if header.correlationID != correlationID {
+ return nil, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID)
+ }
+
+ buf = make([]byte, header.length-correlationIDSize)
+ _, err = b.readFull(buf)
+ if err != nil {
+ return nil, err
+ }
+
+ res := &SaslAuthenticateResponse{}
+ if err := versionedDecode(buf, res, 0); err != nil {
+ return nil, err
+ }
+ if res.Err != ErrNoError {
+ return nil, res.Err
+ }
+ return res.SaslAuthBytes, nil
+}
+
+// Build SASL/OAUTHBEARER initial client response as described by RFC-7628
+// https://tools.ietf.org/html/rfc7628
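+//
+// For example, a token "abc" with no extensions produces the message
+// "n,,\x01auth=Bearer abc\x01\x01"; any extensions are appended as \x01-separated
+// key=value pairs before the two trailing \x01 bytes.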
+func buildClientFirstMessage(token *AccessToken) ([]byte, error) {
+ var ext string
+
+	if len(token.Extensions) > 0 {
+ if _, ok := token.Extensions[SASLExtKeyAuth]; ok {
+ return []byte{}, fmt.Errorf("the extension `%s` is invalid", SASLExtKeyAuth)
+ }
+ ext = "\x01" + mapToString(token.Extensions, "=", "\x01")
+ }
+
+ resp := []byte(fmt.Sprintf("n,,\x01auth=Bearer %s%s\x01\x01", token.Token, ext))
+
+ return resp, nil
+}
+
+// mapToString returns the key-value pairs joined into a single string, ordered by key.
+// keyValSep separates the key from the value. elemSep separates each pair.
+func mapToString(extensions map[string]string, keyValSep string, elemSep string) string {
+ buf := make([]string, 0, len(extensions))
+
+ for k, v := range extensions {
+ buf = append(buf, k+keyValSep+v)
+ }
+
+ sort.Strings(buf)
+
+ return strings.Join(buf, elemSep)
+}
+
+func (b *Broker) sendSASLPlainAuthClientResponse(correlationID int32) (int, error) {
+ authBytes := []byte("\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password)
+ rb := &SaslAuthenticateRequest{authBytes}
+ req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
+ buf, err := encode(req, b.conf.MetricRegistry)
+ if err != nil {
+ return 0, err
+ }
+
+ return b.write(buf)
+}
+
+func (b *Broker) sendSASLOAuthBearerClientMessage(initialResp []byte, correlationID int32) (int, error) {
+
+ rb := &SaslAuthenticateRequest{initialResp}
+
+ req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
+
+ buf, err := encode(req, b.conf.MetricRegistry)
+ if err != nil {
+ return 0, err
+ }
+
+ return b.write(buf)
+}
+
+func (b *Broker) receiveSASLServerResponse(res *SaslAuthenticateResponse, correlationID int32) (int, error) {
+ buf := make([]byte, responseLengthSize+correlationIDSize)
+ bytesRead, err := b.readFull(buf)
+ if err != nil {
+ return bytesRead, err
+ }
+
+ header := responseHeader{}
+ err = decode(buf, &header)
+ if err != nil {
+ return bytesRead, err
+ }
+
+ if header.correlationID != correlationID {
+ return bytesRead, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID)
+ }
+
+ buf = make([]byte, header.length-correlationIDSize)
+ c, err := b.readFull(buf)
+ bytesRead += c
+ if err != nil {
+ return bytesRead, err
+ }
+
+ if err := versionedDecode(buf, res, 0); err != nil {
+ return bytesRead, err
+ }
+
+ if res.Err != ErrNoError {
+ return bytesRead, res.Err
+ }
+
+ return bytesRead, nil
+}
+
+func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
+ b.updateRequestLatencyMetrics(requestLatency)
+ b.responseRate.Mark(1)
+
+ if b.brokerResponseRate != nil {
+ b.brokerResponseRate.Mark(1)
+ }
+
+ responseSize := int64(bytes)
+ b.incomingByteRate.Mark(responseSize)
+ if b.brokerIncomingByteRate != nil {
+ b.brokerIncomingByteRate.Mark(responseSize)
+ }
+
+ b.responseSize.Update(responseSize)
+ if b.brokerResponseSize != nil {
+ b.brokerResponseSize.Update(responseSize)
+ }
+}
+
+func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
+ requestLatencyInMs := int64(requestLatency / time.Millisecond)
+ b.requestLatency.Update(requestLatencyInMs)
+
+ if b.brokerRequestLatency != nil {
+ b.brokerRequestLatency.Update(requestLatencyInMs)
+ }
+
+}
+
+func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
+ b.requestRate.Mark(1)
+ if b.brokerRequestRate != nil {
+ b.brokerRequestRate.Mark(1)
+ }
+
+ requestSize := int64(bytes)
+ b.outgoingByteRate.Mark(requestSize)
+ if b.brokerOutgoingByteRate != nil {
+ b.brokerOutgoingByteRate.Mark(requestSize)
+ }
+
+ b.requestSize.Update(requestSize)
+ if b.brokerRequestSize != nil {
+ b.brokerRequestSize.Update(requestSize)
+ }
+
+}
+
+func (b *Broker) registerMetrics() {
+ b.brokerIncomingByteRate = b.registerMeter("incoming-byte-rate")
+ b.brokerRequestRate = b.registerMeter("request-rate")
+ b.brokerRequestSize = b.registerHistogram("request-size")
+ b.brokerRequestLatency = b.registerHistogram("request-latency-in-ms")
+ b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate")
+ b.brokerResponseRate = b.registerMeter("response-rate")
+ b.brokerResponseSize = b.registerHistogram("response-size")
+}
+
+func (b *Broker) unregisterMetrics() {
+ for _, name := range b.registeredMetrics {
+ b.conf.MetricRegistry.Unregister(name)
+ }
+}
+
+func (b *Broker) registerMeter(name string) metrics.Meter {
+ nameForBroker := getMetricNameForBroker(name, b)
+ b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
+ return metrics.GetOrRegisterMeter(nameForBroker, b.conf.MetricRegistry)
+}
+
+func (b *Broker) registerHistogram(name string) metrics.Histogram {
+ nameForBroker := getMetricNameForBroker(name, b)
+ b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
+ return getOrRegisterHistogram(nameForBroker, b.conf.MetricRegistry)
+}
diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go
new file mode 100644
index 0000000..040cfe9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/client.go
@@ -0,0 +1,1001 @@
+package sarama
+
+import (
+ "math/rand"
+ "sort"
+ "sync"
+ "time"
+)
+
+// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
+// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
+// automatically when it passes out of scope. It is safe to share a client amongst many
+// users, however Kafka will process requests from a single client strictly in serial,
+// so it is generally more efficient to use the default one client per producer/consumer.
+type Client interface {
+ // Config returns the Config struct of the client. This struct should not be
+ // altered after it has been created.
+ Config() *Config
+
+ // Controller returns the cluster controller broker. Requires Kafka 0.10 or higher.
+ Controller() (*Broker, error)
+
+ // Brokers returns the current set of active brokers as retrieved from cluster metadata.
+ Brokers() []*Broker
+
+ // Topics returns the set of available topics as retrieved from cluster metadata.
+ Topics() ([]string, error)
+
+ // Partitions returns the sorted list of all partition IDs for the given topic.
+ Partitions(topic string) ([]int32, error)
+
+ // WritablePartitions returns the sorted list of all writable partition IDs for
+ // the given topic, where "writable" means "having a valid leader accepting
+ // writes".
+ WritablePartitions(topic string) ([]int32, error)
+
+ // Leader returns the broker object that is the leader of the current
+ // topic/partition, as determined by querying the cluster metadata.
+ Leader(topic string, partitionID int32) (*Broker, error)
+
+ // Replicas returns the set of all replica IDs for the given partition.
+ Replicas(topic string, partitionID int32) ([]int32, error)
+
+ // InSyncReplicas returns the set of all in-sync replica IDs for the given
+ // partition. In-sync replicas are replicas which are fully caught up with
+ // the partition leader.
+ InSyncReplicas(topic string, partitionID int32) ([]int32, error)
+
+ // OfflineReplicas returns the set of all offline replica IDs for the given
+ // partition. Offline replicas are replicas which are offline
+ OfflineReplicas(topic string, partitionID int32) ([]int32, error)
+
+ // RefreshMetadata takes a list of topics and queries the cluster to refresh the
+ // available metadata for those topics. If no topics are provided, it will refresh
+ // metadata for all topics.
+ RefreshMetadata(topics ...string) error
+
+ // GetOffset queries the cluster to get the most recent available offset at the
+ // given time (in milliseconds) on the topic/partition combination.
+ // Time should be OffsetOldest for the earliest available offset,
+ // OffsetNewest for the offset of the message that will be produced next, or a time.
+ GetOffset(topic string, partitionID int32, time int64) (int64, error)
+
+ // Coordinator returns the coordinating broker for a consumer group. It will
+ // return a locally cached value if it's available. You can call
+ // RefreshCoordinator to update the cached value. This function only works on
+ // Kafka 0.8.2 and higher.
+ Coordinator(consumerGroup string) (*Broker, error)
+
+ // RefreshCoordinator retrieves the coordinator for a consumer group and stores it
+ // in local cache. This function only works on Kafka 0.8.2 and higher.
+ RefreshCoordinator(consumerGroup string) error
+
+ // InitProducerID retrieves information required for Idempotent Producer
+ InitProducerID() (*InitProducerIDResponse, error)
+
+ // Close shuts down all broker connections managed by this client. It is required
+ // to call this function before a client object passes out of scope, as it will
+ // otherwise leak memory. You must close any Producers or Consumers using a client
+ // before you close the client.
+ Close() error
+
+ // Closed returns true if the client has already had Close called on it
+ Closed() bool
+}
+
+const (
+ // OffsetNewest stands for the log head offset, i.e. the offset that will be
+ // assigned to the next message that will be produced to the partition. You
+ // can send this to a client's GetOffset method to get this offset, or when
+ // calling ConsumePartition to start consuming new messages.
+ OffsetNewest int64 = -1
+ // OffsetOldest stands for the oldest offset available on the broker for a
+ // partition. You can send this to a client's GetOffset method to get this
+ // offset, or when calling ConsumePartition to start consuming from the
+ // oldest offset that is still available on the broker.
+ OffsetOldest int64 = -2
+)
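+
+// An illustrative use of these constants from client code (the topic name is a
+// placeholder) is querying the current log bounds of a partition:
+//
+//	oldest, _ := client.GetOffset("my-topic", 0, sarama.OffsetOldest)
+//	newest, _ := client.GetOffset("my-topic", 0, sarama.OffsetNewest)
+//	available := newest - oldest // approximate number of messages currently retained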
+
+type client struct {
+ conf *Config
+ closer, closed chan none // for shutting down background metadata updater
+
+ // the broker addresses given to us through the constructor are not guaranteed to be returned in
+ // the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
+ // so we store them separately
+ seedBrokers []*Broker
+ deadSeeds []*Broker
+
+ controllerID int32 // cluster controller broker id
+ brokers map[int32]*Broker // maps broker ids to brokers
+ metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
+ metadataTopics map[string]none // topics that need to collect metadata
+ coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs
+
+ // If the number of partitions is large, we can get some churn calling cachedPartitions,
+ // so the result is cached. It is important to update this value whenever metadata is changed
+ cachedPartitionsResults map[string][maxPartitionIndex][]int32
+
+ lock sync.RWMutex // protects access to the maps that hold cluster state.
+}
+
+// NewClient creates a new Client. It connects to one of the given broker addresses
+// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
+// be retrieved from any of the given broker addresses, the client is not created.
+func NewClient(addrs []string, conf *Config) (Client, error) {
+ Logger.Println("Initializing new client")
+
+ if conf == nil {
+ conf = NewConfig()
+ }
+
+ if err := conf.Validate(); err != nil {
+ return nil, err
+ }
+
+ if len(addrs) < 1 {
+ return nil, ConfigurationError("You must provide at least one broker address")
+ }
+
+ client := &client{
+ conf: conf,
+ closer: make(chan none),
+ closed: make(chan none),
+ brokers: make(map[int32]*Broker),
+ metadata: make(map[string]map[int32]*PartitionMetadata),
+ metadataTopics: make(map[string]none),
+ cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
+ coordinators: make(map[string]int32),
+ }
+
+ random := rand.New(rand.NewSource(time.Now().UnixNano()))
+ for _, index := range random.Perm(len(addrs)) {
+ client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
+ }
+
+ if conf.Metadata.Full {
+ // do an initial fetch of all cluster metadata by specifying an empty list of topics
+ err := client.RefreshMetadata()
+ switch err {
+ case nil:
+ break
+ case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
+ // indicates that maybe part of the cluster is down, but is not fatal to creating the client
+ Logger.Println(err)
+ default:
+ close(client.closed) // we haven't started the background updater yet, so we have to do this manually
+ _ = client.Close()
+ return nil, err
+ }
+ }
+ go withRecover(client.backgroundMetadataUpdater)
+
+ Logger.Println("Successfully initialized new client")
+
+ return client, nil
+}
+
+func (client *client) Config() *Config {
+ return client.conf
+}
+
+func (client *client) Brokers() []*Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+ brokers := make([]*Broker, 0, len(client.brokers))
+ for _, broker := range client.brokers {
+ brokers = append(brokers, broker)
+ }
+ return brokers
+}
+
+func (client *client) InitProducerID() (*InitProducerIDResponse, error) {
+ var err error
+ for broker := client.any(); broker != nil; broker = client.any() {
+
+ req := &InitProducerIDRequest{}
+
+ response, err := broker.InitProducerID(req)
+ switch err.(type) {
+ case nil:
+ return response, nil
+ default:
+ // some error, remove that broker and try again
+ Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err)
+ _ = broker.Close()
+ client.deregisterBroker(broker)
+ }
+ }
+ return nil, err
+}
+
+func (client *client) Close() error {
+ if client.Closed() {
+ // Chances are this is being called from a defer() and the error will go unobserved
+ // so we go ahead and log the event in this case.
+ Logger.Printf("Close() called on already closed client")
+ return ErrClosedClient
+ }
+
+ // shutdown and wait for the background thread before we take the lock, to avoid races
+ close(client.closer)
+ <-client.closed
+
+ client.lock.Lock()
+ defer client.lock.Unlock()
+ Logger.Println("Closing Client")
+
+ for _, broker := range client.brokers {
+ safeAsyncClose(broker)
+ }
+
+ for _, broker := range client.seedBrokers {
+ safeAsyncClose(broker)
+ }
+
+ client.brokers = nil
+ client.metadata = nil
+ client.metadataTopics = nil
+
+ return nil
+}
+
+func (client *client) Closed() bool {
+ return client.brokers == nil
+}
+
+func (client *client) Topics() ([]string, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ ret := make([]string, 0, len(client.metadata))
+ for topic := range client.metadata {
+ ret = append(ret, topic)
+ }
+
+ return ret, nil
+}
+
+func (client *client) MetadataTopics() ([]string, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ ret := make([]string, 0, len(client.metadataTopics))
+ for topic := range client.metadataTopics {
+ ret = append(ret, topic)
+ }
+
+ return ret, nil
+}
+
+func (client *client) Partitions(topic string) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ partitions := client.cachedPartitions(topic, allPartitions)
+
+ if len(partitions) == 0 {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ partitions = client.cachedPartitions(topic, allPartitions)
+ }
+
+ // no partitions found after refresh metadata
+ if len(partitions) == 0 {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ return partitions, nil
+}
+
+func (client *client) WritablePartitions(topic string) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ partitions := client.cachedPartitions(topic, writablePartitions)
+
+ // len==0 catches when it's nil (no such topic) and the odd case when every single
+ // partition is undergoing leader election simultaneously. Callers have to be able to handle
+ // this function returning an empty slice (which is a valid return value) but catching it
+ // here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers
+ // a metadata refresh as a nicety so callers can just try again and don't have to manually
+ // trigger a refresh (otherwise they'd just keep getting a stale cached copy).
+ if len(partitions) == 0 {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ partitions = client.cachedPartitions(topic, writablePartitions)
+ }
+
+ if partitions == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ return partitions, nil
+}
+
+func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ metadata := client.cachedMetadata(topic, partitionID)
+
+ if metadata == nil {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ metadata = client.cachedMetadata(topic, partitionID)
+ }
+
+ if metadata == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ if metadata.Err == ErrReplicaNotAvailable {
+ return dupInt32Slice(metadata.Replicas), metadata.Err
+ }
+ return dupInt32Slice(metadata.Replicas), nil
+}
+
+func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ metadata := client.cachedMetadata(topic, partitionID)
+
+ if metadata == nil {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ metadata = client.cachedMetadata(topic, partitionID)
+ }
+
+ if metadata == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ if metadata.Err == ErrReplicaNotAvailable {
+ return dupInt32Slice(metadata.Isr), metadata.Err
+ }
+ return dupInt32Slice(metadata.Isr), nil
+}
+
+func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ metadata := client.cachedMetadata(topic, partitionID)
+
+ if metadata == nil {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ metadata = client.cachedMetadata(topic, partitionID)
+ }
+
+ if metadata == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ if metadata.Err == ErrReplicaNotAvailable {
+ return dupInt32Slice(metadata.OfflineReplicas), metadata.Err
+ }
+ return dupInt32Slice(metadata.OfflineReplicas), nil
+}
+
+func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ leader, err := client.cachedLeader(topic, partitionID)
+
+ if leader == nil {
+ err = client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ leader, err = client.cachedLeader(topic, partitionID)
+ }
+
+ return leader, err
+}
+
+func (client *client) RefreshMetadata(topics ...string) error {
+ if client.Closed() {
+ return ErrClosedClient
+ }
+
+ // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper
+ // error. This handles the case by returning an error instead of sending it
+ // off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
+ for _, topic := range topics {
+ if len(topic) == 0 {
+ return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
+ }
+ }
+
+ deadline := time.Time{}
+ if client.conf.Metadata.Timeout > 0 {
+ deadline = time.Now().Add(client.conf.Metadata.Timeout)
+ }
+ return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline)
+}
+
+func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
+ if client.Closed() {
+ return -1, ErrClosedClient
+ }
+
+ offset, err := client.getOffset(topic, partitionID, time)
+
+ if err != nil {
+ if err := client.RefreshMetadata(topic); err != nil {
+ return -1, err
+ }
+ return client.getOffset(topic, partitionID, time)
+ }
+
+ return offset, err
+}
+
+func (client *client) Controller() (*Broker, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ if !client.conf.Version.IsAtLeast(V0_10_0_0) {
+ return nil, ErrUnsupportedVersion
+ }
+
+ controller := client.cachedController()
+ if controller == nil {
+ if err := client.refreshMetadata(); err != nil {
+ return nil, err
+ }
+ controller = client.cachedController()
+ }
+
+ if controller == nil {
+ return nil, ErrControllerNotAvailable
+ }
+
+ _ = controller.Open(client.conf)
+ return controller, nil
+}
+
+func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ coordinator := client.cachedCoordinator(consumerGroup)
+
+ if coordinator == nil {
+ if err := client.RefreshCoordinator(consumerGroup); err != nil {
+ return nil, err
+ }
+ coordinator = client.cachedCoordinator(consumerGroup)
+ }
+
+ if coordinator == nil {
+ return nil, ErrConsumerCoordinatorNotAvailable
+ }
+
+ _ = coordinator.Open(client.conf)
+ return coordinator, nil
+}
+
+func (client *client) RefreshCoordinator(consumerGroup string) error {
+ if client.Closed() {
+ return ErrClosedClient
+ }
+
+ response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
+ if err != nil {
+ return err
+ }
+
+ client.lock.Lock()
+ defer client.lock.Unlock()
+ client.registerBroker(response.Coordinator)
+ client.coordinators[consumerGroup] = response.Coordinator.ID()
+ return nil
+}
+
+// private broker management helpers
+
+// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
+// in the brokers map. If the broker ID is already registered under a different address, the stale
+// entry is closed and replaced. You must hold the write lock before calling this function.
+func (client *client) registerBroker(broker *Broker) {
+ if client.brokers[broker.ID()] == nil {
+ client.brokers[broker.ID()] = broker
+ Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+ } else if broker.Addr() != client.brokers[broker.ID()].Addr() {
+ safeAsyncClose(client.brokers[broker.ID()])
+ client.brokers[broker.ID()] = broker
+ Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+ }
+}
+
+// deregisterBroker moves the current seed broker from the seedBrokers list to the deadSeeds list;
+// any other broker is removed from the brokers map completely.
+func (client *client) deregisterBroker(broker *Broker) {
+ client.lock.Lock()
+ defer client.lock.Unlock()
+
+ if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
+ client.deadSeeds = append(client.deadSeeds, broker)
+ client.seedBrokers = client.seedBrokers[1:]
+ } else {
+ // we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
+ // but we really shouldn't have to; once that loop is made better this case can be
+ // removed, and the function generally can be renamed from `deregisterBroker` to
+ // `nextSeedBroker` or something
+ Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
+ delete(client.brokers, broker.ID())
+ }
+}
+
+func (client *client) resurrectDeadBrokers() {
+ client.lock.Lock()
+ defer client.lock.Unlock()
+
+ Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
+ client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
+ client.deadSeeds = nil
+}
+
+func (client *client) any() *Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ if len(client.seedBrokers) > 0 {
+ _ = client.seedBrokers[0].Open(client.conf)
+ return client.seedBrokers[0]
+ }
+
+ // not guaranteed to be random *or* deterministic
+ for _, broker := range client.brokers {
+ _ = broker.Open(client.conf)
+ return broker
+ }
+
+ return nil
+}
+
+// private caching/lazy metadata helpers
+
+type partitionType int
+
+const (
+ allPartitions partitionType = iota
+ writablePartitions
+ // If you add any more types, update the partition cache in updateMetadata()
+
+ // Ensure this is the last partition type value
+ maxPartitionIndex
+)
+
+func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ partitions := client.metadata[topic]
+ if partitions != nil {
+ return partitions[partitionID]
+ }
+
+ return nil
+}
+
+func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ partitions, exists := client.cachedPartitionsResults[topic]
+
+ if !exists {
+ return nil
+ }
+ return partitions[partitionSet]
+}
+
+func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
+ partitions := client.metadata[topic]
+
+ if partitions == nil {
+ return nil
+ }
+
+ ret := make([]int32, 0, len(partitions))
+ for _, partition := range partitions {
+ if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
+ continue
+ }
+ ret = append(ret, partition.ID)
+ }
+
+ sort.Sort(int32Slice(ret))
+ return ret
+}
+
+func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ partitions := client.metadata[topic]
+ if partitions != nil {
+ metadata, ok := partitions[partitionID]
+ if ok {
+ if metadata.Err == ErrLeaderNotAvailable {
+ return nil, ErrLeaderNotAvailable
+ }
+ b := client.brokers[metadata.Leader]
+ if b == nil {
+ return nil, ErrLeaderNotAvailable
+ }
+ _ = b.Open(client.conf)
+ return b, nil
+ }
+ }
+
+ return nil, ErrUnknownTopicOrPartition
+}
+
+func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
+ broker, err := client.Leader(topic, partitionID)
+ if err != nil {
+ return -1, err
+ }
+
+ request := &OffsetRequest{}
+ if client.conf.Version.IsAtLeast(V0_10_1_0) {
+ request.Version = 1
+ }
+ request.AddBlock(topic, partitionID, time, 1)
+
+ response, err := broker.GetAvailableOffsets(request)
+ if err != nil {
+ _ = broker.Close()
+ return -1, err
+ }
+
+ block := response.GetBlock(topic, partitionID)
+ if block == nil {
+ _ = broker.Close()
+ return -1, ErrIncompleteResponse
+ }
+ if block.Err != ErrNoError {
+ return -1, block.Err
+ }
+ if len(block.Offsets) != 1 {
+ return -1, ErrOffsetOutOfRange
+ }
+
+ return block.Offsets[0], nil
+}
+
+// core metadata update logic
+
+func (client *client) backgroundMetadataUpdater() {
+ defer close(client.closed)
+
+ if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
+ return
+ }
+
+ ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ if err := client.refreshMetadata(); err != nil {
+ Logger.Println("Client background metadata update:", err)
+ }
+ case <-client.closer:
+ return
+ }
+ }
+}
+
+func (client *client) refreshMetadata() error {
+ topics := []string{}
+
+ if !client.conf.Metadata.Full {
+ if specificTopics, err := client.MetadataTopics(); err != nil {
+ return err
+ } else if len(specificTopics) == 0 {
+ return ErrNoTopicsToUpdateMetadata
+ } else {
+ topics = specificTopics
+ }
+ }
+
+ if err := client.RefreshMetadata(topics...); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, deadline time.Time) error {
+ pastDeadline := func(backoff time.Duration) bool {
+ if !deadline.IsZero() && time.Now().Add(backoff).After(deadline) {
+ // we are past the deadline
+ return true
+ }
+ return false
+ }
+ retry := func(err error) error {
+ if attemptsRemaining > 0 {
+ backoff := client.computeBackoff(attemptsRemaining)
+ if pastDeadline(backoff) {
+ Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout")
+ return err
+ }
+ Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
+ if backoff > 0 {
+ time.Sleep(backoff)
+ }
+ return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline)
+ }
+ return err
+ }
+
+ broker := client.any()
+ for ; broker != nil && !pastDeadline(0); broker = client.any() {
+ allowAutoTopicCreation := true
+ if len(topics) > 0 {
+ Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
+ } else {
+ allowAutoTopicCreation = false
+ Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
+ }
+
+ req := &MetadataRequest{Topics: topics, AllowAutoTopicCreation: allowAutoTopicCreation}
+ if client.conf.Version.IsAtLeast(V1_0_0_0) {
+ req.Version = 5
+ } else if client.conf.Version.IsAtLeast(V0_10_0_0) {
+ req.Version = 1
+ }
+ response, err := broker.GetMetadata(req)
+ switch err.(type) {
+ case nil:
+ allKnownMetaData := len(topics) == 0
+ // valid response, use it
+ shouldRetry, err := client.updateMetadata(response, allKnownMetaData)
+ if shouldRetry {
+ Logger.Println("client/metadata found some partitions to be leaderless")
+ return retry(err) // note: err can be nil
+ }
+ return err
+
+ case PacketEncodingError:
+ // didn't even send, return the error
+ return err
+
+ case KError:
+ // if SASL auth error, return as this _should_ be a non-retryable err for all brokers
+ if err.(KError) == ErrSASLAuthenticationFailed {
+ Logger.Println("client/metadata failed SASL authentication")
+ return err
+ }
+
+ if err.(KError) == ErrTopicAuthorizationFailed {
+ Logger.Println("client is not authorized to access this topic. The topics were: ", topics)
+ return err
+ }
+ // else remove that broker and try again
+ Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
+ _ = broker.Close()
+ client.deregisterBroker(broker)
+
+ default:
+ // some other error, remove that broker and try again
+ Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
+ _ = broker.Close()
+ client.deregisterBroker(broker)
+ }
+ }
+
+ if broker != nil {
+ Logger.Println("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
+ return retry(ErrOutOfBrokers)
+ }
+
+ Logger.Println("client/metadata no available broker to send metadata request to")
+ client.resurrectDeadBrokers()
+ return retry(ErrOutOfBrokers)
+}
+
+// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
+func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
+ client.lock.Lock()
+ defer client.lock.Unlock()
+
+ // For all the brokers we received:
+ // - if it is a new ID, save it
+ // - if it is an existing ID, but the address we have is stale, discard the old one and save it
+ // - otherwise ignore it, replacing our existing one would just bounce the connection
+ for _, broker := range data.Brokers {
+ client.registerBroker(broker)
+ }
+
+ client.controllerID = data.ControllerID
+
+ if allKnownMetaData {
+ client.metadata = make(map[string]map[int32]*PartitionMetadata)
+ client.metadataTopics = make(map[string]none)
+ client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32)
+ }
+ for _, topic := range data.Topics {
+ // topics must be added to `metadataTopics` first, to guarantee that all
+ // requested topics are recorded and remain trackable for the periodic
+ // metadata refresh.
+ if _, exists := client.metadataTopics[topic.Name]; !exists {
+ client.metadataTopics[topic.Name] = none{}
+ }
+ delete(client.metadata, topic.Name)
+ delete(client.cachedPartitionsResults, topic.Name)
+
+ switch topic.Err {
+ case ErrNoError:
+ // no-op
+ case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
+ err = topic.Err
+ continue
+ case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
+ err = topic.Err
+ retry = true
+ continue
+ case ErrLeaderNotAvailable: // retry, but store partial partition results
+ retry = true
+ default: // don't retry, don't store partial results
+ Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
+ err = topic.Err
+ continue
+ }
+
+ client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
+ for _, partition := range topic.Partitions {
+ client.metadata[topic.Name][partition.ID] = partition
+ if partition.Err == ErrLeaderNotAvailable {
+ retry = true
+ }
+ }
+
+ var partitionCache [maxPartitionIndex][]int32
+ partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
+ partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
+ client.cachedPartitionsResults[topic.Name] = partitionCache
+ }
+
+ return
+}
+
+func (client *client) cachedCoordinator(consumerGroup string) *Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+ if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
+ return client.brokers[coordinatorID]
+ }
+ return nil
+}
+
+func (client *client) cachedController() *Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ return client.brokers[client.controllerID]
+}
+
+func (client *client) computeBackoff(attemptsRemaining int) time.Duration {
+ if client.conf.Metadata.Retry.BackoffFunc != nil {
+ maxRetries := client.conf.Metadata.Retry.Max
+ retries := maxRetries - attemptsRemaining
+ return client.conf.Metadata.Retry.BackoffFunc(retries, maxRetries)
+ }
+ return client.conf.Metadata.Retry.Backoff
+}
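+
+// exampleExponentialBackoff is an editor-added, hedged sketch and is not part of
+// upstream sarama: it has the shape expected of Metadata.Retry.BackoffFunc (and
+// Producer.Retry.BackoffFunc), doubling a 100ms base delay on every retry. The
+// base value is illustrative; a real policy might also cap the delay using maxRetries.
+func exampleExponentialBackoff(retries, maxRetries int) time.Duration {
+	return time.Duration(100*(1<<uint(retries))) * time.Millisecond
+}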
+
+func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) {
+ retry := func(err error) (*FindCoordinatorResponse, error) {
+ if attemptsRemaining > 0 {
+ backoff := client.computeBackoff(attemptsRemaining)
+ Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
+ time.Sleep(backoff)
+ return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
+ }
+ return nil, err
+ }
+
+ for broker := client.any(); broker != nil; broker = client.any() {
+ Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
+
+ request := new(FindCoordinatorRequest)
+ request.CoordinatorKey = consumerGroup
+ request.CoordinatorType = CoordinatorGroup
+
+ response, err := broker.FindCoordinator(request)
+
+ if err != nil {
+ Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
+
+ switch err.(type) {
+ case PacketEncodingError:
+ return nil, err
+ default:
+ _ = broker.Close()
+ client.deregisterBroker(broker)
+ continue
+ }
+ }
+
+ switch response.Err {
+ case ErrNoError:
+ Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
+ return response, nil
+
+ case ErrConsumerCoordinatorNotAvailable:
+ Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
+
+ // This is very ugly, but this scenario will only happen once per cluster.
+ // The __consumer_offsets topic only has to be created one time.
+ // The number of partitions is not configurable, but partition 0 should always exist.
+ if _, err := client.Leader("__consumer_offsets", 0); err != nil {
+ Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
+ time.Sleep(2 * time.Second)
+ }
+
+ return retry(ErrConsumerCoordinatorNotAvailable)
+ case ErrGroupAuthorizationFailed:
+ Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", consumerGroup)
+ return retry(ErrGroupAuthorizationFailed)
+
+ default:
+ return nil, response.Err
+ }
+ }
+
+ Logger.Println("client/coordinator no available broker to send consumer metadata request to")
+ client.resurrectDeadBrokers()
+ return retry(ErrOutOfBrokers)
+}
+
+// nopCloserClient embeds an existing Client, but disables
+// the Close method (yet all other methods pass
+// through unchanged). This is for use in larger structs
+// where it is undesirable to close the client that was
+// passed in by the caller.
+type nopCloserClient struct {
+ Client
+}
+
+// Close intercepts and purposely does not call the underlying
+// client's Close() method.
+func (ncc *nopCloserClient) Close() error {
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/Shopify/sarama/compress.go
new file mode 100644
index 0000000..9247c35
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/compress.go
@@ -0,0 +1,75 @@
+package sarama
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "sync"
+
+ "github.com/eapache/go-xerial-snappy"
+ "github.com/pierrec/lz4"
+)
+
+var (
+ lz4WriterPool = sync.Pool{
+ New: func() interface{} {
+ return lz4.NewWriter(nil)
+ },
+ }
+
+ gzipWriterPool = sync.Pool{
+ New: func() interface{} {
+ return gzip.NewWriter(nil)
+ },
+ }
+)
+
+func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) {
+ switch cc {
+ case CompressionNone:
+ return data, nil
+ case CompressionGZIP:
+ var (
+ err error
+ buf bytes.Buffer
+ writer *gzip.Writer
+ )
+ if level != CompressionLevelDefault {
+ writer, err = gzip.NewWriterLevel(&buf, level)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ writer = gzipWriterPool.Get().(*gzip.Writer)
+ defer gzipWriterPool.Put(writer)
+ writer.Reset(&buf)
+ }
+ if _, err := writer.Write(data); err != nil {
+ return nil, err
+ }
+ if err := writer.Close(); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+ case CompressionSnappy:
+ return snappy.Encode(data), nil
+ case CompressionLZ4:
+ writer := lz4WriterPool.Get().(*lz4.Writer)
+ defer lz4WriterPool.Put(writer)
+
+ var buf bytes.Buffer
+ writer.Reset(&buf)
+
+ if _, err := writer.Write(data); err != nil {
+ return nil, err
+ }
+ if err := writer.Close(); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+ case CompressionZSTD:
+ return zstdCompress(nil, data)
+ default:
+ return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)}
+ }
+}
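+
+// exampleCompressGzip is an editor-added, hedged sketch and is not part of
+// upstream sarama: it shows how the internal compress helper above is expected
+// to be invoked for gzip at the default level, reusing the pooled writer.
+func exampleCompressGzip(payload []byte) ([]byte, error) {
+	return compress(CompressionGZIP, CompressionLevelDefault, payload)
+}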
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
new file mode 100644
index 0000000..e515e04
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/config.go
@@ -0,0 +1,695 @@
+package sarama
+
+import (
+ "compress/gzip"
+ "crypto/tls"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "regexp"
+ "time"
+
+ "github.com/rcrowley/go-metrics"
+ "golang.org/x/net/proxy"
+)
+
+const defaultClientID = "sarama"
+
+var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
+
+// Config is used to pass multiple configuration options to Sarama's constructors.
+type Config struct {
+ // Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client.
+ Admin struct {
+ // The maximum duration the administrative Kafka client will wait for ClusterAdmin operations,
+ // including topics, brokers, configurations and ACLs (defaults to 3 seconds).
+ Timeout time.Duration
+ }
+
+ // Net is the namespace for network-level properties used by the Broker, and
+ // shared by the Client/Producer/Consumer.
+ Net struct {
+ // How many outstanding requests a connection is allowed to have before
+ // sending on it blocks (default 5).
+ MaxOpenRequests int
+
+ // All three of the below configurations are similar to the
+ // `socket.timeout.ms` setting in JVM kafka. All of them default
+ // to 30 seconds.
+ DialTimeout time.Duration // How long to wait for the initial connection.
+ ReadTimeout time.Duration // How long to wait for a response.
+ WriteTimeout time.Duration // How long to wait for a transmit.
+
+ TLS struct {
+ // Whether or not to use TLS when connecting to the broker
+ // (defaults to false).
+ Enable bool
+ // The TLS configuration to use for secure connections if
+ // enabled (defaults to nil).
+ Config *tls.Config
+ }
+
+ // SASL based authentication with broker. While there are multiple SASL authentication methods
+ // the current implementation is limited to plaintext (SASL/PLAIN) authentication
+ SASL struct {
+ // Whether or not to use SASL authentication when connecting to the broker
+ // (defaults to false).
+ Enable bool
+ // SASLMechanism is the name of the enabled SASL mechanism.
+ // Possible values: OAUTHBEARER, PLAIN (defaults to PLAIN).
+ Mechanism SASLMechanism
+ // Version is the SASL Protocol Version to use
+ // Kafka > 1.x should use V1, except on Azure EventHub which uses V0
+ Version int16
+ // Whether or not to send the Kafka SASL handshake first if enabled
+ // (defaults to true). You should only set this to false if you're using
+ // a non-Kafka SASL proxy.
+ Handshake bool
+ // username and password for SASL/PLAIN or SASL/SCRAM authentication
+ User string
+ Password string
+ // authz id used for SASL/SCRAM authentication
+ SCRAMAuthzID string
+ // SCRAMClientGeneratorFunc is a generator of a user provided implementation of a SCRAM
+ // client used to perform the SCRAM exchange with the server.
+ SCRAMClientGeneratorFunc func() SCRAMClient
+ // TokenProvider is a user-defined callback for generating
+ // access tokens for SASL/OAUTHBEARER auth. See the
+ // AccessTokenProvider interface docs for proper implementation
+ // guidelines.
+ TokenProvider AccessTokenProvider
+
+ GSSAPI GSSAPIConfig
+ }
+
+ // KeepAlive specifies the keep-alive period for an active network connection.
+ // If zero, keep-alives are disabled. (default is 0: disabled).
+ KeepAlive time.Duration
+
+ // LocalAddr is the local address to use when dialing an
+ // address. The address must be of a compatible type for the
+ // network being dialed.
+ // If nil, a local address is automatically chosen.
+ LocalAddr net.Addr
+
+ Proxy struct {
+ // Whether or not to use proxy when connecting to the broker
+ // (defaults to false).
+ Enable bool
+ // The proxy dialer to use when the proxy is enabled (defaults to nil).
+ Dialer proxy.Dialer
+ }
+ }
+
+ // Metadata is the namespace for metadata management properties used by the
+ // Client, and shared by the Producer/Consumer.
+ Metadata struct {
+ Retry struct {
+ // The total number of times to retry a metadata request when the
+ // cluster is in the middle of a leader election (default 3).
+ Max int
+ // How long to wait for leader election to occur before retrying
+ // (default 250ms). Similar to the JVM's `retry.backoff.ms`.
+ Backoff time.Duration
+ // Called to compute backoff time dynamically. Useful for implementing
+ // more sophisticated backoff strategies. This takes precedence over
+ // `Backoff` if set.
+ BackoffFunc func(retries, maxRetries int) time.Duration
+ }
+ // How frequently to refresh the cluster metadata in the background.
+ // Defaults to 10 minutes. Set to 0 to disable. Similar to
+ // `topic.metadata.refresh.interval.ms` in the JVM version.
+ RefreshFrequency time.Duration
+
+ // Whether to maintain a full set of metadata for all topics, or just
+ // the minimal set that has been necessary so far. The full set is simpler
+ // and usually more convenient, but can take up a substantial amount of
+ // memory if you have many topics and partitions. Defaults to true.
+ Full bool
+
+ // How long to wait for a successful metadata response.
+ // Disabled by default which means a metadata request against an unreachable
+ // cluster (all brokers are unreachable or unresponsive) can take up to
+ // `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max`
+ // to fail.
+ Timeout time.Duration
+ }
+
+ // Producer is the namespace for configuration related to producing messages,
+ // used by the Producer.
+ Producer struct {
+ // The maximum permitted size of a message (defaults to 1000000). Should be
+ // set equal to or smaller than the broker's `message.max.bytes`.
+ MaxMessageBytes int
+ // The level of acknowledgement reliability needed from the broker (defaults
+ // to WaitForLocal). Equivalent to the `request.required.acks` setting of the
+ // JVM producer.
+ RequiredAcks RequiredAcks
+ // The maximum duration the broker will wait for the receipt of the number of
+ // RequiredAcks (defaults to 10 seconds). This is only relevant when
+ // RequiredAcks is set to WaitForAll or a number > 1. Only supports
+ // millisecond resolution, nanoseconds will be truncated. Equivalent to
+ // the JVM producer's `request.timeout.ms` setting.
+ Timeout time.Duration
+ // The type of compression to use on messages (defaults to no compression).
+ // Similar to `compression.codec` setting of the JVM producer.
+ Compression CompressionCodec
+ // The level of compression to use on messages. The meaning depends
+ // on the actual compression type used and defaults to default compression
+ // level for the codec.
+ CompressionLevel int
+ // Generates partitioners for choosing the partition to send messages to
+ // (defaults to hashing the message key). Similar to the `partitioner.class`
+ // setting for the JVM producer.
+ Partitioner PartitionerConstructor
+ // If enabled, the producer will ensure that exactly one copy of each message is
+ // written.
+ Idempotent bool
+
+ // Return specifies what channels will be populated. If they are set to true,
+ // you must read from the respective channels to prevent deadlock. If,
+ // however, this config is used to create a `SyncProducer`, both must be set
+ // to true and you shall not read from the channels since the producer does
+ // this internally.
+ Return struct {
+ // If enabled, successfully delivered messages will be returned on the
+ // Successes channel (default disabled).
+ Successes bool
+
+ // If enabled, messages that failed to deliver will be returned on the
+ // Errors channel, including error (default enabled).
+ Errors bool
+ }
+
+ // The following config options control how often messages are batched up and
+ // sent to the broker. By default, messages are sent as fast as possible, and
+ // all messages received while the current batch is in-flight are placed
+ // into the subsequent batch.
+ Flush struct {
+ // The best-effort number of bytes needed to trigger a flush. Use the
+ // global sarama.MaxRequestSize to set a hard upper limit.
+ Bytes int
+ // The best-effort number of messages needed to trigger a flush. Use
+ // `MaxMessages` to set a hard upper limit.
+ Messages int
+ // The best-effort frequency of flushes. Equivalent to
+ // `queue.buffering.max.ms` setting of JVM producer.
+ Frequency time.Duration
+ // The maximum number of messages the producer will send in a single
+ // broker request. Defaults to 0 for unlimited. Similar to
+ // `queue.buffering.max.messages` in the JVM producer.
+ MaxMessages int
+ }
+
+ Retry struct {
+ // The total number of times to retry sending a message (default 3).
+ // Similar to the `message.send.max.retries` setting of the JVM producer.
+ Max int
+ // How long to wait for the cluster to settle between retries
+ // (default 100ms). Similar to the `retry.backoff.ms` setting of the
+ // JVM producer.
+ Backoff time.Duration
+ // Called to compute backoff time dynamically. Useful for implementing
+ // more sophisticated backoff strategies. This takes precedence over
+ // `Backoff` if set.
+ BackoffFunc func(retries, maxRetries int) time.Duration
+ }
+ }
+
+ // Consumer is the namespace for configuration related to consuming messages,
+ // used by the Consumer.
+ Consumer struct {
+
+ // Group is the namespace for configuring consumer group.
+ Group struct {
+ Session struct {
+ // The timeout used to detect consumer failures when using Kafka's group management facility.
+ // The consumer sends periodic heartbeats to indicate its liveness to the broker.
+ // If no heartbeats are received by the broker before the expiration of this session timeout,
+ // then the broker will remove this consumer from the group and initiate a rebalance.
+ // Note that the value must be in the allowable range as configured in the broker configuration
+ // by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s)
+ Timeout time.Duration
+ }
+ Heartbeat struct {
+ // The expected time between heartbeats to the consumer coordinator when using Kafka's group
+ // management facilities. Heartbeats are used to ensure that the consumer's session stays active and
+ // to facilitate rebalancing when new consumers join or leave the group.
+ // The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no
+ // higher than 1/3 of that value.
+ // It can be adjusted even lower to control the expected time for normal rebalances (default 3s)
+ Interval time.Duration
+ }
+ Rebalance struct {
+ // Strategy for allocating topic partitions to members (default BalanceStrategyRange)
+ Strategy BalanceStrategy
+ // The maximum allowed time for each worker to join the group once a rebalance has begun.
+ // This is basically a limit on the amount of time needed for all tasks to flush any pending
+ // data and commit offsets. If the timeout is exceeded, then the worker will be removed from
+ // the group, which will cause offset commit failures (default 60s).
+ Timeout time.Duration
+
+ Retry struct {
+ // When a new consumer joins a consumer group the set of consumers attempt to "rebalance"
+ // the load to assign partitions to each consumer. If the set of consumers changes while
+ // this assignment is taking place the rebalance will fail and retry. This setting controls
+ // the maximum number of attempts before giving up (default 4).
+ Max int
+ // Backoff time between retries during rebalance (default 2s)
+ Backoff time.Duration
+ }
+ }
+ Member struct {
+ // Custom metadata to include when joining the group. The user data for all joined members
+ // can be retrieved by sending a DescribeGroupRequest to the broker that is the
+ // coordinator for the group.
+ UserData []byte
+ }
+ }
+
+ Retry struct {
+ // How long to wait after a failing to read from a partition before
+ // trying again (default 2s).
+ Backoff time.Duration
+ // Called to compute backoff time dynamically. Useful for implementing
+ // more sophisticated backoff strategies. This takes precedence over
+ // `Backoff` if set.
+ BackoffFunc func(retries int) time.Duration
+ }
+
+ // Fetch is the namespace for controlling how many bytes are retrieved by any
+ // given request.
+ Fetch struct {
+ // The minimum number of message bytes to fetch in a request - the broker
+ // will wait until at least this many are available. The default is 1,
+ // as 0 causes the consumer to spin when no messages are available.
+ // Equivalent to the JVM's `fetch.min.bytes`.
+ Min int32
+ // The default number of message bytes to fetch from the broker in each
+ // request (default 1MB). This should be larger than the majority of
+ // your messages, or else the consumer will spend a lot of time
+ // negotiating sizes and not actually consuming. Similar to the JVM's
+ // `fetch.message.max.bytes`.
+ Default int32
+ // The maximum number of message bytes to fetch from the broker in a
+ // single request. Messages larger than this will return
+ // ErrMessageTooLarge and will not be consumable, so you must be sure
+ // this is at least as large as your largest message. Defaults to 0
+ // (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
+ // global `sarama.MaxResponseSize` still applies.
+ Max int32
+ }
+ // The maximum amount of time the broker will wait for Consumer.Fetch.Min
+ // bytes to become available before it returns fewer than that anyways. The
+ // default is 250ms, since 0 causes the consumer to spin when no events are
+ // available. 100-500ms is a reasonable range for most cases. Kafka only
+ // supports precision up to milliseconds; nanoseconds will be truncated.
+ // Equivalent to the JVM's `fetch.wait.max.ms`.
+ MaxWaitTime time.Duration
+
+ // The maximum amount of time the consumer expects a message takes to
+ // process for the user. If writing to the Messages channel takes longer
+ // than this, that partition will stop fetching more messages until it
+ // can proceed again.
+ // Note that, since the Messages channel is buffered, the actual grace time is
+ // (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
+ // If a message is not written to the Messages channel between two ticks
+ // of the expiryTicker then a timeout is detected.
+ // Using a ticker instead of a timer to detect timeouts should typically
+ // result in many fewer calls to Timer functions which may result in a
+ // significant performance improvement if many messages are being sent
+ // and timeouts are infrequent.
+ // The disadvantage of using a ticker instead of a timer is that
+ // timeouts will be less accurate. That is, the effective timeout could
+ // be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
+ // example, if `MaxProcessingTime` is 100ms then a delay of 180ms
+ // between two messages being sent may not be recognized as a timeout.
+ MaxProcessingTime time.Duration
+
+ // Return specifies what channels will be populated. If they are set to true,
+ // you must read from them to prevent deadlock.
+ Return struct {
+ // If enabled, any errors that occurred while consuming are returned on
+ // the Errors channel (default disabled).
+ Errors bool
+ }
+
+ // Offsets specifies configuration for how and when to commit consumed
+ // offsets. This currently requires the manual use of an OffsetManager
+ // but will eventually be automated.
+ Offsets struct {
+ // How frequently to commit updated offsets. Defaults to 1s.
+ CommitInterval time.Duration
+
+ // The initial offset to use if no offset was previously committed.
+ // Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
+ Initial int64
+
+ // The retention duration for committed offsets. If zero, disabled
+ // (in which case the `offsets.retention.minutes` option on the
+ // broker will be used). Kafka only supports precision up to
+ // milliseconds; nanoseconds will be truncated. Requires Kafka
+ // broker version 0.9.0 or later.
+ // (default is 0: disabled).
+ Retention time.Duration
+
+ Retry struct {
+ // The total number of times to retry failing commit
+ // requests during OffsetManager shutdown (default 3).
+ Max int
+ }
+ }
+
+ // IsolationLevel supports 2 modes:
+ // - use `ReadUncommitted` (default) to consume and return all messages in message channel
+ // - use `ReadCommitted` to hide messages that are part of an aborted transaction
+ IsolationLevel IsolationLevel
+ }
+
+ // A user-provided string sent with every request to the brokers for logging,
+ // debugging, and auditing purposes. Defaults to "sarama", but you should
+ // probably set it to something specific to your application.
+ ClientID string
+ // The number of events to buffer in internal and external channels. This
+ // permits the producer and consumer to continue processing some messages
+ // in the background while user code is working, greatly improving throughput.
+ // Defaults to 256.
+ ChannelBufferSize int
+ // The version of Kafka that Sarama will assume it is running against.
+ // Defaults to the oldest supported stable version. Since Kafka provides
+ // backwards-compatibility, setting it to a version older than you have
+ // will not break anything, although it may prevent you from using the
+ // latest features. Setting it to a version greater than you are actually
+ // running may lead to random breakage.
+ Version KafkaVersion
+ // The registry to define metrics into.
+ // Defaults to a local registry.
+ // If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
+ // prior to starting Sarama.
+ // See Examples on how to use the metrics registry
+ MetricRegistry metrics.Registry
+}
+
+// NewConfig returns a new configuration instance with sane defaults.
+func NewConfig() *Config {
+ c := &Config{}
+
+ c.Admin.Timeout = 3 * time.Second
+
+ c.Net.MaxOpenRequests = 5
+ c.Net.DialTimeout = 30 * time.Second
+ c.Net.ReadTimeout = 30 * time.Second
+ c.Net.WriteTimeout = 30 * time.Second
+ c.Net.SASL.Handshake = true
+ c.Net.SASL.Version = SASLHandshakeV0
+
+ c.Metadata.Retry.Max = 3
+ c.Metadata.Retry.Backoff = 250 * time.Millisecond
+ c.Metadata.RefreshFrequency = 10 * time.Minute
+ c.Metadata.Full = true
+
+ c.Producer.MaxMessageBytes = 1000000
+ c.Producer.RequiredAcks = WaitForLocal
+ c.Producer.Timeout = 10 * time.Second
+ c.Producer.Partitioner = NewHashPartitioner
+ c.Producer.Retry.Max = 3
+ c.Producer.Retry.Backoff = 100 * time.Millisecond
+ c.Producer.Return.Errors = true
+ c.Producer.CompressionLevel = CompressionLevelDefault
+
+ c.Consumer.Fetch.Min = 1
+ c.Consumer.Fetch.Default = 1024 * 1024
+ c.Consumer.Retry.Backoff = 2 * time.Second
+ c.Consumer.MaxWaitTime = 250 * time.Millisecond
+ c.Consumer.MaxProcessingTime = 100 * time.Millisecond
+ c.Consumer.Return.Errors = false
+ c.Consumer.Offsets.CommitInterval = 1 * time.Second
+ c.Consumer.Offsets.Initial = OffsetNewest
+ c.Consumer.Offsets.Retry.Max = 3
+
+ c.Consumer.Group.Session.Timeout = 10 * time.Second
+ c.Consumer.Group.Heartbeat.Interval = 3 * time.Second
+ c.Consumer.Group.Rebalance.Strategy = BalanceStrategyRange
+ c.Consumer.Group.Rebalance.Timeout = 60 * time.Second
+ c.Consumer.Group.Rebalance.Retry.Max = 4
+ c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second
+
+ c.ClientID = defaultClientID
+ c.ChannelBufferSize = 256
+ c.Version = MinVersion
+ c.MetricRegistry = metrics.NewRegistry()
+
+ return c
+}
+
+// Validate checks a Config instance. It will return a
+// ConfigurationError if the specified values don't make sense.
+func (c *Config) Validate() error {
+ // some configuration values should be warned on but not fail completely, do those first
+ if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
+ Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
+ }
+ if !c.Net.SASL.Enable {
+ if c.Net.SASL.User != "" {
+ Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
+ }
+ if c.Net.SASL.Password != "" {
+ Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
+ }
+ }
+ if c.Producer.RequiredAcks > 1 {
+ Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
+ }
+ if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
+ Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
+ }
+ if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
+ Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
+ }
+ if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
+ Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
+ }
+ if c.Producer.Timeout%time.Millisecond != 0 {
+ Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
+ }
+ if c.Consumer.MaxWaitTime < 100*time.Millisecond {
+ Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
+ }
+ if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
+ Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
+ }
+ if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
+ Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
+ }
+ if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 {
+ Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
+ }
+ if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 {
+ Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
+ }
+ if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 {
+ Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.")
+ }
+ if c.ClientID == defaultClientID {
+ Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
+ }
+
+ // validate Net values
+ switch {
+ case c.Net.MaxOpenRequests <= 0:
+ return ConfigurationError("Net.MaxOpenRequests must be > 0")
+ case c.Net.DialTimeout <= 0:
+ return ConfigurationError("Net.DialTimeout must be > 0")
+ case c.Net.ReadTimeout <= 0:
+ return ConfigurationError("Net.ReadTimeout must be > 0")
+ case c.Net.WriteTimeout <= 0:
+ return ConfigurationError("Net.WriteTimeout must be > 0")
+ case c.Net.KeepAlive < 0:
+ return ConfigurationError("Net.KeepAlive must be >= 0")
+ case c.Net.SASL.Enable:
+ if c.Net.SASL.Mechanism == "" {
+ c.Net.SASL.Mechanism = SASLTypePlaintext
+ }
+
+ switch c.Net.SASL.Mechanism {
+ case SASLTypePlaintext:
+ if c.Net.SASL.User == "" {
+ return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
+ }
+ if c.Net.SASL.Password == "" {
+ return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
+ }
+ case SASLTypeOAuth:
+ if c.Net.SASL.TokenProvider == nil {
+ return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider")
+ }
+ case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
+ if c.Net.SASL.User == "" {
+ return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
+ }
+ if c.Net.SASL.Password == "" {
+ return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
+ }
+ if c.Net.SASL.SCRAMClientGeneratorFunc == nil {
+ return ConfigurationError("A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc")
+ }
+ case SASLTypeGSSAPI:
+ if c.Net.SASL.GSSAPI.ServiceName == "" {
+ return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used")
+ }
+
+ if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH {
+ if c.Net.SASL.GSSAPI.Password == "" {
+ return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " +
+ "mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH")
+ }
+ } else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH {
+ if c.Net.SASL.GSSAPI.KeyTabPath == "" {
+ return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" +
+ " and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH")
+ }
+ } else {
+ return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH")
+ }
+ if c.Net.SASL.GSSAPI.KerberosConfigPath == "" {
+ return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used")
+ }
+ if c.Net.SASL.GSSAPI.Username == "" {
+ return ConfigurationError("Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used")
+ }
+ if c.Net.SASL.GSSAPI.Realm == "" {
+ return ConfigurationError("Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used")
+ }
+ default:
+ msg := fmt.Sprintf("The SASL mechanism configuration is invalid. Possible values are `%s`, `%s`, `%s`, `%s` and `%s`",
+ SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512, SASLTypeGSSAPI)
+ return ConfigurationError(msg)
+ }
+ }
+
+ // validate the Admin values
+ switch {
+ case c.Admin.Timeout <= 0:
+ return ConfigurationError("Admin.Timeout must be > 0")
+ }
+
+ // validate the Metadata values
+ switch {
+ case c.Metadata.Retry.Max < 0:
+ return ConfigurationError("Metadata.Retry.Max must be >= 0")
+ case c.Metadata.Retry.Backoff < 0:
+ return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
+ case c.Metadata.RefreshFrequency < 0:
+ return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
+ }
+
+ // validate the Producer values
+ switch {
+ case c.Producer.MaxMessageBytes <= 0:
+ return ConfigurationError("Producer.MaxMessageBytes must be > 0")
+ case c.Producer.RequiredAcks < -1:
+ return ConfigurationError("Producer.RequiredAcks must be >= -1")
+ case c.Producer.Timeout <= 0:
+ return ConfigurationError("Producer.Timeout must be > 0")
+ case c.Producer.Partitioner == nil:
+ return ConfigurationError("Producer.Partitioner must not be nil")
+ case c.Producer.Flush.Bytes < 0:
+ return ConfigurationError("Producer.Flush.Bytes must be >= 0")
+ case c.Producer.Flush.Messages < 0:
+ return ConfigurationError("Producer.Flush.Messages must be >= 0")
+ case c.Producer.Flush.Frequency < 0:
+ return ConfigurationError("Producer.Flush.Frequency must be >= 0")
+ case c.Producer.Flush.MaxMessages < 0:
+ return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
+ case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
+ return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
+ case c.Producer.Retry.Max < 0:
+ return ConfigurationError("Producer.Retry.Max must be >= 0")
+ case c.Producer.Retry.Backoff < 0:
+ return ConfigurationError("Producer.Retry.Backoff must be >= 0")
+ }
+
+ if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
+ return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
+ }
+
+ if c.Producer.Compression == CompressionGZIP {
+ if c.Producer.CompressionLevel != CompressionLevelDefault {
+ if _, err := gzip.NewWriterLevel(ioutil.Discard, c.Producer.CompressionLevel); err != nil {
+ return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err))
+ }
+ }
+ }
+
+ if c.Producer.Idempotent {
+ if !c.Version.IsAtLeast(V0_11_0_0) {
+ return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0")
+ }
+ if c.Producer.Retry.Max == 0 {
+ return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1")
+ }
+ if c.Producer.RequiredAcks != WaitForAll {
+ return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll")
+ }
+ if c.Net.MaxOpenRequests > 1 {
+ return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1")
+ }
+ }
+
+ // validate the Consumer values
+ switch {
+ case c.Consumer.Fetch.Min <= 0:
+ return ConfigurationError("Consumer.Fetch.Min must be > 0")
+ case c.Consumer.Fetch.Default <= 0:
+ return ConfigurationError("Consumer.Fetch.Default must be > 0")
+ case c.Consumer.Fetch.Max < 0:
+ return ConfigurationError("Consumer.Fetch.Max must be >= 0")
+ case c.Consumer.MaxWaitTime < 1*time.Millisecond:
+ return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
+ case c.Consumer.MaxProcessingTime <= 0:
+ return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
+ case c.Consumer.Retry.Backoff < 0:
+ return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
+ case c.Consumer.Offsets.CommitInterval <= 0:
+ return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
+ case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
+ return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
+ case c.Consumer.Offsets.Retry.Max < 0:
+ return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0")
+ case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted:
+ return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted")
+ }
+
+ // validate IsolationLevel
+ if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) {
+ return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0")
+ }
+
+ // validate the Consumer Group values
+ switch {
+ case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond:
+ return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms")
+ case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond:
+ return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms")
+ case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout:
+ return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout")
+ case c.Consumer.Group.Rebalance.Strategy == nil:
+ return ConfigurationError("Consumer.Group.Rebalance.Strategy must not be empty")
+ case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond:
+ return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms")
+ case c.Consumer.Group.Rebalance.Retry.Max < 0:
+ return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0")
+ case c.Consumer.Group.Rebalance.Retry.Backoff < 0:
+ return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0")
+ }
+
+ // validate misc shared values
+ switch {
+ case c.ChannelBufferSize < 0:
+ return ConfigurationError("ChannelBufferSize must be >= 0")
+ case !validID.MatchString(c.ClientID):
+ return ConfigurationError("ClientID is invalid")
+ }
+
+ return nil
+}
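+
+// exampleConfig is an editor-added, hedged sketch and is not part of upstream
+// sarama: it starts from NewConfig's defaults, tweaks a few of the fields
+// documented above and validates the result. The client ID and Kafka version
+// shown here are illustrative only.
+func exampleConfig() (*Config, error) {
+	c := NewConfig()
+	c.ClientID = "device-management" // hypothetical application-specific ID
+	c.Version = V1_0_0_0             // pick the version of the brokers actually deployed
+	c.Consumer.Return.Errors = true  // surface consume errors on the Errors channel
+	c.Metadata.Full = false          // only track metadata for topics actually in use
+	if err := c.Validate(); err != nil {
+		return nil, err
+	}
+	return c, nil
+}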
diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/Shopify/sarama/config_resource_type.go
new file mode 100644
index 0000000..5399d75
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/config_resource_type.go
@@ -0,0 +1,22 @@
+package sarama
+
+//ConfigResourceType is a type for config resource
+type ConfigResourceType int8
+
+// Taken from :
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs#KIP-133:DescribeandAlterConfigsAdminAPIs-WireFormattypes
+
+const (
+ //UnknownResource constant type
+ UnknownResource ConfigResourceType = iota
+ //AnyResource constant type
+ AnyResource
+ //TopicResource constant type
+ TopicResource
+ //GroupResource constant type
+ GroupResource
+ //ClusterResource constant type
+ ClusterResource
+ //BrokerResource constant type
+ BrokerResource
+)
diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go
new file mode 100644
index 0000000..72c4d7c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer.go
@@ -0,0 +1,896 @@
+package sarama
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+// ConsumerMessage encapsulates a Kafka message returned by the consumer.
+type ConsumerMessage struct {
+ Headers []*RecordHeader // only set if kafka is version 0.11+
+ Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
+ BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
+
+ Key, Value []byte
+ Topic string
+ Partition int32
+ Offset int64
+}
+
+// ConsumerError is what is provided to the user when an error occurs.
+// It wraps an error and includes the topic and partition.
+type ConsumerError struct {
+ Topic string
+ Partition int32
+ Err error
+}
+
+func (ce ConsumerError) Error() string {
+ return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
+}
+
+// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
+// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
+// when stopping.
+type ConsumerErrors []*ConsumerError
+
+func (ce ConsumerErrors) Error() string {
+ return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
+}
+
+// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
+// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
+// scope.
+type Consumer interface {
+ // Topics returns the set of available topics as retrieved from the cluster
+ // metadata. This method is the same as Client.Topics(), and is provided for
+ // convenience.
+ Topics() ([]string, error)
+
+ // Partitions returns the sorted list of all partition IDs for the given topic.
+ // This method is the same as Client.Partitions(), and is provided for convenience.
+ Partitions(topic string) ([]int32, error)
+
+ // ConsumePartition creates a PartitionConsumer on the given topic/partition with
+ // the given offset. It will return an error if this Consumer is already consuming
+ // on the given topic/partition. Offset can be a literal offset, or OffsetNewest
+ // or OffsetOldest
+ ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
+
+ // HighWaterMarks returns the current high water marks for each topic and partition.
+ // Consistency between partitions is not guaranteed since high water marks are updated separately.
+ HighWaterMarks() map[string]map[int32]int64
+
+ // Close shuts down the consumer. It must be called after all child
+ // PartitionConsumers have already been closed.
+ Close() error
+}
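+
+// exampleListPartitions is an editor-added, hedged sketch and is not part of
+// upstream sarama: it walks the Consumer interface above, mapping every visible
+// topic to its sorted partition IDs.
+func exampleListPartitions(c Consumer) (map[string][]int32, error) {
+	topics, err := c.Topics()
+	if err != nil {
+		return nil, err
+	}
+	out := make(map[string][]int32, len(topics))
+	for _, topic := range topics {
+		partitions, err := c.Partitions(topic)
+		if err != nil {
+			return nil, err
+		}
+		out[topic] = partitions
+	}
+	return out, nil
+}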
+
+type consumer struct {
+ conf *Config
+ children map[string]map[int32]*partitionConsumer
+ brokerConsumers map[*Broker]*brokerConsumer
+ client Client
+ lock sync.Mutex
+}
+
+// NewConsumer creates a new consumer using the given broker addresses and configuration.
+func NewConsumer(addrs []string, config *Config) (Consumer, error) {
+ client, err := NewClient(addrs, config)
+ if err != nil {
+ return nil, err
+ }
+ return newConsumer(client)
+}
+
+// NewConsumerFromClient creates a new consumer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
+func NewConsumerFromClient(client Client) (Consumer, error) {
+ // For clients passed in by the caller, ensure we don't
+ // call Close() on it.
+ cli := &nopCloserClient{client}
+ return newConsumer(cli)
+}
+
+func newConsumer(client Client) (Consumer, error) {
+ // Check that we are not dealing with a closed Client before processing any other arguments
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ c := &consumer{
+ client: client,
+ conf: client.Config(),
+ children: make(map[string]map[int32]*partitionConsumer),
+ brokerConsumers: make(map[*Broker]*brokerConsumer),
+ }
+
+ return c, nil
+}
+
+func (c *consumer) Close() error {
+ return c.client.Close()
+}
+
+func (c *consumer) Topics() ([]string, error) {
+ return c.client.Topics()
+}
+
+func (c *consumer) Partitions(topic string) ([]int32, error) {
+ return c.client.Partitions(topic)
+}
+
+func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
+ child := &partitionConsumer{
+ consumer: c,
+ conf: c.conf,
+ topic: topic,
+ partition: partition,
+ messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
+ errors: make(chan *ConsumerError, c.conf.ChannelBufferSize),
+ feeder: make(chan *FetchResponse, 1),
+ trigger: make(chan none, 1),
+ dying: make(chan none),
+ fetchSize: c.conf.Consumer.Fetch.Default,
+ }
+
+ if err := child.chooseStartingOffset(offset); err != nil {
+ return nil, err
+ }
+
+ var leader *Broker
+ var err error
+ if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
+ return nil, err
+ }
+
+ if err := c.addChild(child); err != nil {
+ return nil, err
+ }
+
+ go withRecover(child.dispatcher)
+ go withRecover(child.responseFeeder)
+
+ child.broker = c.refBrokerConsumer(leader)
+ child.broker.input <- child
+
+ return child, nil
+}
+
+func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ hwms := make(map[string]map[int32]int64)
+ for topic, p := range c.children {
+ hwm := make(map[int32]int64, len(p))
+ for partition, pc := range p {
+ hwm[partition] = pc.HighWaterMarkOffset()
+ }
+ hwms[topic] = hwm
+ }
+
+ return hwms
+}
+
+func (c *consumer) addChild(child *partitionConsumer) error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ topicChildren := c.children[child.topic]
+ if topicChildren == nil {
+ topicChildren = make(map[int32]*partitionConsumer)
+ c.children[child.topic] = topicChildren
+ }
+
+ if topicChildren[child.partition] != nil {
+ return ConfigurationError("That topic/partition is already being consumed")
+ }
+
+ topicChildren[child.partition] = child
+ return nil
+}
+
+func (c *consumer) removeChild(child *partitionConsumer) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ delete(c.children[child.topic], child.partition)
+}
+
+func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ bc := c.brokerConsumers[broker]
+ if bc == nil {
+ bc = c.newBrokerConsumer(broker)
+ c.brokerConsumers[broker] = bc
+ }
+
+ bc.refs++
+
+ return bc
+}
+
+func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ brokerWorker.refs--
+
+ if brokerWorker.refs == 0 {
+ close(brokerWorker.input)
+ if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
+ delete(c.brokerConsumers, brokerWorker.broker)
+ }
+ }
+}
+
+func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ delete(c.brokerConsumers, brokerWorker.broker)
+}
+
+// PartitionConsumer
+
+// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
+// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
+// of scope.
+//
+// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
+// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
+// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
+// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
+// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
+// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
+// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
+//
+// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
+// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
+// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
+// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
+// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
+type PartitionConsumer interface {
+ // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
+ // should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
+ // function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call
+ // this before calling Close on the underlying client.
+ AsyncClose()
+
+ // Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
+ // the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
+ // the Messages channel when this function is called, you will be competing with Close for messages; consider
+ // calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
+ // out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
+ Close() error
+
+ // Messages returns the read channel for the messages that are returned by
+ // the broker.
+ Messages() <-chan *ConsumerMessage
+
+ // Errors returns a read channel of errors that occurred during consuming, if
+ // enabled. By default, errors are logged and not returned over this channel.
+ // If you want to implement any custom error handling, set your config's
+ // Consumer.Return.Errors setting to true, and read from this channel.
+ Errors() <-chan *ConsumerError
+
+ // HighWaterMarkOffset returns the high water mark offset of the partition,
+ // i.e. the offset that will be used for the next message that will be produced.
+ // You can use this to determine how far behind the processing is.
+ HighWaterMarkOffset() int64
+}
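+
+// A minimal usage sketch for the interface above (the broker address and the
+// topic name "my_topic" are illustrative, and error handling is simplified):
+//
+//	consumer, err := NewConsumer([]string{"localhost:9092"}, nil)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer consumer.Close()
+//
+//	pc, err := consumer.ConsumePartition("my_topic", 0, OffsetNewest)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer pc.Close()
+//
+//	for msg := range pc.Messages() {
+//		// HighWaterMarkOffset gives a rough measure of how far behind we are
+//		lag := pc.HighWaterMarkOffset() - msg.Offset - 1
+//		fmt.Printf("offset %d (lag %d): %s\n", msg.Offset, lag, string(msg.Value))
+//	}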
+
+type partitionConsumer struct {
+ highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+
+ consumer *consumer
+ conf *Config
+ broker *brokerConsumer
+ messages chan *ConsumerMessage
+ errors chan *ConsumerError
+ feeder chan *FetchResponse
+
+ trigger, dying chan none
+ closeOnce sync.Once
+ topic string
+ partition int32
+ responseResult error
+ fetchSize int32
+ offset int64
+ retries int32
+}
+
+var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
+
+func (child *partitionConsumer) sendError(err error) {
+ cErr := &ConsumerError{
+ Topic: child.topic,
+ Partition: child.partition,
+ Err: err,
+ }
+
+ if child.conf.Consumer.Return.Errors {
+ child.errors <- cErr
+ } else {
+ Logger.Println(cErr)
+ }
+}
+
+func (child *partitionConsumer) computeBackoff() time.Duration {
+ if child.conf.Consumer.Retry.BackoffFunc != nil {
+ retries := atomic.AddInt32(&child.retries, 1)
+ return child.conf.Consumer.Retry.BackoffFunc(int(retries))
+ }
+ return child.conf.Consumer.Retry.Backoff
+}
+
+func (child *partitionConsumer) dispatcher() {
+ for range child.trigger {
+ select {
+ case <-child.dying:
+ close(child.trigger)
+ case <-time.After(child.computeBackoff()):
+ if child.broker != nil {
+ child.consumer.unrefBrokerConsumer(child.broker)
+ child.broker = nil
+ }
+
+ Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
+ if err := child.dispatch(); err != nil {
+ child.sendError(err)
+ child.trigger <- none{}
+ }
+ }
+ }
+
+ if child.broker != nil {
+ child.consumer.unrefBrokerConsumer(child.broker)
+ }
+ child.consumer.removeChild(child)
+ close(child.feeder)
+}
+
+func (child *partitionConsumer) dispatch() error {
+ if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
+ return err
+ }
+
+ var leader *Broker
+ var err error
+ if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
+ return err
+ }
+
+ child.broker = child.consumer.refBrokerConsumer(leader)
+
+ child.broker.input <- child
+
+ return nil
+}
+
+func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
+ newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
+ if err != nil {
+ return err
+ }
+ oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
+ if err != nil {
+ return err
+ }
+
+ switch {
+ case offset == OffsetNewest:
+ child.offset = newestOffset
+ case offset == OffsetOldest:
+ child.offset = oldestOffset
+ case offset >= oldestOffset && offset <= newestOffset:
+ child.offset = offset
+ default:
+ return ErrOffsetOutOfRange
+ }
+
+ return nil
+}
+
+func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
+ return child.messages
+}
+
+func (child *partitionConsumer) Errors() <-chan *ConsumerError {
+ return child.errors
+}
+
+func (child *partitionConsumer) AsyncClose() {
+ // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
+ // the dispatcher to exit its loop, which removes it from the consumer and then closes its 'messages' and
+ // 'errors' channels (alternatively, if the child is already at the dispatcher for some reason, that will
+ // also just close itself)
+ child.closeOnce.Do(func() {
+ close(child.dying)
+ })
+}
+
+func (child *partitionConsumer) Close() error {
+ child.AsyncClose()
+
+ var errors ConsumerErrors
+ for err := range child.errors {
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+func (child *partitionConsumer) HighWaterMarkOffset() int64 {
+ return atomic.LoadInt64(&child.highWaterMarkOffset)
+}
+
+func (child *partitionConsumer) responseFeeder() {
+ var msgs []*ConsumerMessage
+ expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
+ firstAttempt := true
+
+feederLoop:
+ for response := range child.feeder {
+ msgs, child.responseResult = child.parseResponse(response)
+
+ if child.responseResult == nil {
+ atomic.StoreInt32(&child.retries, 0)
+ }
+
+ for i, msg := range msgs {
+ messageSelect:
+ select {
+ case <-child.dying:
+ child.broker.acks.Done()
+ continue feederLoop
+ case child.messages <- msg:
+ firstAttempt = true
+ case <-expiryTicker.C:
+ if !firstAttempt {
+ child.responseResult = errTimedOut
+ child.broker.acks.Done()
+ remainingLoop:
+ for _, msg = range msgs[i:] {
+ select {
+ case child.messages <- msg:
+ case <-child.dying:
+ break remainingLoop
+ }
+ }
+ child.broker.input <- child
+ continue feederLoop
+ } else {
+ // current message has not been sent, return to select
+ // statement
+ firstAttempt = false
+ goto messageSelect
+ }
+ }
+ }
+
+ child.broker.acks.Done()
+ }
+
+ expiryTicker.Stop()
+ close(child.messages)
+ close(child.errors)
+}
+
+func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
+ var messages []*ConsumerMessage
+ for _, msgBlock := range msgSet.Messages {
+ for _, msg := range msgBlock.Messages() {
+ offset := msg.Offset
+ timestamp := msg.Msg.Timestamp
+ if msg.Msg.Version >= 1 {
+ baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
+ offset += baseOffset
+ if msg.Msg.LogAppendTime {
+ timestamp = msgBlock.Msg.Timestamp
+ }
+ }
+ if offset < child.offset {
+ continue
+ }
+ messages = append(messages, &ConsumerMessage{
+ Topic: child.topic,
+ Partition: child.partition,
+ Key: msg.Msg.Key,
+ Value: msg.Msg.Value,
+ Offset: offset,
+ Timestamp: timestamp,
+ BlockTimestamp: msgBlock.Msg.Timestamp,
+ })
+ child.offset = offset + 1
+ }
+ }
+ if len(messages) == 0 {
+ child.offset++
+ }
+ return messages, nil
+}
+
+func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
+ messages := make([]*ConsumerMessage, 0, len(batch.Records))
+
+ for _, rec := range batch.Records {
+ offset := batch.FirstOffset + rec.OffsetDelta
+ if offset < child.offset {
+ continue
+ }
+ timestamp := batch.FirstTimestamp.Add(rec.TimestampDelta)
+ if batch.LogAppendTime {
+ timestamp = batch.MaxTimestamp
+ }
+ messages = append(messages, &ConsumerMessage{
+ Topic: child.topic,
+ Partition: child.partition,
+ Key: rec.Key,
+ Value: rec.Value,
+ Offset: offset,
+ Timestamp: timestamp,
+ Headers: rec.Headers,
+ })
+ child.offset = offset + 1
+ }
+ if len(messages) == 0 {
+ child.offset++
+ }
+ return messages, nil
+}
+
+func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
+ var (
+ metricRegistry = child.conf.MetricRegistry
+ consumerBatchSizeMetric metrics.Histogram
+ )
+
+ if metricRegistry != nil {
+ consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", metricRegistry)
+ }
+
+ // If request was throttled and empty we log and return without error
+ if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 {
+ Logger.Printf(
+ "consumer/broker/%d FetchResponse throttled %v\n",
+ child.broker.broker.ID(), response.ThrottleTime)
+ return nil, nil
+ }
+
+ block := response.GetBlock(child.topic, child.partition)
+ if block == nil {
+ return nil, ErrIncompleteResponse
+ }
+
+ if block.Err != ErrNoError {
+ return nil, block.Err
+ }
+
+ nRecs, err := block.numRecords()
+ if err != nil {
+ return nil, err
+ }
+
+ consumerBatchSizeMetric.Update(int64(nRecs))
+
+ if nRecs == 0 {
+ partialTrailingMessage, err := block.isPartial()
+ if err != nil {
+ return nil, err
+ }
+ // We got no messages. If we got a trailing one then we need to ask for more data.
+ // Otherwise we just poll again and wait for one to be produced...
+ if partialTrailingMessage {
+ if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
+ // we can't ask for more data, we've hit the configured limit
+ child.sendError(ErrMessageTooLarge)
+ child.offset++ // skip this one so we can keep processing future messages
+ } else {
+ child.fetchSize *= 2
+ // check int32 overflow
+ if child.fetchSize < 0 {
+ child.fetchSize = math.MaxInt32
+ }
+ if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
+ child.fetchSize = child.conf.Consumer.Fetch.Max
+ }
+ }
+ }
+
+ return nil, nil
+ }
+
+ // we got messages, reset our fetch size in case it was increased for a previous request
+ child.fetchSize = child.conf.Consumer.Fetch.Default
+ atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
+
+ // abortedProducerIDs contains the producer IDs whose messages should be ignored as uncommitted
+ // - a producer ID is added when the partitionConsumer iterates over the offset at which an aborted transaction begins (abortedTransaction.FirstOffset)
+ // - a producer ID is removed when the partitionConsumer iterates over an abort control record, meaning the aborted transaction for that producer is over
+ abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions))
+ abortedTransactions := block.getAbortedTransactions()
+
+ messages := []*ConsumerMessage{}
+ for _, records := range block.RecordsSet {
+ switch records.recordsType {
+ case legacyRecords:
+ messageSetMessages, err := child.parseMessages(records.MsgSet)
+ if err != nil {
+ return nil, err
+ }
+
+ messages = append(messages, messageSetMessages...)
+ case defaultRecords:
+ // Consume remaining abortedTransaction up to last offset of current batch
+ for _, txn := range abortedTransactions {
+ if txn.FirstOffset > records.RecordBatch.LastOffset() {
+ break
+ }
+ abortedProducerIDs[txn.ProducerID] = struct{}{}
+ // Pop the transaction off abortedTransactions so that we never add it again
+ abortedTransactions = abortedTransactions[1:]
+ }
+
+ recordBatchMessages, err := child.parseRecords(records.RecordBatch)
+ if err != nil {
+ return nil, err
+ }
+
+ // Parse and commit offset but do not expose messages that are:
+ // - control records
+ // - part of an aborted transaction when set to `ReadCommitted`
+
+ // control record
+ isControl, err := records.isControl()
+ if err != nil {
+ // It is unclear why the original code continued on error here to begin with.
+ // The safe bet is to keep ignoring control messages on error when ReadUncommitted,
+ // and to surface the error when ReadCommitted.
+ if child.conf.Consumer.IsolationLevel == ReadCommitted {
+ return nil, err
+ }
+ continue
+ }
+ if isControl {
+ controlRecord, err := records.getControlRecord()
+ if err != nil {
+ return nil, err
+ }
+
+ if controlRecord.Type == ControlRecordAbort {
+ delete(abortedProducerIDs, records.RecordBatch.ProducerID)
+ }
+ continue
+ }
+
+ // filter aborted transactions
+ if child.conf.Consumer.IsolationLevel == ReadCommitted {
+ _, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID]
+ if records.RecordBatch.IsTransactional && isAborted {
+ continue
+ }
+ }
+
+ messages = append(messages, recordBatchMessages...)
+ default:
+ return nil, fmt.Errorf("unknown records type: %v", records.recordsType)
+ }
+ }
+
+ return messages, nil
+}
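+
+// The aborted-transaction filtering above only takes effect when the consumer
+// is configured for read-committed isolation. A minimal config sketch, assuming
+// brokers at version 0.11.0.0 or newer (matching the version gate used when
+// building FetchRequests below):
+//
+//	config := NewConfig()
+//	config.Version = V0_11_0_0
+//	config.Consumer.IsolationLevel = ReadCommitted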
+
+type brokerConsumer struct {
+ consumer *consumer
+ broker *Broker
+ input chan *partitionConsumer
+ newSubscriptions chan []*partitionConsumer
+ subscriptions map[*partitionConsumer]none
+ wait chan none
+ acks sync.WaitGroup
+ refs int
+}
+
+func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
+ bc := &brokerConsumer{
+ consumer: c,
+ broker: broker,
+ input: make(chan *partitionConsumer),
+ newSubscriptions: make(chan []*partitionConsumer),
+ wait: make(chan none),
+ subscriptions: make(map[*partitionConsumer]none),
+ refs: 0,
+ }
+
+ go withRecover(bc.subscriptionManager)
+ go withRecover(bc.subscriptionConsumer)
+
+ return bc
+}
+
+// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
+// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
+// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
+// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
+// so the main goroutine can block waiting for work if it has none.
+func (bc *brokerConsumer) subscriptionManager() {
+ var buffer []*partitionConsumer
+
+ for {
+ if len(buffer) > 0 {
+ select {
+ case event, ok := <-bc.input:
+ if !ok {
+ goto done
+ }
+ buffer = append(buffer, event)
+ case bc.newSubscriptions <- buffer:
+ buffer = nil
+ case bc.wait <- none{}:
+ }
+ } else {
+ select {
+ case event, ok := <-bc.input:
+ if !ok {
+ goto done
+ }
+ buffer = append(buffer, event)
+ case bc.newSubscriptions <- nil:
+ }
+ }
+ }
+
+done:
+ close(bc.wait)
+ if len(buffer) > 0 {
+ bc.newSubscriptions <- buffer
+ }
+ close(bc.newSubscriptions)
+}
+
+// subscriptionConsumer ensures we will get nil right away if no new subscriptions are available
+func (bc *brokerConsumer) subscriptionConsumer() {
+ <-bc.wait // wait for our first piece of work
+
+ for newSubscriptions := range bc.newSubscriptions {
+ bc.updateSubscriptions(newSubscriptions)
+
+ if len(bc.subscriptions) == 0 {
+ // We're about to be shut down or we're about to receive more subscriptions.
+ // Either way, the signal just hasn't propagated to our goroutine yet.
+ <-bc.wait
+ continue
+ }
+
+ response, err := bc.fetchNewMessages()
+
+ if err != nil {
+ Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
+ bc.abort(err)
+ return
+ }
+
+ bc.acks.Add(len(bc.subscriptions))
+ for child := range bc.subscriptions {
+ child.feeder <- response
+ }
+ bc.acks.Wait()
+ bc.handleResponses()
+ }
+}
+
+func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
+ for _, child := range newSubscriptions {
+ bc.subscriptions[child] = none{}
+ Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+ }
+
+ for child := range bc.subscriptions {
+ select {
+ case <-child.dying:
+ Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+ close(child.trigger)
+ delete(bc.subscriptions, child)
+ default:
+ // no-op
+ }
+ }
+}
+
+// handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
+func (bc *brokerConsumer) handleResponses() {
+ for child := range bc.subscriptions {
+ result := child.responseResult
+ child.responseResult = nil
+
+ switch result {
+ case nil:
+ // no-op
+ case errTimedOut:
+ Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
+ bc.broker.ID(), child.topic, child.partition)
+ delete(bc.subscriptions, child)
+ case ErrOffsetOutOfRange:
+ // there's no point in retrying this: it will just fail the same way again,
+ // so shut it down and force the user to choose what to do
+ child.sendError(result)
+ Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
+ close(child.trigger)
+ delete(bc.subscriptions, child)
+ case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
+ // not an error, but does need redispatching
+ Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+ bc.broker.ID(), child.topic, child.partition, result)
+ child.trigger <- none{}
+ delete(bc.subscriptions, child)
+ default:
+ // unknown error; tell the user and try redispatching
+ child.sendError(result)
+ Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+ bc.broker.ID(), child.topic, child.partition, result)
+ child.trigger <- none{}
+ delete(bc.subscriptions, child)
+ }
+ }
+}
+
+func (bc *brokerConsumer) abort(err error) {
+ bc.consumer.abandonBrokerConsumer(bc)
+ _ = bc.broker.Close() // we don't care about the error this might return, we already have one
+
+ for child := range bc.subscriptions {
+ child.sendError(err)
+ child.trigger <- none{}
+ }
+
+ for newSubscriptions := range bc.newSubscriptions {
+ if len(newSubscriptions) == 0 {
+ <-bc.wait
+ continue
+ }
+ for _, child := range newSubscriptions {
+ child.sendError(err)
+ child.trigger <- none{}
+ }
+ }
+}
+
+func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
+ request := &FetchRequest{
+ MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
+ MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
+ }
+ if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) {
+ request.Version = 1
+ }
+ if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
+ request.Version = 2
+ }
+ if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
+ request.Version = 3
+ request.MaxBytes = MaxResponseSize
+ }
+ if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
+ request.Version = 4
+ request.Isolation = bc.consumer.conf.Consumer.IsolationLevel
+ }
+
+ for child := range bc.subscriptions {
+ request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
+ }
+
+ return bc.broker.Fetch(request)
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go
new file mode 100644
index 0000000..da99e88
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_group.go
@@ -0,0 +1,867 @@
+package sarama
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sort"
+ "sync"
+ "time"
+)
+
+// ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed.
+var ErrClosedConsumerGroup = errors.New("kafka: tried to use a consumer group that was closed")
+
+// ConsumerGroup is responsible for dividing up processing of topics and partitions
+// over a collection of processes (the members of the consumer group).
+type ConsumerGroup interface {
+ // Consume joins a cluster of consumers for a given list of topics and
+ // starts a blocking ConsumerGroupSession through the ConsumerGroupHandler.
+ //
+ // The life-cycle of a session is represented by the following steps:
+ //
+ // 1. The consumers join the group (as explained in https://kafka.apache.org/documentation/#intro_consumers)
+ // and are assigned their "fair share" of partitions, aka 'claims'.
+ // 2. Before processing starts, the handler's Setup() hook is called to notify the user
+ // of the claims and allow any necessary preparation or alteration of state.
+ // 3. For each of the assigned claims the handler's ConsumeClaim() function is then called
+ // in a separate goroutine which requires it to be thread-safe. Any state must be carefully protected
+ // from concurrent reads/writes.
+ // 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the
+ // parent context is cancelled or when a server-side rebalance cycle is initiated.
+ // 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called
+ // to allow the user to perform any final tasks before a rebalance.
+ // 6. Finally, marked offsets are committed one last time before claims are released.
+ //
+ // Please note that once a rebalance is triggered, sessions must be completed within
+ // Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit
+ // as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout
+ // is exceeded, the consumer will be removed from the group by Kafka, which will cause offset
+ // commit failures.
+ Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error
+
+ // Errors returns a read channel of errors that occurred during the consumer life-cycle.
+ // By default, errors are logged and not returned over this channel.
+ // If you want to implement any custom error handling, set your config's
+ // Consumer.Return.Errors setting to true, and read from this channel.
+ Errors() <-chan error
+
+ // Close stops the ConsumerGroup and detaches any running sessions. It is required to call
+ // this function before the object passes out of scope, as it will otherwise leak memory.
+ Close() error
+}
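+
+// A minimal sketch of the Consume loop described above (the broker address,
+// group ID, topic name and exampleHandler are illustrative; exampleHandler is
+// assumed to implement ConsumerGroupHandler):
+//
+//	config := NewConfig()
+//	config.Version = V0_10_2_0 // consumer groups require at least this version
+//
+//	group, err := NewConsumerGroup([]string{"localhost:9092"}, "my-group", config)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer group.Close()
+//
+//	ctx := context.Background()
+//	for {
+//		// Consume blocks for the duration of a session; when a server-side
+//		// rebalance ends the session it returns, and must be called again
+//		// to re-join the group.
+//		if err := group.Consume(ctx, []string{"my_topic"}, exampleHandler); err != nil {
+//			panic(err)
+//		}
+//		if ctx.Err() != nil {
+//			return
+//		}
+//	}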
+
+type consumerGroup struct {
+ client Client
+
+ config *Config
+ consumer Consumer
+ groupID string
+ memberID string
+ errors chan error
+
+ lock sync.Mutex
+ closed chan none
+ closeOnce sync.Once
+
+ userData []byte
+}
+
+// NewConsumerGroup creates a new consumer group for the given broker addresses and configuration.
+func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerGroup, error) {
+ client, err := NewClient(addrs, config)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := newConsumerGroup(groupID, client)
+ if err != nil {
+ _ = client.Close()
+ }
+ return c, err
+}
+
+// NewConsumerGroupFromClient creates a new consumer group using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
+// PLEASE NOTE: consumer groups can re-use an existing client, but a client cannot be shared between consumer groups.
+func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) {
+ // For a client passed in by the caller, wrap it so that we don't
+ // call Close() on it when this consumer group is closed.
+ cli := &nopCloserClient{client}
+ return newConsumerGroup(groupID, cli)
+}
+
+func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) {
+ config := client.Config()
+ if !config.Version.IsAtLeast(V0_10_2_0) {
+ return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0")
+ }
+
+ consumer, err := NewConsumerFromClient(client)
+ if err != nil {
+ return nil, err
+ }
+
+ return &consumerGroup{
+ client: client,
+ consumer: consumer,
+ config: config,
+ groupID: groupID,
+ errors: make(chan error, config.ChannelBufferSize),
+ closed: make(chan none),
+ }, nil
+}
+
+// Errors implements ConsumerGroup.
+func (c *consumerGroup) Errors() <-chan error { return c.errors }
+
+// Close implements ConsumerGroup.
+func (c *consumerGroup) Close() (err error) {
+ c.closeOnce.Do(func() {
+ close(c.closed)
+
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // leave group
+ if e := c.leave(); e != nil {
+ err = e
+ }
+
+ // drain errors
+ go func() {
+ close(c.errors)
+ }()
+ for e := range c.errors {
+ err = e
+ }
+
+ if e := c.client.Close(); e != nil {
+ err = e
+ }
+ })
+ return
+}
+
+// Consume implements ConsumerGroup.
+func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error {
+ // Ensure group is not closed
+ select {
+ case <-c.closed:
+ return ErrClosedConsumerGroup
+ default:
+ }
+
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Quick exit when no topics are provided
+ if len(topics) == 0 {
+ return fmt.Errorf("no topics provided")
+ }
+
+ // Refresh metadata for requested topics
+ if err := c.client.RefreshMetadata(topics...); err != nil {
+ return err
+ }
+
+ // Init session
+ sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max)
+ if err == ErrClosedClient {
+ return ErrClosedConsumerGroup
+ } else if err != nil {
+ return err
+ }
+
+ // Loop to check whether the number of partitions for any of the topics has changed;
+ // a change will trigger a rebalance by ending the current session.
+ go c.loopCheckPartitionNumbers(topics, sess)
+
+ // Wait for session exit signal
+ <-sess.ctx.Done()
+
+ // Gracefully release session claims
+ return sess.release(true)
+}
+
+func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) {
+ select {
+ case <-c.closed:
+ return nil, ErrClosedConsumerGroup
+ case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
+ }
+
+ if refreshCoordinator {
+ err := c.client.RefreshCoordinator(c.groupID)
+ if err != nil {
+ return c.retryNewSession(ctx, topics, handler, retries, true)
+ }
+ }
+
+ return c.newSession(ctx, topics, handler, retries-1)
+}
+
+func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) {
+ coordinator, err := c.client.Coordinator(c.groupID)
+ if err != nil {
+ if retries <= 0 {
+ return nil, err
+ }
+
+ return c.retryNewSession(ctx, topics, handler, retries, true)
+ }
+
+ // Join consumer group
+ join, err := c.joinGroupRequest(coordinator, topics)
+ if err != nil {
+ _ = coordinator.Close()
+ return nil, err
+ }
+ switch join.Err {
+ case ErrNoError:
+ c.memberID = join.MemberId
+ case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
+ c.memberID = ""
+ return c.newSession(ctx, topics, handler, retries)
+ case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
+ if retries <= 0 {
+ return nil, join.Err
+ }
+
+ return c.retryNewSession(ctx, topics, handler, retries, true)
+ case ErrRebalanceInProgress: // retry after backoff
+ if retries <= 0 {
+ return nil, join.Err
+ }
+
+ return c.retryNewSession(ctx, topics, handler, retries, false)
+ default:
+ return nil, join.Err
+ }
+
+ // Prepare distribution plan if we joined as the leader
+ var plan BalanceStrategyPlan
+ if join.LeaderId == join.MemberId {
+ members, err := join.GetMembers()
+ if err != nil {
+ return nil, err
+ }
+
+ plan, err = c.balance(members)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Sync consumer group
+ sync, err := c.syncGroupRequest(coordinator, plan, join.GenerationId)
+ if err != nil {
+ _ = coordinator.Close()
+ return nil, err
+ }
+ switch sync.Err {
+ case ErrNoError:
+ case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
+ c.memberID = ""
+ return c.newSession(ctx, topics, handler, retries)
+ case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
+ if retries <= 0 {
+ return nil, sync.Err
+ }
+
+ return c.retryNewSession(ctx, topics, handler, retries, true)
+ case ErrRebalanceInProgress: // retry after backoff
+ if retries <= 0 {
+ return nil, sync.Err
+ }
+
+ return c.retryNewSession(ctx, topics, handler, retries, false)
+ default:
+ return nil, sync.Err
+ }
+
+ // Retrieve and sort claims
+ var claims map[string][]int32
+ if len(sync.MemberAssignment) > 0 {
+ members, err := sync.GetMemberAssignment()
+ if err != nil {
+ return nil, err
+ }
+ claims = members.Topics
+ c.userData = members.UserData
+
+ for _, partitions := range claims {
+ sort.Sort(int32Slice(partitions))
+ }
+ }
+
+ return newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler)
+}
+
+func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) {
+ req := &JoinGroupRequest{
+ GroupId: c.groupID,
+ MemberId: c.memberID,
+ SessionTimeout: int32(c.config.Consumer.Group.Session.Timeout / time.Millisecond),
+ ProtocolType: "consumer",
+ }
+ if c.config.Version.IsAtLeast(V0_10_1_0) {
+ req.Version = 1
+ req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond)
+ }
+
+ // use static user-data if configured, otherwise use consumer-group userdata from the last sync
+ userData := c.config.Consumer.Group.Member.UserData
+ if len(userData) == 0 {
+ userData = c.userData
+ }
+ meta := &ConsumerGroupMemberMetadata{
+ Topics: topics,
+ UserData: userData,
+ }
+ strategy := c.config.Consumer.Group.Rebalance.Strategy
+ if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
+ return nil, err
+ }
+
+ return coordinator.JoinGroup(req)
+}
+
+func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrategyPlan, generationID int32) (*SyncGroupResponse, error) {
+ req := &SyncGroupRequest{
+ GroupId: c.groupID,
+ MemberId: c.memberID,
+ GenerationId: generationID,
+ }
+ for memberID, topics := range plan {
+ assignment := &ConsumerGroupMemberAssignment{Topics: topics}
+
+ // Include topic assignments in group-assignment userdata for each consumer-group member
+ if c.config.Consumer.Group.Rebalance.Strategy.Name() == StickyBalanceStrategyName {
+ userDataBytes, err := encode(&StickyAssignorUserDataV1{
+ Topics: topics,
+ Generation: generationID,
+ }, nil)
+ if err != nil {
+ return nil, err
+ }
+ assignment.UserData = userDataBytes
+ }
+ if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil {
+ return nil, err
+ }
+ }
+ return coordinator.SyncGroup(req)
+}
+
+func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, generationID int32) (*HeartbeatResponse, error) {
+ req := &HeartbeatRequest{
+ GroupId: c.groupID,
+ MemberId: memberID,
+ GenerationId: generationID,
+ }
+
+ return coordinator.Heartbeat(req)
+}
+
+func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) {
+ topics := make(map[string][]int32)
+ for _, meta := range members {
+ for _, topic := range meta.Topics {
+ topics[topic] = nil
+ }
+ }
+
+ for topic := range topics {
+ partitions, err := c.client.Partitions(topic)
+ if err != nil {
+ return nil, err
+ }
+ topics[topic] = partitions
+ }
+
+ strategy := c.config.Consumer.Group.Rebalance.Strategy
+ return strategy.Plan(members, topics)
+}
+
+// Leaves the cluster, called by Close, protected by lock.
+func (c *consumerGroup) leave() error {
+ if c.memberID == "" {
+ return nil
+ }
+
+ coordinator, err := c.client.Coordinator(c.groupID)
+ if err != nil {
+ return err
+ }
+
+ resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{
+ GroupId: c.groupID,
+ MemberId: c.memberID,
+ })
+ if err != nil {
+ _ = coordinator.Close()
+ return err
+ }
+
+ // Unset memberID
+ c.memberID = ""
+
+ // Check response
+ switch resp.Err {
+ case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError:
+ return nil
+ default:
+ return resp.Err
+ }
+}
+
+func (c *consumerGroup) handleError(err error, topic string, partition int32) {
+ select {
+ case <-c.closed:
+ return
+ default:
+ }
+
+ if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 {
+ err = &ConsumerError{
+ Topic: topic,
+ Partition: partition,
+ Err: err,
+ }
+ }
+
+ if c.config.Consumer.Return.Errors {
+ select {
+ case c.errors <- err:
+ default:
+ }
+ } else {
+ Logger.Println(err)
+ }
+}
+
+func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *consumerGroupSession) {
+ pause := time.NewTicker(c.config.Consumer.Group.Heartbeat.Interval * 2)
+ defer session.cancel()
+ defer pause.Stop()
+ var oldTopicToPartitionNum map[string]int
+ var err error
+ if oldTopicToPartitionNum, err = c.topicToPartitionNumbers(topics); err != nil {
+ return
+ }
+ for {
+ if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil {
+ return
+ } else {
+ for topic, num := range oldTopicToPartitionNum {
+ if newTopicToPartitionNum[topic] != num {
+ return // trigger the end of the session on exit
+ }
+ }
+ }
+ select {
+ case <-pause.C:
+ case <-c.closed:
+ return
+ }
+ }
+}
+
+func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int, error) {
+ if err := c.client.RefreshMetadata(topics...); err != nil {
+ Logger.Printf("Consumer Group refresh metadata failed %v", err)
+ return nil, err
+ }
+ topicToPartitionNum := make(map[string]int, len(topics))
+ for _, topic := range topics {
+ if partitionNum, err := c.client.Partitions(topic); err != nil {
+ Logger.Printf("Consumer Group topic %s get partition number failed %v", topic, err)
+ return nil, err
+ } else {
+ topicToPartitionNum[topic] = len(partitionNum)
+ }
+ }
+ return topicToPartitionNum, nil
+}
+
+// --------------------------------------------------------------------
+
+// ConsumerGroupSession represents a consumer group member session.
+type ConsumerGroupSession interface {
+ // Claims returns information about the claimed partitions by topic.
+ Claims() map[string][]int32
+
+ // MemberID returns the cluster member ID.
+ MemberID() string
+
+ // GenerationID returns the current generation ID.
+ GenerationID() int32
+
+ // MarkOffset marks the provided offset, alongside a metadata string
+ // that represents the state of the partition consumer at that point in time. The
+ // metadata string can be used by another consumer to restore that state, so it
+ // can resume consumption.
+ //
+ // To follow upstream conventions, you are expected to mark the offset of the
+ // next message to read, not the last message read. Thus, when calling `MarkOffset`
+ // you should typically add one to the offset of the last consumed message.
+ //
+ // Note: calling MarkOffset does not necessarily commit the offset to the backend
+ // store immediately for efficiency reasons, and it may never be committed if
+ // your application crashes. This means that you may end up processing the same
+ // message twice, and your processing should ideally be idempotent.
+ MarkOffset(topic string, partition int32, offset int64, metadata string)
+
+ // ResetOffset resets to the provided offset, alongside a metadata string that
+ // represents the state of the partition consumer at that point in time. Reset
+ // acts as a counterpart to MarkOffset, the difference being that it allows you to
+ // reset an offset to an earlier or smaller value, where MarkOffset only
+ // allows incrementing the offset. See MarkOffset for more details.
+ ResetOffset(topic string, partition int32, offset int64, metadata string)
+
+ // MarkMessage marks a message as consumed.
+ MarkMessage(msg *ConsumerMessage, metadata string)
+
+ // Context returns the session context.
+ Context() context.Context
+}
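+
+// A minimal sketch of offset marking inside a ConsumeClaim loop (sess is the
+// ConsumerGroupSession and msg a received *ConsumerMessage; both are assumed
+// to be in scope):
+//
+//	// the two calls below are equivalent; note the +1 on MarkOffset, since
+//	// upstream convention is to mark the offset of the NEXT message to read
+//	sess.MarkMessage(msg, "")
+//	sess.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, "")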
+
+type consumerGroupSession struct {
+ parent *consumerGroup
+ memberID string
+ generationID int32
+ handler ConsumerGroupHandler
+
+ claims map[string][]int32
+ offsets *offsetManager
+ ctx context.Context
+ cancel func()
+
+ waitGroup sync.WaitGroup
+ releaseOnce sync.Once
+ hbDying, hbDead chan none
+}
+
+func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) {
+ // init offset manager
+ offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client)
+ if err != nil {
+ return nil, err
+ }
+
+ // init context
+ ctx, cancel := context.WithCancel(ctx)
+
+ // init session
+ sess := &consumerGroupSession{
+ parent: parent,
+ memberID: memberID,
+ generationID: generationID,
+ handler: handler,
+ offsets: offsets,
+ claims: claims,
+ ctx: ctx,
+ cancel: cancel,
+ hbDying: make(chan none),
+ hbDead: make(chan none),
+ }
+
+ // start heartbeat loop
+ go sess.heartbeatLoop()
+
+ // create a POM for each claim
+ for topic, partitions := range claims {
+ for _, partition := range partitions {
+ pom, err := offsets.ManagePartition(topic, partition)
+ if err != nil {
+ _ = sess.release(false)
+ return nil, err
+ }
+
+ // handle POM errors
+ go func(topic string, partition int32) {
+ for err := range pom.Errors() {
+ sess.parent.handleError(err, topic, partition)
+ }
+ }(topic, partition)
+ }
+ }
+
+ // perform setup
+ if err := handler.Setup(sess); err != nil {
+ _ = sess.release(true)
+ return nil, err
+ }
+
+ // start consuming
+ for topic, partitions := range claims {
+ for _, partition := range partitions {
+ sess.waitGroup.Add(1)
+
+ go func(topic string, partition int32) {
+ defer sess.waitGroup.Done()
+
+ // cancel the session as soon as the first
+ // goroutine exits
+ defer sess.cancel()
+
+ // consume a single topic/partition, blocking
+ sess.consume(topic, partition)
+ }(topic, partition)
+ }
+ }
+ return sess, nil
+}
+
+func (s *consumerGroupSession) Claims() map[string][]int32 { return s.claims }
+func (s *consumerGroupSession) MemberID() string { return s.memberID }
+func (s *consumerGroupSession) GenerationID() int32 { return s.generationID }
+
+func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) {
+ if pom := s.offsets.findPOM(topic, partition); pom != nil {
+ pom.MarkOffset(offset, metadata)
+ }
+}
+
+func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
+ if pom := s.offsets.findPOM(topic, partition); pom != nil {
+ pom.ResetOffset(offset, metadata)
+ }
+}
+
+func (s *consumerGroupSession) MarkMessage(msg *ConsumerMessage, metadata string) {
+ s.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, metadata)
+}
+
+func (s *consumerGroupSession) Context() context.Context {
+ return s.ctx
+}
+
+func (s *consumerGroupSession) consume(topic string, partition int32) {
+ // quick exit if rebalance is due
+ select {
+ case <-s.ctx.Done():
+ return
+ case <-s.parent.closed:
+ return
+ default:
+ }
+
+ // get next offset
+ offset := s.parent.config.Consumer.Offsets.Initial
+ if pom := s.offsets.findPOM(topic, partition); pom != nil {
+ offset, _ = pom.NextOffset()
+ }
+
+ // create new claim
+ claim, err := newConsumerGroupClaim(s, topic, partition, offset)
+ if err != nil {
+ s.parent.handleError(err, topic, partition)
+ return
+ }
+
+ // handle errors
+ go func() {
+ for err := range claim.Errors() {
+ s.parent.handleError(err, topic, partition)
+ }
+ }()
+
+ // trigger close when session is done
+ go func() {
+ select {
+ case <-s.ctx.Done():
+ case <-s.parent.closed:
+ }
+ claim.AsyncClose()
+ }()
+
+ // start processing
+ if err := s.handler.ConsumeClaim(s, claim); err != nil {
+ s.parent.handleError(err, topic, partition)
+ }
+
+ // ensure consumer is closed & drained
+ claim.AsyncClose()
+ for _, err := range claim.waitClosed() {
+ s.parent.handleError(err, topic, partition)
+ }
+}
+
+func (s *consumerGroupSession) release(withCleanup bool) (err error) {
+ // signal release, stop heartbeat
+ s.cancel()
+
+ // wait for consumers to exit
+ s.waitGroup.Wait()
+
+ // perform release
+ s.releaseOnce.Do(func() {
+ if withCleanup {
+ if e := s.handler.Cleanup(s); e != nil {
+ s.parent.handleError(e, "", -1)
+ err = e
+ }
+ }
+
+ if e := s.offsets.Close(); e != nil {
+ err = e
+ }
+
+ close(s.hbDying)
+ <-s.hbDead
+ })
+
+ return
+}
+
+func (s *consumerGroupSession) heartbeatLoop() {
+ defer close(s.hbDead)
+ defer s.cancel() // trigger the end of the session on exit
+
+ pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval)
+ defer pause.Stop()
+
+ retries := s.parent.config.Metadata.Retry.Max
+ for {
+ coordinator, err := s.parent.client.Coordinator(s.parent.groupID)
+ if err != nil {
+ if retries <= 0 {
+ s.parent.handleError(err, "", -1)
+ return
+ }
+
+ select {
+ case <-s.hbDying:
+ return
+ case <-time.After(s.parent.config.Metadata.Retry.Backoff):
+ retries--
+ }
+ continue
+ }
+
+ resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID)
+ if err != nil {
+ _ = coordinator.Close()
+
+ if retries <= 0 {
+ s.parent.handleError(err, "", -1)
+ return
+ }
+
+ retries--
+ continue
+ }
+
+ switch resp.Err {
+ case ErrNoError:
+ retries = s.parent.config.Metadata.Retry.Max
+ case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration:
+ return
+ default:
+ s.parent.handleError(err, "", -1)
+ return
+ }
+
+ select {
+ case <-pause.C:
+ case <-s.hbDying:
+ return
+ }
+ }
+}
+
+// --------------------------------------------------------------------
+
+// ConsumerGroupHandler instances are used to handle individual topic/partition claims.
+// It also provides hooks for your consumer group session life-cycle and allows you to
+// trigger logic before or after the consume loop(s).
+//
+// PLEASE NOTE that handlers are likely to be called from several goroutines concurrently;
+// ensure that all state is safely protected against race conditions.
+type ConsumerGroupHandler interface {
+ // Setup is run at the beginning of a new session, before ConsumeClaim.
+ Setup(ConsumerGroupSession) error
+
+ // Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
+ // but before the offsets are committed for the very last time.
+ Cleanup(ConsumerGroupSession) error
+
+ // ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
+ // Once the Messages() channel is closed, the Handler must finish its processing
+ // loop and exit.
+ ConsumeClaim(ConsumerGroupSession, ConsumerGroupClaim) error
+}
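+
+// A minimal handler sketch for the interface above (the exampleHandler type
+// and the process function are illustrative, not part of this package):
+//
+//	type exampleHandler struct{}
+//
+//	func (exampleHandler) Setup(ConsumerGroupSession) error   { return nil }
+//	func (exampleHandler) Cleanup(ConsumerGroupSession) error { return nil }
+//
+//	func (exampleHandler) ConsumeClaim(sess ConsumerGroupSession, claim ConsumerGroupClaim) error {
+//		// the Messages channel is closed when a rebalance is due, which ends
+//		// this loop and lets the session shut down cleanly
+//		for msg := range claim.Messages() {
+//			process(msg) // hypothetical application-level processing
+//			sess.MarkMessage(msg, "")
+//		}
+//		return nil
+//	}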
+
+// ConsumerGroupClaim processes Kafka messages from a given topic and partition within a consumer group.
+type ConsumerGroupClaim interface {
+ // Topic returns the consumed topic name.
+ Topic() string
+
+ // Partition returns the consumed partition.
+ Partition() int32
+
+ // InitialOffset returns the initial offset that was used as a starting point for this claim.
+ InitialOffset() int64
+
+ // HighWaterMarkOffset returns the high water mark offset of the partition,
+ // i.e. the offset that will be used for the next message that will be produced.
+ // You can use this to determine how far behind the processing is.
+ HighWaterMarkOffset() int64
+
+ // Messages returns the read channel for the messages that are returned by
+ // the broker. The messages channel will be closed when a new rebalance cycle
+ // is due. You must finish processing and mark offsets within
+ // Config.Consumer.Group.Session.Timeout before the topic/partition is eventually
+ // re-assigned to another group member.
+ Messages() <-chan *ConsumerMessage
+}
+
+type consumerGroupClaim struct {
+ topic string
+ partition int32
+ offset int64
+ PartitionConsumer
+}
+
+func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) {
+ pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset)
+ if err == ErrOffsetOutOfRange {
+ offset = sess.parent.config.Consumer.Offsets.Initial
+ pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ for err := range pcm.Errors() {
+ sess.parent.handleError(err, topic, partition)
+ }
+ }()
+
+ return &consumerGroupClaim{
+ topic: topic,
+ partition: partition,
+ offset: offset,
+ PartitionConsumer: pcm,
+ }, nil
+}
+
+func (c *consumerGroupClaim) Topic() string { return c.topic }
+func (c *consumerGroupClaim) Partition() int32 { return c.partition }
+func (c *consumerGroupClaim) InitialOffset() int64 { return c.offset }
+
+// Drains messages and errors, ensures the claim is fully closed.
+func (c *consumerGroupClaim) waitClosed() (errs ConsumerErrors) {
+ go func() {
+ for range c.Messages() {
+ }
+ }()
+
+ for err := range c.Errors() {
+ errs = append(errs, err)
+ }
+ return
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go
new file mode 100644
index 0000000..2d02cc3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_group_members.go
@@ -0,0 +1,96 @@
+package sarama
+
+// ConsumerGroupMemberMetadata holds the metadata for a consumer group member
+type ConsumerGroupMemberMetadata struct {
+ Version int16
+ Topics []string
+ UserData []byte
+}
+
+func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
+ pe.putInt16(m.Version)
+
+ if err := pe.putStringArray(m.Topics); err != nil {
+ return err
+ }
+
+ if err := pe.putBytes(m.UserData); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
+ if m.Version, err = pd.getInt16(); err != nil {
+ return
+ }
+
+ if m.Topics, err = pd.getStringArray(); err != nil {
+ return
+ }
+
+ if m.UserData, err = pd.getBytes(); err != nil {
+ return
+ }
+
+ return nil
+}
+
+// ConsumerGroupMemberAssignment holds the member assignment for a consumer group
+type ConsumerGroupMemberAssignment struct {
+ Version int16
+ Topics map[string][]int32
+ UserData []byte
+}
+
+func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
+ pe.putInt16(m.Version)
+
+ if err := pe.putArrayLength(len(m.Topics)); err != nil {
+ return err
+ }
+
+ for topic, partitions := range m.Topics {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putInt32Array(partitions); err != nil {
+ return err
+ }
+ }
+
+ if err := pe.putBytes(m.UserData); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
+ if m.Version, err = pd.getInt16(); err != nil {
+ return
+ }
+
+ var topicLen int
+ if topicLen, err = pd.getArrayLength(); err != nil {
+ return
+ }
+
+ m.Topics = make(map[string][]int32, topicLen)
+ for i := 0; i < topicLen; i++ {
+ var topic string
+ if topic, err = pd.getString(); err != nil {
+ return
+ }
+ if m.Topics[topic], err = pd.getInt32Array(); err != nil {
+ return
+ }
+ }
+
+ if m.UserData, err = pd.getBytes(); err != nil {
+ return
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
new file mode 100644
index 0000000..a8dcaef
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
@@ -0,0 +1,34 @@
+package sarama
+
+// ConsumerMetadataRequest is used for consumer group metadata requests
+type ConsumerMetadataRequest struct {
+ ConsumerGroup string
+}
+
+func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
+ tmp := new(FindCoordinatorRequest)
+ tmp.CoordinatorKey = r.ConsumerGroup
+ tmp.CoordinatorType = CoordinatorGroup
+ return tmp.encode(pe)
+}
+
+func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
+ tmp := new(FindCoordinatorRequest)
+ if err := tmp.decode(pd, version); err != nil {
+ return err
+ }
+ r.ConsumerGroup = tmp.CoordinatorKey
+ return nil
+}
+
+func (r *ConsumerMetadataRequest) key() int16 {
+ return 10
+}
+
+func (r *ConsumerMetadataRequest) version() int16 {
+ return 0
+}
+
+func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
+ return V0_8_2_0
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
new file mode 100644
index 0000000..f39a871
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
@@ -0,0 +1,78 @@
+package sarama
+
+import (
+ "net"
+ "strconv"
+)
+
+// ConsumerMetadataResponse holds the response for a consumer group metadata request
+type ConsumerMetadataResponse struct {
+ Err KError
+ Coordinator *Broker
+ CoordinatorID int32 // deprecated: use Coordinator.ID()
+ CoordinatorHost string // deprecated: use Coordinator.Addr()
+ CoordinatorPort int32 // deprecated: use Coordinator.Addr()
+}
+
+func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+ tmp := new(FindCoordinatorResponse)
+
+ if err := tmp.decode(pd, version); err != nil {
+ return err
+ }
+
+ r.Err = tmp.Err
+
+ r.Coordinator = tmp.Coordinator
+ if tmp.Coordinator == nil {
+ return nil
+ }
+
+ // this can all go away in 2.0, but we have to fill in deprecated fields to maintain
+ // backwards compatibility
+ host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
+ if err != nil {
+ return err
+ }
+ port, err := strconv.ParseInt(portstr, 10, 32)
+ if err != nil {
+ return err
+ }
+ r.CoordinatorID = r.Coordinator.ID()
+ r.CoordinatorHost = host
+ r.CoordinatorPort = int32(port)
+
+ return nil
+}
+
+func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
+ if r.Coordinator == nil {
+ r.Coordinator = new(Broker)
+ r.Coordinator.id = r.CoordinatorID
+ r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort)))
+ }
+
+ tmp := &FindCoordinatorResponse{
+ Version: 0,
+ Err: r.Err,
+ Coordinator: r.Coordinator,
+ }
+
+ if err := tmp.encode(pe); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *ConsumerMetadataResponse) key() int16 {
+ return 10
+}
+
+func (r *ConsumerMetadataResponse) version() int16 {
+ return 0
+}
+
+func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
+ return V0_8_2_0
+}
diff --git a/vendor/github.com/Shopify/sarama/control_record.go b/vendor/github.com/Shopify/sarama/control_record.go
new file mode 100644
index 0000000..9b75ab5
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/control_record.go
@@ -0,0 +1,72 @@
+package sarama
+
+// ControlRecordType represents the type of a control record
+type ControlRecordType int
+
+const (
+ // ControlRecordAbort is a control record for abort
+ ControlRecordAbort ControlRecordType = iota
+ // ControlRecordCommit is a control record for commit
+ ControlRecordCommit
+ // ControlRecordUnknown is a control record of unknown type
+ ControlRecordUnknown
+)
+
+// ControlRecord is a record returned by a FetchRequest like any other record.
+// However, unlike "normal" records, control records mean nothing application-wise;
+// they only serve internal logic for supporting transactions.
+type ControlRecord struct {
+ Version int16
+ CoordinatorEpoch int32
+ Type ControlRecordType
+}
+
+func (cr *ControlRecord) decode(key, value packetDecoder) error {
+ var err error
+ cr.Version, err = value.getInt16()
+ if err != nil {
+ return err
+ }
+
+ cr.CoordinatorEpoch, err = value.getInt32()
+ if err != nil {
+ return err
+ }
+
+ // There is a version for the value part AND the key part, and it is not clear whether they are supposed to match.
+ // Either way, all these versions can only be 0 for now.
+ cr.Version, err = key.getInt16()
+ if err != nil {
+ return err
+ }
+
+ recordType, err := key.getInt16()
+ if err != nil {
+ return err
+ }
+
+ switch recordType {
+ case 0:
+ cr.Type = ControlRecordAbort
+ case 1:
+ cr.Type = ControlRecordCommit
+ default:
+ // from the Java implementation:
+ // UNKNOWN is used to indicate a control type which the client is not aware of and should be ignored
+ cr.Type = ControlRecordUnknown
+ }
+ return nil
+}
+
+func (cr *ControlRecord) encode(key, value packetEncoder) {
+ value.putInt16(cr.Version)
+ value.putInt32(cr.CoordinatorEpoch)
+ key.putInt16(cr.Version)
+
+ switch cr.Type {
+ case ControlRecordAbort:
+ key.putInt16(0)
+ case ControlRecordCommit:
+ key.putInt16(1)
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go
new file mode 100644
index 0000000..38189a3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/crc32_field.go
@@ -0,0 +1,86 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "fmt"
+ "hash/crc32"
+ "sync"
+)
+
+type crcPolynomial int8
+
+const (
+ crcIEEE crcPolynomial = iota
+ crcCastagnoli
+)
+
+var crc32FieldPool = sync.Pool{}
+
+func acquireCrc32Field(polynomial crcPolynomial) *crc32Field {
+ val := crc32FieldPool.Get()
+ if val != nil {
+ c := val.(*crc32Field)
+ c.polynomial = polynomial
+ return c
+ }
+ return newCRC32Field(polynomial)
+}
+
+func releaseCrc32Field(c *crc32Field) {
+ crc32FieldPool.Put(c)
+}
+
+var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
+type crc32Field struct {
+ startOffset int
+ polynomial crcPolynomial
+}
+
+func (c *crc32Field) saveOffset(in int) {
+ c.startOffset = in
+}
+
+func (c *crc32Field) reserveLength() int {
+ return 4
+}
+
+func newCRC32Field(polynomial crcPolynomial) *crc32Field {
+ return &crc32Field{polynomial: polynomial}
+}
+
+func (c *crc32Field) run(curOffset int, buf []byte) error {
+ crc, err := c.crc(curOffset, buf)
+ if err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
+ return nil
+}
+
+func (c *crc32Field) check(curOffset int, buf []byte) error {
+ crc, err := c.crc(curOffset, buf)
+ if err != nil {
+ return err
+ }
+
+ expected := binary.BigEndian.Uint32(buf[c.startOffset:])
+ if crc != expected {
+ return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)}
+ }
+
+ return nil
+}
+func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) {
+ var tab *crc32.Table
+ switch c.polynomial {
+ case crcIEEE:
+ tab = crc32.IEEETable
+ case crcCastagnoli:
+ tab = castagnoliTable
+ default:
+ return 0, PacketDecodingError{"invalid CRC type"}
+ }
+ return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil
+}
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go
new file mode 100644
index 0000000..af321e9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/create_partitions_request.go
@@ -0,0 +1,121 @@
+package sarama
+
+import "time"
+
+type CreatePartitionsRequest struct {
+ TopicPartitions map[string]*TopicPartition
+ Timeout time.Duration
+ ValidateOnly bool
+}
+
+func (c *CreatePartitionsRequest) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil {
+ return err
+ }
+
+ for topic, partition := range c.TopicPartitions {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := partition.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ pe.putInt32(int32(c.Timeout / time.Millisecond))
+
+ pe.putBool(c.ValidateOnly)
+
+ return nil
+}
+
+func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ c.TopicPartitions = make(map[string]*TopicPartition, n)
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ c.TopicPartitions[topic] = new(TopicPartition)
+ if err := c.TopicPartitions[topic].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ timeout, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ c.Timeout = time.Duration(timeout) * time.Millisecond
+
+ if c.ValidateOnly, err = pd.getBool(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *CreatePartitionsRequest) key() int16 {
+ return 37
+}
+
+func (r *CreatePartitionsRequest) version() int16 {
+ return 0
+}
+
+func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
+ return V1_0_0_0
+}
+
+type TopicPartition struct {
+ Count int32
+ Assignment [][]int32
+}
+
+func (t *TopicPartition) encode(pe packetEncoder) error {
+ pe.putInt32(t.Count)
+
+ if len(t.Assignment) == 0 {
+ pe.putInt32(-1)
+ return nil
+ }
+
+ if err := pe.putArrayLength(len(t.Assignment)); err != nil {
+ return err
+ }
+
+ for _, assign := range t.Assignment {
+ if err := pe.putInt32Array(assign); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) {
+ if t.Count, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ n, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ if n <= 0 {
+ return nil
+ }
+ t.Assignment = make([][]int32, n)
+
+ for i := 0; i < int(n); i++ {
+ if t.Assignment[i], err = pd.getInt32Array(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
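As a hedged sketch of how a caller might fill in the request type above (sarama's admin helpers normally do this), the snippet below builds a CreatePartitionsRequest for a hypothetical topic; the topic name, partition count, and broker IDs are illustrative only.

package main

import (
	"fmt"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	// Hypothetical request: grow "example-topic" to 6 partitions as a dry run.
	req := &sarama.CreatePartitionsRequest{
		TopicPartitions: map[string]*sarama.TopicPartition{
			"example-topic": {
				Count:      6,                         // desired total partition count
				Assignment: [][]int32{{1, 2}, {2, 3}}, // optional replica placement for the new partitions (placeholder broker IDs)
			},
		},
		Timeout:      30 * time.Second, // encoded as int32 milliseconds by encode()
		ValidateOnly: true,             // broker validates without creating partitions
	}
	fmt.Printf("%+v\n", req)
}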
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go
new file mode 100644
index 0000000..bb18204
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/create_partitions_response.go
@@ -0,0 +1,105 @@
+package sarama
+
+import (
+ "fmt"
+ "time"
+)
+
+type CreatePartitionsResponse struct {
+ ThrottleTime time.Duration
+ TopicPartitionErrors map[string]*TopicPartitionError
+}
+
+func (c *CreatePartitionsResponse) encode(pe packetEncoder) error {
+ pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
+ if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil {
+ return err
+ }
+
+ for topic, partitionError := range c.TopicPartitionErrors {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := partitionError.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) {
+ throttleTime, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n)
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ c.TopicPartitionErrors[topic] = new(TopicPartitionError)
+ if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *CreatePartitionsResponse) key() int16 {
+ return 37
+}
+
+func (r *CreatePartitionsResponse) version() int16 {
+ return 0
+}
+
+func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
+ return V1_0_0_0
+}
+
+type TopicPartitionError struct {
+ Err KError
+ ErrMsg *string
+}
+
+func (t *TopicPartitionError) Error() string {
+ text := t.Err.Error()
+ if t.ErrMsg != nil {
+ text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
+ }
+ return text
+}
+
+func (t *TopicPartitionError) encode(pe packetEncoder) error {
+ pe.putInt16(int16(t.Err))
+
+ if err := pe.putNullableString(t.ErrMsg); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ t.Err = KError(kerr)
+
+ if t.ErrMsg, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/Shopify/sarama/create_topics_request.go
new file mode 100644
index 0000000..709c0a4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/create_topics_request.go
@@ -0,0 +1,174 @@
+package sarama
+
+import (
+ "time"
+)
+
+type CreateTopicsRequest struct {
+ Version int16
+
+ TopicDetails map[string]*TopicDetail
+ Timeout time.Duration
+ ValidateOnly bool
+}
+
+func (c *CreateTopicsRequest) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(c.TopicDetails)); err != nil {
+ return err
+ }
+ for topic, detail := range c.TopicDetails {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := detail.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ pe.putInt32(int32(c.Timeout / time.Millisecond))
+
+ if c.Version >= 1 {
+ pe.putBool(c.ValidateOnly)
+ }
+
+ return nil
+}
+
+func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ c.TopicDetails = make(map[string]*TopicDetail, n)
+
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ c.TopicDetails[topic] = new(TopicDetail)
+ if err = c.TopicDetails[topic].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ timeout, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ c.Timeout = time.Duration(timeout) * time.Millisecond
+
+ if version >= 1 {
+ c.ValidateOnly, err = pd.getBool()
+ if err != nil {
+ return err
+ }
+
+ c.Version = version
+ }
+
+ return nil
+}
+
+func (c *CreateTopicsRequest) key() int16 {
+ return 19
+}
+
+func (c *CreateTopicsRequest) version() int16 {
+ return c.Version
+}
+
+func (c *CreateTopicsRequest) requiredVersion() KafkaVersion {
+ switch c.Version {
+ case 2:
+ return V1_0_0_0
+ case 1:
+ return V0_11_0_0
+ default:
+ return V0_10_1_0
+ }
+}
+
+type TopicDetail struct {
+ NumPartitions int32
+ ReplicationFactor int16
+ ReplicaAssignment map[int32][]int32
+ ConfigEntries map[string]*string
+}
+
+func (t *TopicDetail) encode(pe packetEncoder) error {
+ pe.putInt32(t.NumPartitions)
+ pe.putInt16(t.ReplicationFactor)
+
+ if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil {
+ return err
+ }
+ for partition, assignment := range t.ReplicaAssignment {
+ pe.putInt32(partition)
+ if err := pe.putInt32Array(assignment); err != nil {
+ return err
+ }
+ }
+
+ if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil {
+ return err
+ }
+ for configKey, configValue := range t.ConfigEntries {
+ if err := pe.putString(configKey); err != nil {
+ return err
+ }
+ if err := pe.putNullableString(configValue); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) {
+ if t.NumPartitions, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if t.ReplicationFactor, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if n > 0 {
+ t.ReplicaAssignment = make(map[int32][]int32, n)
+ for i := 0; i < n; i++ {
+ replica, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil {
+ return err
+ }
+ }
+ }
+
+ n, err = pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if n > 0 {
+ t.ConfigEntries = make(map[string]*string, n)
+ for i := 0; i < n; i++ {
+ configKey, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
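The snippet below is a minimal, hypothetical construction of the CreateTopicsRequest and TopicDetail types defined above; the topic name and config value are placeholders, and most applications would rely on a higher-level admin call rather than sending this request by hand.

package main

import (
	"fmt"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	retention := "604800000" // placeholder retention.ms value (7 days in milliseconds)

	req := &sarama.CreateTopicsRequest{
		Version: 1, // per requiredVersion() above, v1 needs Kafka 0.11+ and adds ValidateOnly
		TopicDetails: map[string]*sarama.TopicDetail{
			"example-topic": {
				NumPartitions:     3,
				ReplicationFactor: 2,
				ConfigEntries: map[string]*string{
					"retention.ms": &retention,
				},
			},
		},
		Timeout:      15 * time.Second,
		ValidateOnly: true,
	}
	fmt.Printf("%+v\n", req)
}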
diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go
new file mode 100644
index 0000000..a493e02
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/create_topics_response.go
@@ -0,0 +1,123 @@
+package sarama
+
+import (
+ "fmt"
+ "time"
+)
+
+type CreateTopicsResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ TopicErrors map[string]*TopicError
+}
+
+func (c *CreateTopicsResponse) encode(pe packetEncoder) error {
+ if c.Version >= 2 {
+ pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
+ }
+
+ if err := pe.putArrayLength(len(c.TopicErrors)); err != nil {
+ return err
+ }
+ for topic, topicError := range c.TopicErrors {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := topicError.encode(pe, c.Version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
+ c.Version = version
+
+ if version >= 2 {
+ throttleTime, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ c.TopicErrors = make(map[string]*TopicError, n)
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ c.TopicErrors[topic] = new(TopicError)
+ if err := c.TopicErrors[topic].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *CreateTopicsResponse) key() int16 {
+ return 19
+}
+
+func (c *CreateTopicsResponse) version() int16 {
+ return c.Version
+}
+
+func (c *CreateTopicsResponse) requiredVersion() KafkaVersion {
+ switch c.Version {
+ case 2:
+ return V1_0_0_0
+ case 1:
+ return V0_11_0_0
+ default:
+ return V0_10_1_0
+ }
+}
+
+type TopicError struct {
+ Err KError
+ ErrMsg *string
+}
+
+func (t *TopicError) Error() string {
+ text := t.Err.Error()
+ if t.ErrMsg != nil {
+ text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
+ }
+ return text
+}
+
+func (t *TopicError) encode(pe packetEncoder, version int16) error {
+ pe.putInt16(int16(t.Err))
+
+ if version >= 1 {
+ if err := pe.putNullableString(t.ErrMsg); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (t *TopicError) decode(pd packetDecoder, version int16) (err error) {
+ kErr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ t.Err = KError(kErr)
+
+ if version >= 1 {
+ if t.ErrMsg, err = pd.getNullableString(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go
new file mode 100644
index 0000000..eaccbfc
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/decompress.go
@@ -0,0 +1,63 @@
+package sarama
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+ "sync"
+
+ "github.com/eapache/go-xerial-snappy"
+ "github.com/pierrec/lz4"
+)
+
+var (
+ lz4ReaderPool = sync.Pool{
+ New: func() interface{} {
+ return lz4.NewReader(nil)
+ },
+ }
+
+ gzipReaderPool sync.Pool
+)
+
+func decompress(cc CompressionCodec, data []byte) ([]byte, error) {
+ switch cc {
+ case CompressionNone:
+ return data, nil
+ case CompressionGZIP:
+ var (
+ err error
+ reader *gzip.Reader
+ readerIntf = gzipReaderPool.Get()
+ )
+ if readerIntf != nil {
+ reader = readerIntf.(*gzip.Reader)
+ } else {
+ reader, err = gzip.NewReader(bytes.NewReader(data))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ defer gzipReaderPool.Put(reader)
+
+ if err := reader.Reset(bytes.NewReader(data)); err != nil {
+ return nil, err
+ }
+
+ return ioutil.ReadAll(reader)
+ case CompressionSnappy:
+ return snappy.Decode(data)
+ case CompressionLZ4:
+ reader := lz4ReaderPool.Get().(*lz4.Reader)
+ defer lz4ReaderPool.Put(reader)
+
+ reader.Reset(bytes.NewReader(data))
+ return ioutil.ReadAll(reader)
+ case CompressionZSTD:
+ return zstdDecompress(nil, data)
+ default:
+ return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)}
+ }
+}
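The gzip branch above reuses readers through a sync.Pool. The self-contained sketch below (standard library only, not sarama's actual pool wiring) shows the same pattern: take a reader from the pool if one exists, Reset it against the new input, and return it to the pool when done.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
	"sync"
)

var gzipReaders sync.Pool // the zero value is ready to use, as in decompress.go

func gunzip(data []byte) ([]byte, error) {
	var (
		reader *gzip.Reader
		err    error
	)
	if v := gzipReaders.Get(); v != nil {
		// Reuse the pooled reader against the new input.
		reader = v.(*gzip.Reader)
		if err = reader.Reset(bytes.NewReader(data)); err != nil {
			return nil, err
		}
	} else {
		if reader, err = gzip.NewReader(bytes.NewReader(data)); err != nil {
			return nil, err
		}
	}
	defer gzipReaders.Put(reader)
	return ioutil.ReadAll(reader)
}

func main() {
	// Round-trip a placeholder payload to exercise the pooled decompressor.
	var compressed bytes.Buffer
	w := gzip.NewWriter(&compressed)
	w.Write([]byte("example payload"))
	w.Close()

	out, err := gunzip(compressed.Bytes())
	fmt.Println(string(out), err)
}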
diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/Shopify/sarama/delete_groups_request.go
new file mode 100644
index 0000000..305a324
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_groups_request.go
@@ -0,0 +1,30 @@
+package sarama
+
+type DeleteGroupsRequest struct {
+ Groups []string
+}
+
+func (r *DeleteGroupsRequest) encode(pe packetEncoder) error {
+ return pe.putStringArray(r.Groups)
+}
+
+func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Groups, err = pd.getStringArray()
+ return
+}
+
+func (r *DeleteGroupsRequest) key() int16 {
+ return 42
+}
+
+func (r *DeleteGroupsRequest) version() int16 {
+ return 0
+}
+
+func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion {
+ return V1_1_0_0
+}
+
+func (r *DeleteGroupsRequest) AddGroup(group string) {
+ r.Groups = append(r.Groups, group)
+}
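A small usage sketch of the AddGroup helper above; the group IDs are placeholders, and the request would normally be issued through sarama's admin layer rather than constructed directly.

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.DeleteGroupsRequest{}
	// AddGroup appends to the Groups slice, so it is called once per group.
	req.AddGroup("example-group-1")
	req.AddGroup("example-group-2")
	fmt.Println(req.Groups)
}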
diff --git a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/Shopify/sarama/delete_groups_response.go
new file mode 100644
index 0000000..c067ebb
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_groups_response.go
@@ -0,0 +1,70 @@
+package sarama
+
+import (
+ "time"
+)
+
+type DeleteGroupsResponse struct {
+ ThrottleTime time.Duration
+ GroupErrorCodes map[string]KError
+}
+
+func (r *DeleteGroupsResponse) encode(pe packetEncoder) error {
+ pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+
+ if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil {
+ return err
+ }
+ for groupID, errorCode := range r.GroupErrorCodes {
+ if err := pe.putString(groupID); err != nil {
+ return err
+ }
+ pe.putInt16(int16(errorCode))
+ }
+
+ return nil
+}
+
+func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) error {
+ throttleTime, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.GroupErrorCodes = make(map[string]KError, n)
+ for i := 0; i < n; i++ {
+ groupID, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ errorCode, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.GroupErrorCodes[groupID] = KError(errorCode)
+ }
+
+ return nil
+}
+
+func (r *DeleteGroupsResponse) key() int16 {
+ return 42
+}
+
+func (r *DeleteGroupsResponse) version() int16 {
+ return 0
+}
+
+func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion {
+ return V1_1_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/Shopify/sarama/delete_records_request.go
new file mode 100644
index 0000000..93efafd
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_records_request.go
@@ -0,0 +1,126 @@
+package sarama
+
+import (
+ "sort"
+ "time"
+)
+
+// request message format is:
+// [topic] timeout(int32)
+// where topic is:
+// name(string) [partition]
+// where partition is:
+// id(int32) offset(int64)
+
+type DeleteRecordsRequest struct {
+ Topics map[string]*DeleteRecordsRequestTopic
+ Timeout time.Duration
+}
+
+func (d *DeleteRecordsRequest) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(d.Topics)); err != nil {
+ return err
+ }
+ keys := make([]string, 0, len(d.Topics))
+ for topic := range d.Topics {
+ keys = append(keys, topic)
+ }
+ sort.Strings(keys)
+ for _, topic := range keys {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := d.Topics[topic].encode(pe); err != nil {
+ return err
+ }
+ }
+ pe.putInt32(int32(d.Timeout / time.Millisecond))
+
+ return nil
+}
+
+func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if n > 0 {
+ d.Topics = make(map[string]*DeleteRecordsRequestTopic, n)
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ details := new(DeleteRecordsRequestTopic)
+ if err = details.decode(pd, version); err != nil {
+ return err
+ }
+ d.Topics[topic] = details
+ }
+ }
+
+ timeout, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ d.Timeout = time.Duration(timeout) * time.Millisecond
+
+ return nil
+}
+
+func (d *DeleteRecordsRequest) key() int16 {
+ return 21
+}
+
+func (d *DeleteRecordsRequest) version() int16 {
+ return 0
+}
+
+func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion {
+ return V0_11_0_0
+}
+
+type DeleteRecordsRequestTopic struct {
+ PartitionOffsets map[int32]int64 // partition => offset
+}
+
+func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil {
+ return err
+ }
+ keys := make([]int32, 0, len(t.PartitionOffsets))
+ for partition := range t.PartitionOffsets {
+ keys = append(keys, partition)
+ }
+ sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
+ for _, partition := range keys {
+ pe.putInt32(partition)
+ pe.putInt64(t.PartitionOffsets[partition])
+ }
+ return nil
+}
+
+func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if n > 0 {
+ t.PartitionOffsets = make(map[int32]int64, n)
+ for i := 0; i < n; i++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ offset, err := pd.getInt64()
+ if err != nil {
+ return err
+ }
+ t.PartitionOffsets[partition] = offset
+ }
+ }
+
+ return nil
+}
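To make the nested map layout above concrete, here is a hypothetical DeleteRecordsRequest asking the broker to truncate two partitions of one topic up to the given offsets; the topic name, partition IDs, and offsets are placeholders.

package main

import (
	"fmt"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.DeleteRecordsRequest{
		Topics: map[string]*sarama.DeleteRecordsRequestTopic{
			"example-topic": {
				// partition => delete every record before this offset
				PartitionOffsets: map[int32]int64{0: 1000, 1: 2500},
			},
		},
		Timeout: 10 * time.Second,
	}
	fmt.Printf("%+v\n", req)
}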
diff --git a/vendor/github.com/Shopify/sarama/delete_records_response.go b/vendor/github.com/Shopify/sarama/delete_records_response.go
new file mode 100644
index 0000000..733a58b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_records_response.go
@@ -0,0 +1,158 @@
+package sarama
+
+import (
+ "sort"
+ "time"
+)
+
+// response message format is:
+// throttleMs(int32) [topic]
+// where topic is:
+// name(string) [partition]
+// where partition is:
+// id(int32) low_watermark(int64) error_code(int16)
+
+type DeleteRecordsResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ Topics map[string]*DeleteRecordsResponseTopic
+}
+
+func (d *DeleteRecordsResponse) encode(pe packetEncoder) error {
+ pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
+
+ if err := pe.putArrayLength(len(d.Topics)); err != nil {
+ return err
+ }
+ keys := make([]string, 0, len(d.Topics))
+ for topic := range d.Topics {
+ keys = append(keys, topic)
+ }
+ sort.Strings(keys)
+ for _, topic := range keys {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := d.Topics[topic].encode(pe); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) error {
+ d.Version = version
+
+ throttleTime, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if n > 0 {
+ d.Topics = make(map[string]*DeleteRecordsResponseTopic, n)
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ details := new(DeleteRecordsResponseTopic)
+ if err = details.decode(pd, version); err != nil {
+ return err
+ }
+ d.Topics[topic] = details
+ }
+ }
+
+ return nil
+}
+
+func (d *DeleteRecordsResponse) key() int16 {
+ return 21
+}
+
+func (d *DeleteRecordsResponse) version() int16 {
+ return 0
+}
+
+func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion {
+ return V0_11_0_0
+}
+
+type DeleteRecordsResponseTopic struct {
+ Partitions map[int32]*DeleteRecordsResponsePartition
+}
+
+func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(t.Partitions)); err != nil {
+ return err
+ }
+ keys := make([]int32, 0, len(t.Partitions))
+ for partition := range t.Partitions {
+ keys = append(keys, partition)
+ }
+ sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
+ for _, partition := range keys {
+ pe.putInt32(partition)
+ if err := t.Partitions[partition].encode(pe); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if n > 0 {
+ t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n)
+ for i := 0; i < n; i++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ details := new(DeleteRecordsResponsePartition)
+ if err = details.decode(pd, version); err != nil {
+ return err
+ }
+ t.Partitions[partition] = details
+ }
+ }
+
+ return nil
+}
+
+type DeleteRecordsResponsePartition struct {
+ LowWatermark int64
+ Err KError
+}
+
+func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error {
+ pe.putInt64(t.LowWatermark)
+ pe.putInt16(int16(t.Err))
+ return nil
+}
+
+func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error {
+ lowWatermark, err := pd.getInt64()
+ if err != nil {
+ return err
+ }
+ t.LowWatermark = lowWatermark
+
+ kErr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ t.Err = KError(kErr)
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/Shopify/sarama/delete_topics_request.go
new file mode 100644
index 0000000..911f67d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_topics_request.go
@@ -0,0 +1,48 @@
+package sarama
+
+import "time"
+
+type DeleteTopicsRequest struct {
+ Version int16
+ Topics []string
+ Timeout time.Duration
+}
+
+func (d *DeleteTopicsRequest) encode(pe packetEncoder) error {
+ if err := pe.putStringArray(d.Topics); err != nil {
+ return err
+ }
+ pe.putInt32(int32(d.Timeout / time.Millisecond))
+
+ return nil
+}
+
+func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
+ if d.Topics, err = pd.getStringArray(); err != nil {
+ return err
+ }
+ timeout, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ d.Timeout = time.Duration(timeout) * time.Millisecond
+ d.Version = version
+ return nil
+}
+
+func (d *DeleteTopicsRequest) key() int16 {
+ return 20
+}
+
+func (d *DeleteTopicsRequest) version() int16 {
+ return d.Version
+}
+
+func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
+ switch d.Version {
+ case 1:
+ return V0_11_0_0
+ default:
+ return V0_10_1_0
+ }
+}
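A brief, hypothetical construction of the delete-topics request above; the topic names are placeholders and the version choice simply follows the requiredVersion mapping shown.

package main

import (
	"fmt"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.DeleteTopicsRequest{
		Version: 1, // per requiredVersion() above, v1 needs Kafka 0.11+
		Topics:  []string{"example-topic-a", "example-topic-b"},
		Timeout: 20 * time.Second,
	}
	fmt.Printf("%+v\n", req)
}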
diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/Shopify/sarama/delete_topics_response.go
new file mode 100644
index 0000000..3422546
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_topics_response.go
@@ -0,0 +1,78 @@
+package sarama
+
+import "time"
+
+type DeleteTopicsResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ TopicErrorCodes map[string]KError
+}
+
+func (d *DeleteTopicsResponse) encode(pe packetEncoder) error {
+ if d.Version >= 1 {
+ pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
+ }
+
+ if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil {
+ return err
+ }
+ for topic, errorCode := range d.TopicErrorCodes {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ pe.putInt16(int16(errorCode))
+ }
+
+ return nil
+}
+
+func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
+ if version >= 1 {
+ throttleTime, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+ d.Version = version
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ d.TopicErrorCodes = make(map[string]KError, n)
+
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ errorCode, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ d.TopicErrorCodes[topic] = KError(errorCode)
+ }
+
+ return nil
+}
+
+func (d *DeleteTopicsResponse) key() int16 {
+ return 20
+}
+
+func (d *DeleteTopicsResponse) version() int16 {
+ return d.Version
+}
+
+func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion {
+ switch d.Version {
+ case 1:
+ return V0_11_0_0
+ default:
+ return V0_10_1_0
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go
new file mode 100644
index 0000000..ccb587b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_configs_request.go
@@ -0,0 +1,112 @@
+package sarama
+
+type DescribeConfigsRequest struct {
+ Version int16
+ Resources []*ConfigResource
+ IncludeSynonyms bool
+}
+
+type ConfigResource struct {
+ Type ConfigResourceType
+ Name string
+ ConfigNames []string
+}
+
+func (r *DescribeConfigsRequest) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(r.Resources)); err != nil {
+ return err
+ }
+
+ for _, c := range r.Resources {
+ pe.putInt8(int8(c.Type))
+ if err := pe.putString(c.Name); err != nil {
+ return err
+ }
+
+ if len(c.ConfigNames) == 0 {
+ pe.putInt32(-1)
+ continue
+ }
+ if err := pe.putStringArray(c.ConfigNames); err != nil {
+ return err
+ }
+ }
+
+ if r.Version >= 1 {
+ pe.putBool(r.IncludeSynonyms)
+ }
+
+ return nil
+}
+
+func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Resources = make([]*ConfigResource, n)
+
+ for i := 0; i < n; i++ {
+ r.Resources[i] = &ConfigResource{}
+ t, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ r.Resources[i].Type = ConfigResourceType(t)
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.Resources[i].Name = name
+
+ confLength, err := pd.getArrayLength()
+
+ if err != nil {
+ return err
+ }
+
+ if confLength == -1 {
+ continue
+ }
+
+ cfnames := make([]string, confLength)
+ for i := 0; i < confLength; i++ {
+ s, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ cfnames[i] = s
+ }
+ r.Resources[i].ConfigNames = cfnames
+ }
+ r.Version = version
+ if r.Version >= 1 {
+ b, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+ r.IncludeSynonyms = b
+ }
+
+ return nil
+}
+
+func (r *DescribeConfigsRequest) key() int16 {
+ return 32
+}
+
+func (r *DescribeConfigsRequest) version() int16 {
+ return r.Version
+}
+
+func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V1_1_0_0
+ case 2:
+ return V2_0_0_0
+ default:
+ return V0_11_0_0
+ }
+}
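A hedged sketch of building a DescribeConfigsRequest for a single topic. The TopicResource constant used here is assumed to be the package's ConfigResourceType value for topics (it is defined outside this file); the topic and config names are placeholders.

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.DescribeConfigsRequest{
		Version: 1, // per requiredVersion() above, v1 needs Kafka 1.1+ and adds IncludeSynonyms
		Resources: []*sarama.ConfigResource{
			{
				Type:        sarama.TopicResource, // assumed ConfigResourceType constant for topics
				Name:        "example-topic",
				ConfigNames: []string{"retention.ms", "cleanup.policy"},
			},
		},
		IncludeSynonyms: true,
	}
	fmt.Printf("%+v\n", req)
}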
diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go
new file mode 100644
index 0000000..5737232
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_configs_response.go
@@ -0,0 +1,320 @@
+package sarama
+
+import (
+ "fmt"
+ "time"
+)
+
+type ConfigSource int8
+
+func (s ConfigSource) String() string {
+ switch s {
+ case SourceUnknown:
+ return "Unknown"
+ case SourceTopic:
+ return "Topic"
+ case SourceDynamicBroker:
+ return "DynamicBroker"
+ case SourceDynamicDefaultBroker:
+ return "DynamicDefaultBroker"
+ case SourceStaticBroker:
+ return "StaticBroker"
+ case SourceDefault:
+ return "Default"
+ }
+ return fmt.Sprintf("Source Invalid: %d", int(s))
+}
+
+const (
+ SourceUnknown ConfigSource = iota
+ SourceTopic
+ SourceDynamicBroker
+ SourceDynamicDefaultBroker
+ SourceStaticBroker
+ SourceDefault
+)
+
+type DescribeConfigsResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ Resources []*ResourceResponse
+}
+
+type ResourceResponse struct {
+ ErrorCode int16
+ ErrorMsg string
+ Type ConfigResourceType
+ Name string
+ Configs []*ConfigEntry
+}
+
+type ConfigEntry struct {
+ Name string
+ Value string
+ ReadOnly bool
+ Default bool
+ Source ConfigSource
+ Sensitive bool
+ Synonyms []*ConfigSynonym
+}
+
+type ConfigSynonym struct {
+ ConfigName string
+ ConfigValue string
+ Source ConfigSource
+}
+
+func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) {
+ pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+ if err = pe.putArrayLength(len(r.Resources)); err != nil {
+ return err
+ }
+
+ for _, c := range r.Resources {
+ if err = c.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ throttleTime, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Resources = make([]*ResourceResponse, n)
+ for i := 0; i < n; i++ {
+ rr := &ResourceResponse{}
+ if err := rr.decode(pd, version); err != nil {
+ return err
+ }
+ r.Resources[i] = rr
+ }
+
+ return nil
+}
+
+func (r *DescribeConfigsResponse) key() int16 {
+ return 32
+}
+
+func (r *DescribeConfigsResponse) version() int16 {
+ return r.Version
+}
+
+func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V1_0_0_0
+ case 2:
+ return V2_0_0_0
+ default:
+ return V0_11_0_0
+ }
+}
+
+func (r *ResourceResponse) encode(pe packetEncoder, version int16) (err error) {
+ pe.putInt16(r.ErrorCode)
+
+ if err = pe.putString(r.ErrorMsg); err != nil {
+ return err
+ }
+
+ pe.putInt8(int8(r.Type))
+
+ if err = pe.putString(r.Name); err != nil {
+ return err
+ }
+
+ if err = pe.putArrayLength(len(r.Configs)); err != nil {
+ return err
+ }
+
+ for _, c := range r.Configs {
+ if err = c.encode(pe, version); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) {
+ ec, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.ErrorCode = ec
+
+ em, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.ErrorMsg = em
+
+ t, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ r.Type = ConfigResourceType(t)
+
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.Name = name
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Configs = make([]*ConfigEntry, n)
+ for i := 0; i < n; i++ {
+ c := &ConfigEntry{}
+ if err := c.decode(pd, version); err != nil {
+ return err
+ }
+ r.Configs[i] = c
+ }
+ return nil
+}
+
+func (r *ConfigEntry) encode(pe packetEncoder, version int16) (err error) {
+ if err = pe.putString(r.Name); err != nil {
+ return err
+ }
+
+ if err = pe.putString(r.Value); err != nil {
+ return err
+ }
+
+ pe.putBool(r.ReadOnly)
+
+ if version <= 0 {
+ pe.putBool(r.Default)
+ pe.putBool(r.Sensitive)
+ } else {
+ pe.putInt8(int8(r.Source))
+ pe.putBool(r.Sensitive)
+
+ if err := pe.putArrayLength(len(r.Synonyms)); err != nil {
+ return err
+ }
+ for _, c := range r.Synonyms {
+ if err = c.encode(pe, version); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+//https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration
+func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
+ if version == 0 {
+ r.Source = SourceUnknown
+ }
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.Name = name
+
+ value, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.Value = value
+
+ read, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+ r.ReadOnly = read
+
+ if version == 0 {
+ defaultB, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+ r.Default = defaultB
+ } else {
+ source, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ r.Source = ConfigSource(source)
+ }
+
+ sensitive, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+ r.Sensitive = sensitive
+
+ if version > 0 {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.Synonyms = make([]*ConfigSynonym, n)
+
+ for i := 0; i < n; i++ {
+ s := &ConfigSynonym{}
+ if err := s.decode(pd, version); err != nil {
+ return err
+ }
+ r.Synonyms[i] = s
+ }
+
+ }
+ return nil
+}
+
+func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) {
+ err = pe.putString(c.ConfigName)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putString(c.ConfigValue)
+ if err != nil {
+ return err
+ }
+
+ pe.putInt8(int8(c.Source))
+
+ return nil
+}
+
+func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ c.ConfigName = name
+
+ value, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ c.ConfigValue = value
+
+ source, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ c.Source = ConfigSource(source)
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go
new file mode 100644
index 0000000..1fb3567
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go
@@ -0,0 +1,30 @@
+package sarama
+
+type DescribeGroupsRequest struct {
+ Groups []string
+}
+
+func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
+ return pe.putStringArray(r.Groups)
+}
+
+func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Groups, err = pd.getStringArray()
+ return
+}
+
+func (r *DescribeGroupsRequest) key() int16 {
+ return 15
+}
+
+func (r *DescribeGroupsRequest) version() int16 {
+ return 0
+}
+
+func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
+
+func (r *DescribeGroupsRequest) AddGroup(group string) {
+ r.Groups = append(r.Groups, group)
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go
new file mode 100644
index 0000000..542b3a9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go
@@ -0,0 +1,187 @@
+package sarama
+
+type DescribeGroupsResponse struct {
+ Groups []*GroupDescription
+}
+
+func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(r.Groups)); err != nil {
+ return err
+ }
+
+ for _, groupDescription := range r.Groups {
+ if err := groupDescription.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Groups = make([]*GroupDescription, n)
+ for i := 0; i < n; i++ {
+ r.Groups[i] = new(GroupDescription)
+ if err := r.Groups[i].decode(pd); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *DescribeGroupsResponse) key() int16 {
+ return 15
+}
+
+func (r *DescribeGroupsResponse) version() int16 {
+ return 0
+}
+
+func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
+
+type GroupDescription struct {
+ Err KError
+ GroupId string
+ State string
+ ProtocolType string
+ Protocol string
+ Members map[string]*GroupMemberDescription
+}
+
+func (gd *GroupDescription) encode(pe packetEncoder) error {
+ pe.putInt16(int16(gd.Err))
+
+ if err := pe.putString(gd.GroupId); err != nil {
+ return err
+ }
+ if err := pe.putString(gd.State); err != nil {
+ return err
+ }
+ if err := pe.putString(gd.ProtocolType); err != nil {
+ return err
+ }
+ if err := pe.putString(gd.Protocol); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(gd.Members)); err != nil {
+ return err
+ }
+
+ for memberId, groupMemberDescription := range gd.Members {
+ if err := pe.putString(memberId); err != nil {
+ return err
+ }
+ if err := groupMemberDescription.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ gd.Err = KError(kerr)
+
+ if gd.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if gd.State, err = pd.getString(); err != nil {
+ return
+ }
+ if gd.ProtocolType, err = pd.getString(); err != nil {
+ return
+ }
+ if gd.Protocol, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ gd.Members = make(map[string]*GroupMemberDescription)
+ for i := 0; i < n; i++ {
+ memberId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ gd.Members[memberId] = new(GroupMemberDescription)
+ if err := gd.Members[memberId].decode(pd); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type GroupMemberDescription struct {
+ ClientId string
+ ClientHost string
+ MemberMetadata []byte
+ MemberAssignment []byte
+}
+
+func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
+ if err := pe.putString(gmd.ClientId); err != nil {
+ return err
+ }
+ if err := pe.putString(gmd.ClientHost); err != nil {
+ return err
+ }
+ if err := pe.putBytes(gmd.MemberMetadata); err != nil {
+ return err
+ }
+ if err := pe.putBytes(gmd.MemberAssignment); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
+ if gmd.ClientId, err = pd.getString(); err != nil {
+ return
+ }
+ if gmd.ClientHost, err = pd.getString(); err != nil {
+ return
+ }
+ if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
+ return
+ }
+ if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
+ return
+ }
+
+ return nil
+}
+
+func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+ assignment := new(ConsumerGroupMemberAssignment)
+ err := decode(gmd.MemberAssignment, assignment)
+ return assignment, err
+}
+
+func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
+ metadata := new(ConsumerGroupMemberMetadata)
+ err := decode(gmd.MemberMetadata, metadata)
+ return metadata, err
+}
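The two Get* helpers above decode the opaque member blobs. The sketch below walks a GroupDescription, however it was obtained, and decodes each member's assignment; it deliberately prints the decoded value as-is rather than assuming its field layout, since ConsumerGroupMemberAssignment is defined elsewhere in the package.

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// printAssignments decodes and prints each member's assignment from an
// already-obtained GroupDescription (for example, one taken out of a
// DescribeGroupsResponse).
func printAssignments(gd *sarama.GroupDescription) {
	for memberID, member := range gd.Members {
		assignment, err := member.GetMemberAssignment()
		if err != nil {
			fmt.Printf("member %s: cannot decode assignment: %v\n", memberID, err)
			continue
		}
		fmt.Printf("member %s (%s@%s): %+v\n", memberID, member.ClientId, member.ClientHost, assignment)
	}
}

func main() {
	// Placeholder input so the sketch runs on its own.
	printAssignments(&sarama.GroupDescription{Members: map[string]*sarama.GroupMemberDescription{}})
}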
diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go
new file mode 100644
index 0000000..cb1e781
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go
@@ -0,0 +1,83 @@
+package sarama
+
+// DescribeLogDirsRequest is a describe request to get partitions' log size
+type DescribeLogDirsRequest struct {
+ // Versions 0 and 1 are identical on the wire.
+ // The version number was bumped to indicate that, on quota violation, brokers send out responses before throttling.
+ Version int16
+
+ // If this is an empty array, all topics will be queried
+ DescribeTopics []DescribeLogDirsRequestTopic
+}
+
+// DescribeLogDirsRequestTopic is a describe request about the log dir of one or more partitions within a Topic
+type DescribeLogDirsRequestTopic struct {
+ Topic string
+ PartitionIDs []int32
+}
+
+func (r *DescribeLogDirsRequest) encode(pe packetEncoder) error {
+ length := len(r.DescribeTopics)
+ if length == 0 {
+ // In order to query all topics we must send null
+ length = -1
+ }
+
+ if err := pe.putArrayLength(length); err != nil {
+ return err
+ }
+
+ for _, d := range r.DescribeTopics {
+ if err := pe.putString(d.Topic); err != nil {
+ return err
+ }
+
+ if err := pe.putInt32Array(d.PartitionIDs); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *DescribeLogDirsRequest) decode(pd packetDecoder, version int16) error {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == -1 {
+ n = 0
+ }
+
+ topics := make([]DescribeLogDirsRequestTopic, n)
+ for i := 0; i < n; i++ {
+ topics[i] = DescribeLogDirsRequestTopic{}
+
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ topics[i].Topic = topic
+
+ pIDs, err := pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+ topics[i].PartitionIDs = pIDs
+ }
+ r.DescribeTopics = topics
+
+ return nil
+}
+
+func (r *DescribeLogDirsRequest) key() int16 {
+ return 35
+}
+
+func (r *DescribeLogDirsRequest) version() int16 {
+ return r.Version
+}
+
+func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion {
+ return V1_0_0_0
+}
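To illustrate the "empty means all topics" behaviour documented above, here is a hypothetical pair of requests: one that queries every topic and one scoped to specific partitions. The topic name and partition IDs are placeholders.

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// An empty DescribeTopics slice is encoded as a null array, asking for all topics.
	allTopics := &sarama.DescribeLogDirsRequest{}

	// A scoped request names the topic and the partitions of interest.
	scoped := &sarama.DescribeLogDirsRequest{
		DescribeTopics: []sarama.DescribeLogDirsRequestTopic{
			{Topic: "example-topic", PartitionIDs: []int32{0, 1, 2}},
		},
	}
	fmt.Printf("%+v\n%+v\n", allTopics, scoped)
}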
diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go
new file mode 100644
index 0000000..d207312
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go
@@ -0,0 +1,219 @@
+package sarama
+
+import "time"
+
+type DescribeLogDirsResponse struct {
+ ThrottleTime time.Duration
+
+ // Versions 0 and 1 are identical on the wire.
+ // The version number was bumped to indicate that, on quota violation, brokers send out responses before throttling.
+ Version int16
+
+ LogDirs []DescribeLogDirsResponseDirMetadata
+}
+
+func (r *DescribeLogDirsResponse) encode(pe packetEncoder) error {
+ pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+
+ if err := pe.putArrayLength(len(r.LogDirs)); err != nil {
+ return err
+ }
+
+ for _, dir := range r.LogDirs {
+ if err := dir.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *DescribeLogDirsResponse) decode(pd packetDecoder, version int16) error {
+ throttleTime, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+ // Decode array of DescribeLogDirsResponseDirMetadata
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.LogDirs = make([]DescribeLogDirsResponseDirMetadata, n)
+ for i := 0; i < n; i++ {
+ dir := DescribeLogDirsResponseDirMetadata{}
+ if err := dir.decode(pd, version); err != nil {
+ return err
+ }
+ r.LogDirs[i] = dir
+ }
+
+ return nil
+}
+
+func (r *DescribeLogDirsResponse) key() int16 {
+ return 35
+}
+
+func (r *DescribeLogDirsResponse) version() int16 {
+ return r.Version
+}
+
+func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion {
+ return V1_0_0_0
+}
+
+type DescribeLogDirsResponseDirMetadata struct {
+ ErrorCode KError
+
+ // The absolute log directory path
+ Path string
+ Topics []DescribeLogDirsResponseTopic
+}
+
+func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.ErrorCode))
+
+ if err := pe.putString(r.Path); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(r.Topics)); err != nil {
+ return err
+ }
+
+ for _, topic := range r.Topics {
+ if err := topic.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *DescribeLogDirsResponseDirMetadata) decode(pd packetDecoder, version int16) error {
+ errCode, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.ErrorCode = KError(errCode)
+
+ path, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.Path = path
+
+ // Decode array of DescribeLogDirsResponseTopic
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Topics = make([]DescribeLogDirsResponseTopic, n)
+ for i := 0; i < n; i++ {
+ t := DescribeLogDirsResponseTopic{}
+
+ if err := t.decode(pd, version); err != nil {
+ return err
+ }
+
+ r.Topics[i] = t
+ }
+
+ return nil
+}
+
+// DescribeLogDirsResponseTopic contains a topic's partition descriptions
+type DescribeLogDirsResponseTopic struct {
+ Topic string
+ Partitions []DescribeLogDirsResponsePartition
+}
+
+func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder) error {
+ if err := pe.putString(r.Topic); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(r.Partitions)); err != nil {
+ return err
+ }
+
+ for _, partition := range r.Partitions {
+ if err := partition.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *DescribeLogDirsResponseTopic) decode(pd packetDecoder, version int16) error {
+ t, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.Topic = t
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.Partitions = make([]DescribeLogDirsResponsePartition, n)
+ for i := 0; i < n; i++ {
+ p := DescribeLogDirsResponsePartition{}
+ if err := p.decode(pd, version); err != nil {
+ return err
+ }
+ r.Partitions[i] = p
+ }
+
+ return nil
+}
+
+// DescribeLogDirsResponsePartition describes a partition's log directory
+type DescribeLogDirsResponsePartition struct {
+ PartitionID int32
+
+ // The size of the log segments of the partition in bytes.
+ Size int64
+
+ // The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or
+ // current replica's LEO (if it is the future log for the partition)
+ OffsetLag int64
+
+ // True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of
+ // the replica in the future.
+ IsTemporary bool
+}
+
+func (r *DescribeLogDirsResponsePartition) encode(pe packetEncoder) error {
+ pe.putInt32(r.PartitionID)
+ pe.putInt64(r.Size)
+ pe.putInt64(r.OffsetLag)
+ pe.putBool(r.IsTemporary)
+
+ return nil
+}
+
+func (r *DescribeLogDirsResponsePartition) decode(pd packetDecoder, version int16) error {
+ pID, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ r.PartitionID = pID
+
+ size, err := pd.getInt64()
+ if err != nil {
+ return err
+ }
+ r.Size = size
+
+ lag, err := pd.getInt64()
+ if err != nil {
+ return err
+ }
+ r.OffsetLag = lag
+
+ isTemp, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+ r.IsTemporary = isTemp
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml
new file mode 100644
index 0000000..481f681
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/dev.yml
@@ -0,0 +1,10 @@
+name: sarama
+
+up:
+ - go:
+ version: '1.13.1'
+
+commands:
+ test:
+ run: make test
+ desc: 'run unit tests'
diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go
new file mode 100644
index 0000000..7ce3bc0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/encoder_decoder.go
@@ -0,0 +1,89 @@
+package sarama
+
+import (
+ "fmt"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+// Encoder is the interface that wraps the basic Encode method.
+// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules.
+type encoder interface {
+ encode(pe packetEncoder) error
+}
+
+// Encode takes an Encoder and turns it into bytes while potentially recording metrics.
+func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
+ if e == nil {
+ return nil, nil
+ }
+
+ var prepEnc prepEncoder
+ var realEnc realEncoder
+
+ err := e.encode(&prepEnc)
+ if err != nil {
+ return nil, err
+ }
+
+ if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
+ return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
+ }
+
+ realEnc.raw = make([]byte, prepEnc.length)
+ realEnc.registry = metricRegistry
+ err = e.encode(&realEnc)
+ if err != nil {
+ return nil, err
+ }
+
+ return realEnc.raw, nil
+}
+
+// Decoder is the interface that wraps the basic Decode method.
+// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules.
+type decoder interface {
+ decode(pd packetDecoder) error
+}
+
+type versionedDecoder interface {
+ decode(pd packetDecoder, version int16) error
+}
+
+// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes,
+// interpreted using Kafka's encoding rules.
+func decode(buf []byte, in decoder) error {
+ if buf == nil {
+ return nil
+ }
+
+ helper := realDecoder{raw: buf}
+ err := in.decode(&helper)
+ if err != nil {
+ return err
+ }
+
+ if helper.off != len(buf) {
+ return PacketDecodingError{"invalid length"}
+ }
+
+ return nil
+}
+
+func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
+ if buf == nil {
+ return nil
+ }
+
+ helper := realDecoder{raw: buf}
+ err := in.decode(&helper, version)
+ if err != nil {
+ return err
+ }
+
+ if helper.off != len(buf) {
+ return PacketDecodingError{"invalid length"}
+ }
+
+ return nil
+}
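The encode helper above makes two passes: a prepEncoder measures the final size, then a realEncoder fills a buffer of exactly that length. The standalone sketch below (standard library only, not sarama's encoder types) illustrates the same measure-then-write idea for a single Kafka-style length-prefixed string.

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeString writes a Kafka-style string (int16 length prefix followed by
// the bytes) into buf at off and returns the new offset.
func encodeString(buf []byte, off int, s string) int {
	binary.BigEndian.PutUint16(buf[off:], uint16(len(s)))
	copy(buf[off+2:], s)
	return off + 2 + len(s)
}

func main() {
	msg := "example"

	// Pass 1: the "prep" pass only measures the encoded length.
	length := 2 + len(msg)

	// Pass 2: the "real" pass allocates exactly that much and writes into it,
	// mirroring how encode() sizes realEnc.raw from prepEnc.length.
	raw := make([]byte, length)
	end := encodeString(raw, 0, msg)

	fmt.Printf("encoded %d bytes: % x\n", end, raw)
}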
diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/Shopify/sarama/end_txn_request.go
new file mode 100644
index 0000000..2cd9b50
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/end_txn_request.go
@@ -0,0 +1,50 @@
+package sarama
+
+type EndTxnRequest struct {
+ TransactionalID string
+ ProducerID int64
+ ProducerEpoch int16
+ TransactionResult bool
+}
+
+func (a *EndTxnRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(a.TransactionalID); err != nil {
+ return err
+ }
+
+ pe.putInt64(a.ProducerID)
+
+ pe.putInt16(a.ProducerEpoch)
+
+ pe.putBool(a.TransactionResult)
+
+ return nil
+}
+
+func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+ if a.TransactionalID, err = pd.getString(); err != nil {
+ return err
+ }
+ if a.ProducerID, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+ return err
+ }
+ if a.TransactionResult, err = pd.getBool(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (a *EndTxnRequest) key() int16 {
+ return 26
+}
+
+func (a *EndTxnRequest) version() int16 {
+ return 0
+}
+
+func (a *EndTxnRequest) requiredVersion() KafkaVersion {
+ return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/Shopify/sarama/end_txn_response.go
new file mode 100644
index 0000000..33b27e3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/end_txn_response.go
@@ -0,0 +1,44 @@
+package sarama
+
+import (
+ "time"
+)
+
+type EndTxnResponse struct {
+ ThrottleTime time.Duration
+ Err KError
+}
+
+func (e *EndTxnResponse) encode(pe packetEncoder) error {
+ pe.putInt32(int32(e.ThrottleTime / time.Millisecond))
+ pe.putInt16(int16(e.Err))
+ return nil
+}
+
+func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+ throttleTime, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ e.Err = KError(kerr)
+
+ return nil
+}
+
+func (e *EndTxnResponse) key() int16 {
+ return 25
+}
+
+func (e *EndTxnResponse) version() int16 {
+ return 0
+}
+
+func (e *EndTxnResponse) requiredVersion() KafkaVersion {
+ return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go
new file mode 100644
index 0000000..97be3c0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/errors.go
@@ -0,0 +1,369 @@
+package sarama
+
+import (
+ "errors"
+ "fmt"
+)
+
+// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
+// or otherwise failed to respond.
+var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
+
+// ErrClosedClient is the error returned when a method is called on a client that has been closed.
+var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
+
+// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
+// not contain the expected information.
+var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
+
+// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
+// (meaning one outside of the range [0...numPartitions-1]).
+var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
+
+// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
+var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
+
+// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
+var ErrNotConnected = errors.New("kafka: broker not connected")
+
+// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
+// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
+// of the message set.
+var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
+
+// ErrShuttingDown is returned when a producer receives a message during shutdown.
+var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
+
+// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
+var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
+
+// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing
+// a RecordBatch.
+var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch")
+
+// ErrControllerNotAvailable is returned when the server didn't give a correct controller id. This may happen if the
+// Kafka server's version is lower than 0.10.0.0.
+var ErrControllerNotAvailable = errors.New("kafka: controller is not available")
+
+// ErrNoTopicsToUpdateMetadata is returned when Meta.Full is set to false but no specific topics were found to update
+// the metadata.
+var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
+
+// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
+// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
+type PacketEncodingError struct {
+ Info string
+}
+
+func (err PacketEncodingError) Error() string {
+ return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
+}
+
+// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
+// This can be a bad CRC or length field, or any other invalid value.
+type PacketDecodingError struct {
+ Info string
+}
+
+func (err PacketDecodingError) Error() string {
+ return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
+}
+
+// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
+// when the specified configuration is invalid.
+type ConfigurationError string
+
+func (err ConfigurationError) Error() string {
+ return "kafka: invalid configuration (" + string(err) + ")"
+}
+
+// KError is the type of error that can be returned directly by the Kafka broker.
+// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
+type KError int16
+
+// MultiError is used to contain multiple errors.
+type MultiError struct {
+ Errors *[]error
+}
+
+func (mErr MultiError) Error() string {
+ var errString = ""
+ for _, err := range *mErr.Errors {
+ errString += err.Error() + ","
+ }
+ return errString
+}
+
+// ErrDeleteRecords is the type of error returned when the deletion of the required records fails.
+type ErrDeleteRecords struct {
+ MultiError
+}
+
+func (err ErrDeleteRecords) Error() string {
+ return "kafka server: failed to delete records " + err.MultiError.Error()
+}
+
+// Numeric error codes returned by the Kafka server.
+const (
+ ErrNoError KError = 0
+ ErrUnknown KError = -1
+ ErrOffsetOutOfRange KError = 1
+ ErrInvalidMessage KError = 2
+ ErrUnknownTopicOrPartition KError = 3
+ ErrInvalidMessageSize KError = 4
+ ErrLeaderNotAvailable KError = 5
+ ErrNotLeaderForPartition KError = 6
+ ErrRequestTimedOut KError = 7
+ ErrBrokerNotAvailable KError = 8
+ ErrReplicaNotAvailable KError = 9
+ ErrMessageSizeTooLarge KError = 10
+ ErrStaleControllerEpochCode KError = 11
+ ErrOffsetMetadataTooLarge KError = 12
+ ErrNetworkException KError = 13
+ ErrOffsetsLoadInProgress KError = 14
+ ErrConsumerCoordinatorNotAvailable KError = 15
+ ErrNotCoordinatorForConsumer KError = 16
+ ErrInvalidTopic KError = 17
+ ErrMessageSetSizeTooLarge KError = 18
+ ErrNotEnoughReplicas KError = 19
+ ErrNotEnoughReplicasAfterAppend KError = 20
+ ErrInvalidRequiredAcks KError = 21
+ ErrIllegalGeneration KError = 22
+ ErrInconsistentGroupProtocol KError = 23
+ ErrInvalidGroupId KError = 24
+ ErrUnknownMemberId KError = 25
+ ErrInvalidSessionTimeout KError = 26
+ ErrRebalanceInProgress KError = 27
+ ErrInvalidCommitOffsetSize KError = 28
+ ErrTopicAuthorizationFailed KError = 29
+ ErrGroupAuthorizationFailed KError = 30
+ ErrClusterAuthorizationFailed KError = 31
+ ErrInvalidTimestamp KError = 32
+ ErrUnsupportedSASLMechanism KError = 33
+ ErrIllegalSASLState KError = 34
+ ErrUnsupportedVersion KError = 35
+ ErrTopicAlreadyExists KError = 36
+ ErrInvalidPartitions KError = 37
+ ErrInvalidReplicationFactor KError = 38
+ ErrInvalidReplicaAssignment KError = 39
+ ErrInvalidConfig KError = 40
+ ErrNotController KError = 41
+ ErrInvalidRequest KError = 42
+ ErrUnsupportedForMessageFormat KError = 43
+ ErrPolicyViolation KError = 44
+ ErrOutOfOrderSequenceNumber KError = 45
+ ErrDuplicateSequenceNumber KError = 46
+ ErrInvalidProducerEpoch KError = 47
+ ErrInvalidTxnState KError = 48
+ ErrInvalidProducerIDMapping KError = 49
+ ErrInvalidTransactionTimeout KError = 50
+ ErrConcurrentTransactions KError = 51
+ ErrTransactionCoordinatorFenced KError = 52
+ ErrTransactionalIDAuthorizationFailed KError = 53
+ ErrSecurityDisabled KError = 54
+ ErrOperationNotAttempted KError = 55
+ ErrKafkaStorageError KError = 56
+ ErrLogDirNotFound KError = 57
+ ErrSASLAuthenticationFailed KError = 58
+ ErrUnknownProducerID KError = 59
+ ErrReassignmentInProgress KError = 60
+ ErrDelegationTokenAuthDisabled KError = 61
+ ErrDelegationTokenNotFound KError = 62
+ ErrDelegationTokenOwnerMismatch KError = 63
+ ErrDelegationTokenRequestNotAllowed KError = 64
+ ErrDelegationTokenAuthorizationFailed KError = 65
+ ErrDelegationTokenExpired KError = 66
+ ErrInvalidPrincipalType KError = 67
+ ErrNonEmptyGroup KError = 68
+ ErrGroupIDNotFound KError = 69
+ ErrFetchSessionIDNotFound KError = 70
+ ErrInvalidFetchSessionEpoch KError = 71
+ ErrListenerNotFound KError = 72
+ ErrTopicDeletionDisabled KError = 73
+ ErrFencedLeaderEpoch KError = 74
+ ErrUnknownLeaderEpoch KError = 75
+ ErrUnsupportedCompressionType KError = 76
+ ErrStaleBrokerEpoch KError = 77
+ ErrOffsetNotAvailable KError = 78
+ ErrMemberIdRequired KError = 79
+ ErrPreferredLeaderNotAvailable KError = 80
+ ErrGroupMaxSizeReached KError = 81
+ ErrFencedInstancedId KError = 82
+)
+
+func (err KError) Error() string {
+ // Error messages stolen/adapted from
+ // https://kafka.apache.org/protocol#protocol_error_codes
+ switch err {
+ case ErrNoError:
+ return "kafka server: Not an error, why are you printing me?"
+ case ErrUnknown:
+ return "kafka server: Unexpected (unknown?) server error."
+ case ErrOffsetOutOfRange:
+ return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
+ case ErrInvalidMessage:
+ return "kafka server: Message contents does not match its CRC."
+ case ErrUnknownTopicOrPartition:
+ return "kafka server: Request was for a topic or partition that does not exist on this broker."
+ case ErrInvalidMessageSize:
+ return "kafka server: The message has a negative size."
+ case ErrLeaderNotAvailable:
+ return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
+ case ErrNotLeaderForPartition:
+ return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
+ case ErrRequestTimedOut:
+ return "kafka server: Request exceeded the user-specified time limit in the request."
+ case ErrBrokerNotAvailable:
+ return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
+ case ErrReplicaNotAvailable:
+ return "kafka server: Replica information not available, one or more brokers are down."
+ case ErrMessageSizeTooLarge:
+ return "kafka server: Message was too large, server rejected it to avoid allocation error."
+ case ErrStaleControllerEpochCode:
+ return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
+ case ErrOffsetMetadataTooLarge:
+ return "kafka server: Specified a string larger than the configured maximum for offset metadata."
+ case ErrNetworkException:
+ return "kafka server: The server disconnected before a response was received."
+ case ErrOffsetsLoadInProgress:
+ return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
+ case ErrConsumerCoordinatorNotAvailable:
+ return "kafka server: Offset's topic has not yet been created."
+ case ErrNotCoordinatorForConsumer:
+ return "kafka server: Request was for a consumer group that is not coordinated by this broker."
+ case ErrInvalidTopic:
+ return "kafka server: The request attempted to perform an operation on an invalid topic."
+ case ErrMessageSetSizeTooLarge:
+ return "kafka server: The request included message batch larger than the configured segment size on the server."
+ case ErrNotEnoughReplicas:
+ return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
+ case ErrNotEnoughReplicasAfterAppend:
+ return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
+ case ErrInvalidRequiredAcks:
+ return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
+ case ErrIllegalGeneration:
+ return "kafka server: The provided generation id is not the current generation."
+ case ErrInconsistentGroupProtocol:
+ return "kafka server: The provider group protocol type is incompatible with the other members."
+ case ErrInvalidGroupId:
+ return "kafka server: The provided group id was empty."
+ case ErrUnknownMemberId:
+ return "kafka server: The provided member is not known in the current generation."
+ case ErrInvalidSessionTimeout:
+ return "kafka server: The provided session timeout is outside the allowed range."
+ case ErrRebalanceInProgress:
+ return "kafka server: A rebalance for the group is in progress. Please re-join the group."
+ case ErrInvalidCommitOffsetSize:
+ return "kafka server: The provided commit metadata was too large."
+ case ErrTopicAuthorizationFailed:
+ return "kafka server: The client is not authorized to access this topic."
+ case ErrGroupAuthorizationFailed:
+ return "kafka server: The client is not authorized to access this group."
+ case ErrClusterAuthorizationFailed:
+ return "kafka server: The client is not authorized to send this request type."
+ case ErrInvalidTimestamp:
+ return "kafka server: The timestamp of the message is out of acceptable range."
+ case ErrUnsupportedSASLMechanism:
+ return "kafka server: The broker does not support the requested SASL mechanism."
+ case ErrIllegalSASLState:
+ return "kafka server: Request is not valid given the current SASL state."
+ case ErrUnsupportedVersion:
+ return "kafka server: The version of API is not supported."
+ case ErrTopicAlreadyExists:
+ return "kafka server: Topic with this name already exists."
+ case ErrInvalidPartitions:
+ return "kafka server: Number of partitions is invalid."
+ case ErrInvalidReplicationFactor:
+ return "kafka server: Replication-factor is invalid."
+ case ErrInvalidReplicaAssignment:
+ return "kafka server: Replica assignment is invalid."
+ case ErrInvalidConfig:
+ return "kafka server: Configuration is invalid."
+ case ErrNotController:
+ return "kafka server: This is not the correct controller for this cluster."
+ case ErrInvalidRequest:
+ return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
+ case ErrUnsupportedForMessageFormat:
+ return "kafka server: The requested operation is not supported by the message format version."
+ case ErrPolicyViolation:
+ return "kafka server: Request parameters do not satisfy the configured policy."
+ case ErrOutOfOrderSequenceNumber:
+ return "kafka server: The broker received an out of order sequence number."
+ case ErrDuplicateSequenceNumber:
+ return "kafka server: The broker received a duplicate sequence number."
+ case ErrInvalidProducerEpoch:
+ return "kafka server: Producer attempted an operation with an old epoch."
+ case ErrInvalidTxnState:
+ return "kafka server: The producer attempted a transactional operation in an invalid state."
+ case ErrInvalidProducerIDMapping:
+ return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id."
+ case ErrInvalidTransactionTimeout:
+ return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)."
+ case ErrConcurrentTransactions:
+ return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing."
+ case ErrTransactionCoordinatorFenced:
+ return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer."
+ case ErrTransactionalIDAuthorizationFailed:
+ return "kafka server: Transactional ID authorization failed."
+ case ErrSecurityDisabled:
+ return "kafka server: Security features are disabled."
+ case ErrOperationNotAttempted:
+ return "kafka server: The broker did not attempt to execute this operation."
+ case ErrKafkaStorageError:
+ return "kafka server: Disk error when trying to access log file on the disk."
+ case ErrLogDirNotFound:
+ return "kafka server: The specified log directory is not found in the broker config."
+ case ErrSASLAuthenticationFailed:
+ return "kafka server: SASL Authentication failed."
+ case ErrUnknownProducerID:
+ return "kafka server: The broker could not locate the producer metadata associated with the Producer ID."
+ case ErrReassignmentInProgress:
+ return "kafka server: A partition reassignment is in progress."
+ case ErrDelegationTokenAuthDisabled:
+ return "kafka server: Delegation Token feature is not enabled."
+ case ErrDelegationTokenNotFound:
+ return "kafka server: Delegation Token is not found on server."
+ case ErrDelegationTokenOwnerMismatch:
+ return "kafka server: Specified Principal is not valid Owner/Renewer."
+ case ErrDelegationTokenRequestNotAllowed:
+ return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels."
+ case ErrDelegationTokenAuthorizationFailed:
+ return "kafka server: Delegation Token authorization failed."
+ case ErrDelegationTokenExpired:
+ return "kafka server: Delegation Token is expired."
+ case ErrInvalidPrincipalType:
+ return "kafka server: Supplied principalType is not supported."
+ case ErrNonEmptyGroup:
+ return "kafka server: The group is not empty."
+ case ErrGroupIDNotFound:
+ return "kafka server: The group id does not exist."
+ case ErrFetchSessionIDNotFound:
+ return "kafka server: The fetch session ID was not found."
+ case ErrInvalidFetchSessionEpoch:
+ return "kafka server: The fetch session epoch is invalid."
+ case ErrListenerNotFound:
+ return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed."
+ case ErrTopicDeletionDisabled:
+ return "kafka server: Topic deletion is disabled."
+ case ErrFencedLeaderEpoch:
+ return "kafka server: The leader epoch in the request is older than the epoch on the broker."
+ case ErrUnknownLeaderEpoch:
+ return "kafka server: The leader epoch in the request is newer than the epoch on the broker."
+ case ErrUnsupportedCompressionType:
+ return "kafka server: The requesting client does not support the compression type of given partition."
+ case ErrStaleBrokerEpoch:
+ return "kafka server: Broker epoch has changed"
+ case ErrOffsetNotAvailable:
+ return "kafka server: The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing"
+ case ErrMemberIdRequired:
+ return "kafka server: The group member needs to have a valid member id before actually entering a consumer group"
+ case ErrPreferredLeaderNotAvailable:
+ return "kafka server: The preferred leader was not available"
+ case ErrGroupMaxSizeReached:
+ return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members."
+ case ErrFencedInstancedId:
+ return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id."
+ }
+
+ return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
+}
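+
+// A minimal usage sketch (illustrative only, not part of the upstream file):
+// error codes decoded into a KError can be compared directly against the
+// constants above, e.g. when inspecting a FetchResponseBlock:
+//
+//    if block.Err != ErrNoError {
+//        switch block.Err {
+//        case ErrOffsetOutOfRange:
+//            // reset the consumer offset and retry
+//        default:
+//            return block.Err
+//        }
+//    }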
diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go
new file mode 100644
index 0000000..4db9ddd
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/fetch_request.go
@@ -0,0 +1,170 @@
+package sarama
+
+type fetchRequestBlock struct {
+ fetchOffset int64
+ maxBytes int32
+}
+
+func (b *fetchRequestBlock) encode(pe packetEncoder) error {
+ pe.putInt64(b.fetchOffset)
+ pe.putInt32(b.maxBytes)
+ return nil
+}
+
+func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
+ if b.fetchOffset, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if b.maxBytes, err = pd.getInt32(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
+// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
+type FetchRequest struct {
+ MaxWaitTime int32
+ MinBytes int32
+ MaxBytes int32
+ Version int16
+ Isolation IsolationLevel
+ blocks map[string]map[int32]*fetchRequestBlock
+}
+
+type IsolationLevel int8
+
+const (
+ ReadUncommitted IsolationLevel = iota
+ ReadCommitted
+)
+
+func (r *FetchRequest) encode(pe packetEncoder) (err error) {
+ pe.putInt32(-1) // replica ID is always -1 for clients
+ pe.putInt32(r.MaxWaitTime)
+ pe.putInt32(r.MinBytes)
+ if r.Version >= 3 {
+ pe.putInt32(r.MaxBytes)
+ }
+ if r.Version >= 4 {
+ pe.putInt8(int8(r.Isolation))
+ }
+ err = pe.putArrayLength(len(r.blocks))
+ if err != nil {
+ return err
+ }
+ for topic, blocks := range r.blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(blocks))
+ if err != nil {
+ return err
+ }
+ for partition, block := range blocks {
+ pe.putInt32(partition)
+ err = block.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if _, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.MaxWaitTime, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.MinBytes, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.Version >= 3 {
+ if r.MaxBytes, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+ if r.Version >= 4 {
+ isolation, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ r.Isolation = IsolationLevel(isolation)
+ }
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+ r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ fetchBlock := &fetchRequestBlock{}
+ if err = fetchBlock.decode(pd); err != nil {
+ return err
+ }
+ r.blocks[topic][partition] = fetchBlock
+ }
+ }
+ return nil
+}
+
+func (r *FetchRequest) key() int16 {
+ return 1
+}
+
+func (r *FetchRequest) version() int16 {
+ return r.Version
+}
+
+func (r *FetchRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_9_0_0
+ case 2:
+ return V0_10_0_0
+ case 3:
+ return V0_10_1_0
+ case 4:
+ return V0_11_0_0
+ default:
+ return MinVersion
+ }
+}
+
+func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
+ if r.blocks == nil {
+ r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+ }
+
+ if r.blocks[topic] == nil {
+ r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+ }
+
+ tmp := new(fetchRequestBlock)
+ tmp.maxBytes = maxBytes
+ tmp.fetchOffset = fetchOffset
+
+ r.blocks[topic][partitionID] = tmp
+}
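+
+// A minimal sketch (illustrative; the topic name, offset and size limits are
+// placeholders) of how a caller might assemble a FetchRequest for a single
+// partition using the fields and AddBlock helper defined above:
+//
+//    req := &FetchRequest{
+//        Version:     4,
+//        MinBytes:    1,
+//        MaxBytes:    1 << 20,
+//        MaxWaitTime: 250, // milliseconds
+//        Isolation:   ReadCommitted,
+//    }
+//    req.AddBlock("my-topic", 0, 42, 1<<20)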
diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go
new file mode 100644
index 0000000..3afc187
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/fetch_response.go
@@ -0,0 +1,489 @@
+package sarama
+
+import (
+ "sort"
+ "time"
+)
+
+type AbortedTransaction struct {
+ ProducerID int64
+ FirstOffset int64
+}
+
+func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
+ if t.ProducerID, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ if t.FirstOffset, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
+ pe.putInt64(t.ProducerID)
+ pe.putInt64(t.FirstOffset)
+
+ return nil
+}
+
+type FetchResponseBlock struct {
+ Err KError
+ HighWaterMarkOffset int64
+ LastStableOffset int64
+ AbortedTransactions []*AbortedTransaction
+ Records *Records // deprecated: use FetchResponseBlock.RecordsSet
+ RecordsSet []*Records
+ Partial bool
+}
+
+func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Err = KError(tmp)
+
+ b.HighWaterMarkOffset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ if version >= 4 {
+ b.LastStableOffset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ numTransact, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if numTransact >= 0 {
+ b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
+ }
+
+ for i := 0; i < numTransact; i++ {
+ transact := new(AbortedTransaction)
+ if err = transact.decode(pd); err != nil {
+ return err
+ }
+ b.AbortedTransactions[i] = transact
+ }
+ }
+
+ recordsSize, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ recordsDecoder, err := pd.getSubset(int(recordsSize))
+ if err != nil {
+ return err
+ }
+
+ b.RecordsSet = []*Records{}
+
+ for recordsDecoder.remaining() > 0 {
+ records := &Records{}
+ if err := records.decode(recordsDecoder); err != nil {
+ // If we have at least one decoded records, this is not an error
+ if err == ErrInsufficientData {
+ if len(b.RecordsSet) == 0 {
+ b.Partial = true
+ }
+ break
+ }
+ return err
+ }
+
+ partial, err := records.isPartial()
+ if err != nil {
+ return err
+ }
+
+ n, err := records.numRecords()
+ if err != nil {
+ return err
+ }
+
+ if n > 0 || (partial && len(b.RecordsSet) == 0) {
+ b.RecordsSet = append(b.RecordsSet, records)
+
+ if b.Records == nil {
+ b.Records = records
+ }
+ }
+
+ overflow, err := records.isOverflow()
+ if err != nil {
+ return err
+ }
+
+ if partial || overflow {
+ break
+ }
+ }
+
+ return nil
+}
+
+func (b *FetchResponseBlock) numRecords() (int, error) {
+ sum := 0
+
+ for _, records := range b.RecordsSet {
+ count, err := records.numRecords()
+ if err != nil {
+ return 0, err
+ }
+
+ sum += count
+ }
+
+ return sum, nil
+}
+
+func (b *FetchResponseBlock) isPartial() (bool, error) {
+ if b.Partial {
+ return true, nil
+ }
+
+ if len(b.RecordsSet) == 1 {
+ return b.RecordsSet[0].isPartial()
+ }
+
+ return false, nil
+}
+
+func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+ pe.putInt16(int16(b.Err))
+
+ pe.putInt64(b.HighWaterMarkOffset)
+
+ if version >= 4 {
+ pe.putInt64(b.LastStableOffset)
+
+ if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
+ return err
+ }
+ for _, transact := range b.AbortedTransactions {
+ if err = transact.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+
+ pe.push(&lengthField{})
+ for _, records := range b.RecordsSet {
+ err = records.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+ return pe.pop()
+}
+
+func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction {
+ // I can't find any doc that guarantees the field `fetchResponse.AbortedTransactions` is ordered,
+ // and the Java implementation uses a PriorityQueue keyed on `FirstOffset`, so we order it ourselves.
+ at := b.AbortedTransactions
+ sort.Slice(
+ at,
+ func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset },
+ )
+ return at
+}
+
+type FetchResponse struct {
+ Blocks map[string]map[int32]*FetchResponseBlock
+ ThrottleTime time.Duration
+ Version int16 // v1 requires 0.9+, v2 requires 0.10+
+ LogAppendTime bool
+ Timestamp time.Time
+}
+
+func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if r.Version >= 1 {
+ throttle, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ r.ThrottleTime = time.Duration(throttle) * time.Millisecond
+ }
+
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(FetchResponseBlock)
+ err = block.decode(pd, version)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ return nil
+}
+
+func (r *FetchResponse) encode(pe packetEncoder) (err error) {
+ if r.Version >= 1 {
+ pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+ }
+
+ err = pe.putArrayLength(len(r.Blocks))
+ if err != nil {
+ return err
+ }
+
+ for topic, partitions := range r.Blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+
+ for id, block := range partitions {
+ pe.putInt32(id)
+ err = block.encode(pe, r.Version)
+ if err != nil {
+ return err
+ }
+ }
+
+ }
+ return nil
+}
+
+func (r *FetchResponse) key() int16 {
+ return 1
+}
+
+func (r *FetchResponse) version() int16 {
+ return r.Version
+}
+
+func (r *FetchResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_9_0_0
+ case 2:
+ return V0_10_0_0
+ case 3:
+ return V0_10_1_0
+ case 4:
+ return V0_11_0_0
+ default:
+ return MinVersion
+ }
+}
+
+func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+ }
+ partitions, ok := r.Blocks[topic]
+ if !ok {
+ partitions = make(map[int32]*FetchResponseBlock)
+ r.Blocks[topic] = partitions
+ }
+ frb, ok := partitions[partition]
+ if !ok {
+ frb = new(FetchResponseBlock)
+ partitions[partition] = frb
+ }
+ frb.Err = err
+}
+
+func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+ }
+ partitions, ok := r.Blocks[topic]
+ if !ok {
+ partitions = make(map[int32]*FetchResponseBlock)
+ r.Blocks[topic] = partitions
+ }
+ frb, ok := partitions[partition]
+ if !ok {
+ frb = new(FetchResponseBlock)
+ partitions[partition] = frb
+ }
+
+ return frb
+}
+
+func encodeKV(key, value Encoder) ([]byte, []byte) {
+ var kb []byte
+ var vb []byte
+ if key != nil {
+ kb, _ = key.Encode()
+ }
+ if value != nil {
+ vb, _ = value.Encode()
+ }
+
+ return kb, vb
+}
+
+func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) {
+ frb := r.getOrCreateBlock(topic, partition)
+ kb, vb := encodeKV(key, value)
+ if r.LogAppendTime {
+ timestamp = r.Timestamp
+ }
+ msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: timestamp, Version: version}
+ msgBlock := &MessageBlock{Msg: msg, Offset: offset}
+ if len(frb.RecordsSet) == 0 {
+ records := newLegacyRecords(&MessageSet{})
+ frb.RecordsSet = []*Records{&records}
+ }
+ set := frb.RecordsSet[0].MsgSet
+ set.Messages = append(set.Messages, msgBlock)
+}
+
+func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) {
+ frb := r.getOrCreateBlock(topic, partition)
+ kb, vb := encodeKV(key, value)
+ if len(frb.RecordsSet) == 0 {
+ records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
+ frb.RecordsSet = []*Records{&records}
+ }
+ batch := frb.RecordsSet[0].RecordBatch
+ rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
+ batch.addRecord(rec)
+}
+
+// AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp, but
+// instead of appending one record to an existing batch, it appends a new batch
+// containing one record to the FetchResponse.
+// Since transactions are handled at the batch level (the whole batch is either
+// committed or aborted), use this to test transactions.
+func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) {
+ frb := r.getOrCreateBlock(topic, partition)
+ kb, vb := encodeKV(key, value)
+
+ records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
+ batch := &RecordBatch{
+ Version: 2,
+ LogAppendTime: r.LogAppendTime,
+ FirstTimestamp: timestamp,
+ MaxTimestamp: r.Timestamp,
+ FirstOffset: offset,
+ LastOffsetDelta: 0,
+ ProducerID: producerID,
+ IsTransactional: isTransactional,
+ }
+ rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
+ batch.addRecord(rec)
+ records.RecordBatch = batch
+
+ frb.RecordsSet = append(frb.RecordsSet, &records)
+}
+
+func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) {
+ frb := r.getOrCreateBlock(topic, partition)
+
+ // batch
+ batch := &RecordBatch{
+ Version: 2,
+ LogAppendTime: r.LogAppendTime,
+ FirstTimestamp: timestamp,
+ MaxTimestamp: r.Timestamp,
+ FirstOffset: offset,
+ LastOffsetDelta: 0,
+ ProducerID: producerID,
+ IsTransactional: true,
+ Control: true,
+ }
+
+ // records
+ records := newDefaultRecords(nil)
+ records.RecordBatch = batch
+
+ // record
+ crAbort := ControlRecord{
+ Version: 0,
+ Type: recordType,
+ }
+ crKey := &realEncoder{raw: make([]byte, 4)}
+ crValue := &realEncoder{raw: make([]byte, 6)}
+ crAbort.encode(crKey, crValue)
+ rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
+ batch.addRecord(rec)
+
+ frb.RecordsSet = append(frb.RecordsSet, &records)
+}
+
+func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
+ r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0)
+}
+
+func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
+ r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{})
+}
+
+func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) {
+ r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{})
+}
+
+func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) {
+ // define controlRecord key and value
+ r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{})
+}
+
+func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
+ frb := r.getOrCreateBlock(topic, partition)
+ if len(frb.RecordsSet) == 0 {
+ records := newDefaultRecords(&RecordBatch{Version: 2})
+ frb.RecordsSet = []*Records{&records}
+ }
+ batch := frb.RecordsSet[0].RecordBatch
+ batch.LastOffsetDelta = offset
+}
+
+func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
+ frb := r.getOrCreateBlock(topic, partition)
+ frb.LastStableOffset = offset
+}
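+
+// A minimal sketch (illustrative only; the topic, partitions and payload are
+// placeholders) of how tests typically populate a FetchResponse by hand with
+// the AddMessage and AddError helpers above, using the ByteEncoder type from
+// elsewhere in this package:
+//
+//    resp := &FetchResponse{Version: 4}
+//    resp.AddMessage("my-topic", 0, nil, ByteEncoder([]byte("hello")), 0)
+//    resp.AddError("my-topic", 1, ErrNotLeaderForPartition)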
diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/Shopify/sarama/find_coordinator_request.go
new file mode 100644
index 0000000..ff2ad20
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/find_coordinator_request.go
@@ -0,0 +1,61 @@
+package sarama
+
+type CoordinatorType int8
+
+const (
+ CoordinatorGroup CoordinatorType = iota
+ CoordinatorTransaction
+)
+
+type FindCoordinatorRequest struct {
+ Version int16
+ CoordinatorKey string
+ CoordinatorType CoordinatorType
+}
+
+func (f *FindCoordinatorRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(f.CoordinatorKey); err != nil {
+ return err
+ }
+
+ if f.Version >= 1 {
+ pe.putInt8(int8(f.CoordinatorType))
+ }
+
+ return nil
+}
+
+func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) {
+ if f.CoordinatorKey, err = pd.getString(); err != nil {
+ return err
+ }
+
+ if version >= 1 {
+ f.Version = version
+ coordinatorType, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+
+ f.CoordinatorType = CoordinatorType(coordinatorType)
+ }
+
+ return nil
+}
+
+func (f *FindCoordinatorRequest) key() int16 {
+ return 10
+}
+
+func (f *FindCoordinatorRequest) version() int16 {
+ return f.Version
+}
+
+func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion {
+ switch f.Version {
+ case 1:
+ return V0_11_0_0
+ default:
+ return V0_8_2_0
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/Shopify/sarama/find_coordinator_response.go
new file mode 100644
index 0000000..9c900e8
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/find_coordinator_response.go
@@ -0,0 +1,92 @@
+package sarama
+
+import (
+ "time"
+)
+
+var NoNode = &Broker{id: -1, addr: ":-1"}
+
+type FindCoordinatorResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ Err KError
+ ErrMsg *string
+ Coordinator *Broker
+}
+
+func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) {
+ if version >= 1 {
+ f.Version = version
+
+ throttleTime, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ f.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+ }
+
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ f.Err = KError(tmp)
+
+ if version >= 1 {
+ if f.ErrMsg, err = pd.getNullableString(); err != nil {
+ return err
+ }
+ }
+
+ coordinator := new(Broker)
+ // The version is hardcoded to 0, as version 1 of Broker.decode
+ // includes the rack field, which is not present in the FindCoordinatorResponse.
+ if err := coordinator.decode(pd, 0); err != nil {
+ return err
+ }
+ if coordinator.addr == ":0" {
+ return nil
+ }
+ f.Coordinator = coordinator
+
+ return nil
+}
+
+func (f *FindCoordinatorResponse) encode(pe packetEncoder) error {
+ if f.Version >= 1 {
+ pe.putInt32(int32(f.ThrottleTime / time.Millisecond))
+ }
+
+ pe.putInt16(int16(f.Err))
+
+ if f.Version >= 1 {
+ if err := pe.putNullableString(f.ErrMsg); err != nil {
+ return err
+ }
+ }
+
+ coordinator := f.Coordinator
+ if coordinator == nil {
+ coordinator = NoNode
+ }
+ if err := coordinator.encode(pe, 0); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (f *FindCoordinatorResponse) key() int16 {
+ return 10
+}
+
+func (f *FindCoordinatorResponse) version() int16 {
+ return f.Version
+}
+
+func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion {
+ switch f.Version {
+ case 1:
+ return V0_11_0_0
+ default:
+ return V0_8_2_0
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/go.mod b/vendor/github.com/Shopify/sarama/go.mod
new file mode 100644
index 0000000..4337c00
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/go.mod
@@ -0,0 +1,29 @@
+module github.com/Shopify/sarama
+
+go 1.13
+
+require (
+ github.com/Shopify/toxiproxy v2.1.4+incompatible
+ github.com/davecgh/go-spew v1.1.1
+ github.com/eapache/go-resiliency v1.1.0
+ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21
+ github.com/eapache/queue v1.1.0
+ github.com/fortytw2/leaktest v1.3.0
+ github.com/frankban/quicktest v1.4.1 // indirect
+ github.com/golang/snappy v0.0.1 // indirect
+ github.com/hashicorp/go-uuid v1.0.1 // indirect
+ github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 // indirect
+ github.com/klauspost/compress v1.8.2
+ github.com/pierrec/lz4 v2.2.6+incompatible
+ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a
+ github.com/stretchr/testify v1.3.0
+ github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c
+ github.com/xdg/stringprep v1.0.0 // indirect
+ golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 // indirect
+ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3
+ gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect
+ gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect
+ gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
+ gopkg.in/jcmturner/gokrb5.v7 v7.2.3
+ gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect
+)
diff --git a/vendor/github.com/Shopify/sarama/go.sum b/vendor/github.com/Shopify/sarama/go.sum
new file mode 100644
index 0000000..d2f04ee
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/go.sum
@@ -0,0 +1,67 @@
+github.com/DataDog/zstd v1.4.0 h1:vhoV+DUHnRZdKW1i5UMjAk2G4JY8wN4ayRfYDNdEhwo=
+github.com/DataDog/zstd v1.4.0/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/frankban/quicktest v1.4.1 h1:Wv2VwvNn73pAdFIVUQRXYDFp31lXKbqblIXo/Q5GPSg=
+github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ=
+github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM=
+github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/klauspost/compress v1.8.1 h1:oygt2ychZFHOB6M9gUgajzgKrwRgHbGC77NwA4COVgI=
+github.com/klauspost/compress v1.8.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.8.2 h1:Bx0qjetmNjdFXASH02NSAREKpiaDwkO1DRZ3dV2KCcs=
+github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw=
+github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
+github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 h1:bselrhR0Or1vomJZC8ZIjWtbDmn9OYFLX5Ik9alpJpE=
+golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
+gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
+gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=
+gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
+gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI=
+gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
+gopkg.in/jcmturner/gokrb5.v7 v7.2.3 h1:hHMV/yKPwMnJhPuPx7pH2Uw/3Qyf+thJYlisUc44010=
+gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
+gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
+gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
diff --git a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go
new file mode 100644
index 0000000..57f3ecb
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go
@@ -0,0 +1,258 @@
+package sarama
+
+import (
+ "encoding/asn1"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "gopkg.in/jcmturner/gokrb5.v7/asn1tools"
+ "gopkg.in/jcmturner/gokrb5.v7/gssapi"
+ "gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype"
+ "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
+ "gopkg.in/jcmturner/gokrb5.v7/messages"
+ "gopkg.in/jcmturner/gokrb5.v7/types"
+)
+
+const (
+ TOK_ID_KRB_AP_REQ = 256
+ GSS_API_GENERIC_TAG = 0x60
+ KRB5_USER_AUTH = 1
+ KRB5_KEYTAB_AUTH = 2
+ GSS_API_INITIAL = 1
+ GSS_API_VERIFY = 2
+ GSS_API_FINISH = 3
+)
+
+type GSSAPIConfig struct {
+ AuthType int
+ KeyTabPath string
+ KerberosConfigPath string
+ ServiceName string
+ Username string
+ Password string
+ Realm string
+}
+
+type GSSAPIKerberosAuth struct {
+ Config *GSSAPIConfig
+ ticket messages.Ticket
+ encKey types.EncryptionKey
+ NewKerberosClientFunc func(config *GSSAPIConfig) (KerberosClient, error)
+ step int
+}
+
+type KerberosClient interface {
+ Login() error
+ GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error)
+ Domain() string
+ CName() types.PrincipalName
+ Destroy()
+}
+
+/*
+*
+* Appends the length in big endian before the payload, and sends it to Kafka.
+*
+ */
+
+func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) {
+ length := len(payload)
+ finalPackage := make([]byte, length+4) //4 byte length header + payload
+ copy(finalPackage[4:], payload)
+ binary.BigEndian.PutUint32(finalPackage, uint32(length))
+ bytes, err := broker.conn.Write(finalPackage)
+ if err != nil {
+ return bytes, err
+ }
+ return bytes, nil
+}
+
+/*
+*
+* Read length (4 bytes) and then read the payload
+*
+ */
+
+func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) {
+ bytesRead := 0
+ lengthInBytes := make([]byte, 4)
+ bytes, err := io.ReadFull(broker.conn, lengthInBytes)
+ if err != nil {
+ return nil, bytesRead, err
+ }
+ bytesRead += bytes
+ payloadLength := binary.BigEndian.Uint32(lengthInBytes)
+ payloadBytes := make([]byte, payloadLength) // buffer for read..
+ bytes, err = io.ReadFull(broker.conn, payloadBytes) // read bytes
+ if err != nil {
+ return payloadBytes, bytesRead, err
+ }
+ bytesRead += bytes
+ return payloadBytes, bytesRead, nil
+}
+
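+// newAuthenticatorChecksum builds the 24-byte GSS-API authenticator checksum
+// (see RFC 4121, section 4.1.1): bytes 0-3 hold the channel-binding length
+// (16, little endian), bytes 4-19 the zeroed channel bindings, and bytes
+// 20-23 the context flags assembled below.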
+func (krbAuth *GSSAPIKerberosAuth) newAuthenticatorChecksum() []byte {
+ a := make([]byte, 24)
+ flags := []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf}
+ binary.LittleEndian.PutUint32(a[:4], 16)
+ for _, i := range flags {
+ f := binary.LittleEndian.Uint32(a[20:24])
+ f |= uint32(i)
+ binary.LittleEndian.PutUint32(a[20:24], f)
+ }
+ return a
+}
+
+/*
+*
+* Construct Kerberos AP_REQ package, conforming to RFC-4120
+* https://tools.ietf.org/html/rfc4120#page-84
+*
+ */
+func (krbAuth *GSSAPIKerberosAuth) createKrb5Token(
+ domain string, cname types.PrincipalName,
+ ticket messages.Ticket,
+ sessionKey types.EncryptionKey) ([]byte, error) {
+ auth, err := types.NewAuthenticator(domain, cname)
+ if err != nil {
+ return nil, err
+ }
+ auth.Cksum = types.Checksum{
+ CksumType: chksumtype.GSSAPI,
+ Checksum: krbAuth.newAuthenticatorChecksum(),
+ }
+ APReq, err := messages.NewAPReq(
+ ticket,
+ sessionKey,
+ auth,
+ )
+ if err != nil {
+ return nil, err
+ }
+ aprBytes := make([]byte, 2)
+ binary.BigEndian.PutUint16(aprBytes, TOK_ID_KRB_AP_REQ)
+ tb, err := APReq.Marshal()
+ if err != nil {
+ return nil, err
+ }
+ aprBytes = append(aprBytes, tb...)
+ return aprBytes, nil
+}
+
+/*
+*
+* Append the GSS-API header to the payload, conforming to RFC-2743
+* Section 3.1, Mechanism-Independent Token Format
+*
+* https://tools.ietf.org/html/rfc2743#page-81
+*
+* GSSAPIHeader + <specific mechanism payload>
+*
+ */
+func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) {
+ oidBytes, err := asn1.Marshal(gssapi.OID(gssapi.OIDKRB5))
+ if err != nil {
+ return nil, err
+ }
+ tkoLengthBytes := asn1tools.MarshalLengthBytes(len(oidBytes) + len(payload))
+ GSSHeader := append([]byte{GSS_API_GENERIC_TAG}, tkoLengthBytes...)
+ GSSHeader = append(GSSHeader, oidBytes...)
+ GSSPackage := append(GSSHeader, payload...)
+ return GSSPackage, nil
+}
+
+func (krbAuth *GSSAPIKerberosAuth) initSecContext(bytes []byte, kerberosClient KerberosClient) ([]byte, error) {
+ switch krbAuth.step {
+ case GSS_API_INITIAL:
+ aprBytes, err := krbAuth.createKrb5Token(
+ kerberosClient.Domain(),
+ kerberosClient.CName(),
+ krbAuth.ticket,
+ krbAuth.encKey)
+ if err != nil {
+ return nil, err
+ }
+ krbAuth.step = GSS_API_VERIFY
+ return krbAuth.appendGSSAPIHeader(aprBytes)
+ case GSS_API_VERIFY:
+ wrapTokenReq := gssapi.WrapToken{}
+ if err := wrapTokenReq.Unmarshal(bytes, true); err != nil {
+ return nil, err
+ }
+ // Validate response.
+ isValid, err := wrapTokenReq.Verify(krbAuth.encKey, keyusage.GSSAPI_ACCEPTOR_SEAL)
+ if !isValid {
+ return nil, err
+ }
+
+ wrapTokenResponse, err := gssapi.NewInitiatorWrapToken(wrapTokenReq.Payload, krbAuth.encKey)
+ if err != nil {
+ return nil, err
+ }
+ krbAuth.step = GSS_API_FINISH
+ return wrapTokenResponse.Marshal()
+ }
+ return nil, nil
+}
+
+/* Authorize performs the SASL handshake with the broker using GSSAPI/Kerberos */
+func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error {
+
+ kerberosClient, err := krbAuth.NewKerberosClientFunc(krbAuth.Config)
+ if err != nil {
+ Logger.Printf("Kerberos client error: %s", err)
+ return err
+ }
+
+ err = kerberosClient.Login()
+ if err != nil {
+ Logger.Printf("Kerberos client error: %s", err)
+ return err
+ }
+ // Construct SPN using serviceName and host
+ // SPN format: <SERVICE>/<FQDN>
+
+ host := strings.SplitN(broker.addr, ":", 2)[0] // Strip port part
+ spn := fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host)
+
+ ticket, encKey, err := kerberosClient.GetServiceTicket(spn)
+
+ if err != nil {
+ Logger.Printf("Error getting Kerberos service ticket : %s", err)
+ return err
+ }
+ krbAuth.ticket = ticket
+ krbAuth.encKey = encKey
+ krbAuth.step = GSS_API_INITIAL
+ var receivedBytes []byte
+ defer kerberosClient.Destroy()
+ for {
+ packBytes, err := krbAuth.initSecContext(receivedBytes, kerberosClient)
+ if err != nil {
+ Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
+ return err
+ }
+ requestTime := time.Now()
+ bytesWritten, err := krbAuth.writePackage(broker, packBytes)
+ if err != nil {
+ Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
+ return err
+ }
+ broker.updateOutgoingCommunicationMetrics(bytesWritten)
+ if krbAuth.step == GSS_API_VERIFY {
+ var bytesRead = 0
+ receivedBytes, bytesRead, err = krbAuth.readPackage(broker)
+ requestLatency := time.Since(requestTime)
+ broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
+ if err != nil {
+ Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
+ return err
+ }
+ } else if krbAuth.step == GSS_API_FINISH {
+ return nil
+ }
+ }
+}
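+
+// A minimal configuration sketch (illustrative; the Config/SASL field and
+// constant names are assumed from the surrounding sarama version, and all
+// paths, realms and credentials are placeholders) showing how a client would
+// enable this handshake:
+//
+//    conf := NewConfig()
+//    conf.Net.SASL.Enable = true
+//    conf.Net.SASL.Mechanism = SASLTypeGSSAPI
+//    conf.Net.SASL.GSSAPI = GSSAPIConfig{
+//        AuthType:           KRB5_KEYTAB_AUTH,
+//        ServiceName:        "kafka",
+//        Realm:              "EXAMPLE.COM",
+//        Username:           "client",
+//        KeyTabPath:         "/etc/security/client.keytab",
+//        KerberosConfigPath: "/etc/krb5.conf",
+//    }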
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go
new file mode 100644
index 0000000..ce49c47
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go
@@ -0,0 +1,47 @@
+package sarama
+
+type HeartbeatRequest struct {
+ GroupId string
+ GenerationId int32
+ MemberId string
+}
+
+func (r *HeartbeatRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+
+ pe.putInt32(r.GenerationId)
+
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if r.GenerationId, err = pd.getInt32(); err != nil {
+ return
+ }
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ return nil
+}
+
+func (r *HeartbeatRequest) key() int16 {
+ return 12
+}
+
+func (r *HeartbeatRequest) version() int16 {
+ return 0
+}
+
+func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go
new file mode 100644
index 0000000..766f5fd
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go
@@ -0,0 +1,32 @@
+package sarama
+
+type HeartbeatResponse struct {
+ Err KError
+}
+
+func (r *HeartbeatResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ return nil
+}
+
+func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Err = KError(kerr)
+
+ return nil
+}
+
+func (r *HeartbeatResponse) key() int16 {
+ return 12
+}
+
+func (r *HeartbeatResponse) version() int16 {
+ return 0
+}
+
+func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go
new file mode 100644
index 0000000..8ceb6c2
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/init_producer_id_request.go
@@ -0,0 +1,43 @@
+package sarama
+
+import "time"
+
+type InitProducerIDRequest struct {
+ TransactionalID *string
+ TransactionTimeout time.Duration
+}
+
+func (i *InitProducerIDRequest) encode(pe packetEncoder) error {
+ if err := pe.putNullableString(i.TransactionalID); err != nil {
+ return err
+ }
+ pe.putInt32(int32(i.TransactionTimeout / time.Millisecond))
+
+ return nil
+}
+
+func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) {
+ if i.TransactionalID, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ timeout, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ i.TransactionTimeout = time.Duration(timeout) * time.Millisecond
+
+ return nil
+}
+
+func (i *InitProducerIDRequest) key() int16 {
+ return 22
+}
+
+func (i *InitProducerIDRequest) version() int16 {
+ return 0
+}
+
+func (i *InitProducerIDRequest) requiredVersion() KafkaVersion {
+ return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go
new file mode 100644
index 0000000..1b32eb0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/init_producer_id_response.go
@@ -0,0 +1,55 @@
+package sarama
+
+import "time"
+
+type InitProducerIDResponse struct {
+ ThrottleTime time.Duration
+ Err KError
+ ProducerID int64
+ ProducerEpoch int16
+}
+
+func (i *InitProducerIDResponse) encode(pe packetEncoder) error {
+ pe.putInt32(int32(i.ThrottleTime / time.Millisecond))
+ pe.putInt16(int16(i.Err))
+ pe.putInt64(i.ProducerID)
+ pe.putInt16(i.ProducerEpoch)
+
+ return nil
+}
+
+func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) {
+ throttleTime, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ i.Err = KError(kerr)
+
+ if i.ProducerID, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ if i.ProducerEpoch, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (i *InitProducerIDResponse) key() int16 {
+ return 22
+}
+
+func (i *InitProducerIDResponse) version() int16 {
+ return 0
+}
+
+func (i *InitProducerIDResponse) requiredVersion() KafkaVersion {
+ return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go
new file mode 100644
index 0000000..97e9299
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/join_group_request.go
@@ -0,0 +1,163 @@
+package sarama
+
+type GroupProtocol struct {
+ Name string
+ Metadata []byte
+}
+
+func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
+ p.Name, err = pd.getString()
+ if err != nil {
+ return err
+ }
+ p.Metadata, err = pd.getBytes()
+ return err
+}
+
+func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
+ if err := pe.putString(p.Name); err != nil {
+ return err
+ }
+ if err := pe.putBytes(p.Metadata); err != nil {
+ return err
+ }
+ return nil
+}
+
+type JoinGroupRequest struct {
+ Version int16
+ GroupId string
+ SessionTimeout int32
+ RebalanceTimeout int32
+ MemberId string
+ ProtocolType string
+ GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols
+ OrderedGroupProtocols []*GroupProtocol
+}
+
+func (r *JoinGroupRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+ pe.putInt32(r.SessionTimeout)
+ if r.Version >= 1 {
+ pe.putInt32(r.RebalanceTimeout)
+ }
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+ if err := pe.putString(r.ProtocolType); err != nil {
+ return err
+ }
+
+ if len(r.GroupProtocols) > 0 {
+ if len(r.OrderedGroupProtocols) > 0 {
+ return PacketDecodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
+ }
+
+ if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
+ return err
+ }
+ for name, metadata := range r.GroupProtocols {
+ if err := pe.putString(name); err != nil {
+ return err
+ }
+ if err := pe.putBytes(metadata); err != nil {
+ return err
+ }
+ }
+ } else {
+ if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
+ return err
+ }
+ for _, protocol := range r.OrderedGroupProtocols {
+ if err := protocol.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.SessionTimeout, err = pd.getInt32(); err != nil {
+ return
+ }
+
+ if version >= 1 {
+ if r.RebalanceTimeout, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.ProtocolType, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.GroupProtocols = make(map[string][]byte)
+ for i := 0; i < n; i++ {
+ protocol := &GroupProtocol{}
+ if err := protocol.decode(pd); err != nil {
+ return err
+ }
+ r.GroupProtocols[protocol.Name] = protocol.Metadata
+ r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
+ }
+
+ return nil
+}
+
+func (r *JoinGroupRequest) key() int16 {
+ return 11
+}
+
+func (r *JoinGroupRequest) version() int16 {
+ return r.Version
+}
+
+func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 2:
+ return V0_11_0_0
+ case 1:
+ return V0_10_1_0
+ default:
+ return V0_9_0_0
+ }
+}
+
+func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
+ r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
+ Name: name,
+ Metadata: metadata,
+ })
+}
+
+func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
+ bin, err := encode(metadata, nil)
+ if err != nil {
+ return err
+ }
+
+ r.AddGroupProtocol(name, bin)
+ return nil
+}
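+
+// A minimal sketch (illustrative; the ConsumerGroupMemberMetadata fields are
+// assumed from the surrounding sarama version, and the group/topic names are
+// placeholders) of preparing a join request for the built-in range assignor:
+//
+//    req := &JoinGroupRequest{
+//        Version:        1,
+//        GroupId:        "my-group",
+//        SessionTimeout: 30000,
+//        ProtocolType:   "consumer",
+//    }
+//    meta := &ConsumerGroupMemberMetadata{Topics: []string{"my-topic"}}
+//    if err := req.AddGroupProtocolMetadata("range", meta); err != nil {
+//        // handle encoding error
+//    }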
diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go
new file mode 100644
index 0000000..5752acc
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/join_group_response.go
@@ -0,0 +1,135 @@
+package sarama
+
+type JoinGroupResponse struct {
+ Version int16
+ ThrottleTime int32
+ Err KError
+ GenerationId int32
+ GroupProtocol string
+ LeaderId string
+ MemberId string
+ Members map[string][]byte
+}
+
+func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
+ members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
+ for id, bin := range r.Members {
+ meta := new(ConsumerGroupMemberMetadata)
+ if err := decode(bin, meta); err != nil {
+ return nil, err
+ }
+ members[id] = *meta
+ }
+ return members, nil
+}
+
+func (r *JoinGroupResponse) encode(pe packetEncoder) error {
+ if r.Version >= 2 {
+ pe.putInt32(r.ThrottleTime)
+ }
+ pe.putInt16(int16(r.Err))
+ pe.putInt32(r.GenerationId)
+
+ if err := pe.putString(r.GroupProtocol); err != nil {
+ return err
+ }
+ if err := pe.putString(r.LeaderId); err != nil {
+ return err
+ }
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(r.Members)); err != nil {
+ return err
+ }
+
+ for memberId, memberMetadata := range r.Members {
+ if err := pe.putString(memberId); err != nil {
+ return err
+ }
+
+ if err := pe.putBytes(memberMetadata); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if version >= 2 {
+ if r.ThrottleTime, err = pd.getInt32(); err != nil {
+ return
+ }
+ }
+
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ if r.GenerationId, err = pd.getInt32(); err != nil {
+ return
+ }
+
+ if r.GroupProtocol, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.LeaderId, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.Members = make(map[string][]byte)
+ for i := 0; i < n; i++ {
+ memberId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ memberMetadata, err := pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ r.Members[memberId] = memberMetadata
+ }
+
+ return nil
+}
+
+func (r *JoinGroupResponse) key() int16 {
+ return 11
+}
+
+func (r *JoinGroupResponse) version() int16 {
+ return r.Version
+}
+
+func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 2:
+ return V0_11_0_0
+ case 1:
+ return V0_10_1_0
+ default:
+ return V0_9_0_0
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/kerberos_client.go b/vendor/github.com/Shopify/sarama/kerberos_client.go
new file mode 100644
index 0000000..91b998f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/kerberos_client.go
@@ -0,0 +1,51 @@
+package sarama
+
+import (
+ krb5client "gopkg.in/jcmturner/gokrb5.v7/client"
+ krb5config "gopkg.in/jcmturner/gokrb5.v7/config"
+ "gopkg.in/jcmturner/gokrb5.v7/keytab"
+ "gopkg.in/jcmturner/gokrb5.v7/types"
+)
+
+type KerberosGoKrb5Client struct {
+ krb5client.Client
+}
+
+func (c *KerberosGoKrb5Client) Domain() string {
+ return c.Credentials.Domain()
+}
+
+func (c *KerberosGoKrb5Client) CName() types.PrincipalName {
+ return c.Credentials.CName()
+}
+
+/*
+* NewKerberosClient creates a Kerberos client used to obtain TGT and TGS tokens.
+* It uses the gokrb5 library, a pure Go Kerberos client with some GSS-API
+* capabilities and SPNEGO support. Kafka does not use SPNEGO; it uses a pure
+* Kerberos 5 solution (RFC 4121 and RFC 4120).
+ */
+func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) {
+ cfg, err := krb5config.Load(config.KerberosConfigPath)
+ if err != nil {
+ return nil, err
+ }
+ return createClient(config, cfg)
+}
+
+func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) {
+ var client *krb5client.Client
+ if config.AuthType == KRB5_KEYTAB_AUTH {
+ kt, err := keytab.Load(config.KeyTabPath)
+ if err != nil {
+ return nil, err
+ }
+ client = krb5client.NewClientWithKeytab(config.Username, config.Realm, kt, cfg)
+ } else {
+ client = krb5client.NewClientWithPassword(config.Username,
+ config.Realm, config.Password, cfg)
+ }
+ return &KerberosGoKrb5Client{*client}, nil
+}
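Editorial note: this client is normally created indirectly, by filling in Config.Net.SASL.GSSAPI. A minimal sketch of a keytab-based configuration; the paths, principal, and realm below are placeholders:

```go
package main

import "github.com/Shopify/sarama"

// kerberosConfig returns a sarama config that authenticates via GSSAPI
// using a keytab.
func kerberosConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.Mechanism = sarama.SASLTypeGSSAPI
	cfg.Net.SASL.GSSAPI = sarama.GSSAPIConfig{
		AuthType:           sarama.KRB5_KEYTAB_AUTH,
		KerberosConfigPath: "/etc/krb5.conf",             // placeholder
		KeyTabPath:         "/etc/security/kafka.keytab", // placeholder
		ServiceName:        "kafka",
		Username:           "kafka-client",
		Realm:              "EXAMPLE.COM",
	}
	return cfg
}
```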
diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go
new file mode 100644
index 0000000..e177427
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/leave_group_request.go
@@ -0,0 +1,40 @@
+package sarama
+
+type LeaveGroupRequest struct {
+ GroupId string
+ MemberId string
+}
+
+func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ return nil
+}
+
+func (r *LeaveGroupRequest) key() int16 {
+ return 13
+}
+
+func (r *LeaveGroupRequest) version() int16 {
+ return 0
+}
+
+func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go
new file mode 100644
index 0000000..d60c626
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/leave_group_response.go
@@ -0,0 +1,32 @@
+package sarama
+
+type LeaveGroupResponse struct {
+ Err KError
+}
+
+func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ return nil
+}
+
+func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Err = KError(kerr)
+
+ return nil
+}
+
+func (r *LeaveGroupResponse) key() int16 {
+ return 13
+}
+
+func (r *LeaveGroupResponse) version() int16 {
+ return 0
+}
+
+func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go
new file mode 100644
index 0000000..7d864f6
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/length_field.go
@@ -0,0 +1,99 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "sync"
+)
+
+// lengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths.
+type lengthField struct {
+ startOffset int
+ length int32
+}
+
+var lengthFieldPool = sync.Pool{}
+
+func acquireLengthField() *lengthField {
+ val := lengthFieldPool.Get()
+ if val != nil {
+ return val.(*lengthField)
+ }
+ return &lengthField{}
+}
+
+func releaseLengthField(m *lengthField) {
+ lengthFieldPool.Put(m)
+}
+
+func (l *lengthField) decode(pd packetDecoder) error {
+ var err error
+ l.length, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ if l.length > int32(pd.remaining()) {
+ return ErrInsufficientData
+ }
+ return nil
+}
+
+func (l *lengthField) saveOffset(in int) {
+ l.startOffset = in
+}
+
+func (l *lengthField) reserveLength() int {
+ return 4
+}
+
+func (l *lengthField) run(curOffset int, buf []byte) error {
+ binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
+ return nil
+}
+
+func (l *lengthField) check(curOffset int, buf []byte) error {
+ if int32(curOffset-l.startOffset-4) != l.length {
+ return PacketDecodingError{"length field invalid"}
+ }
+
+ return nil
+}
+
+type varintLengthField struct {
+ startOffset int
+ length int64
+}
+
+func (l *varintLengthField) decode(pd packetDecoder) error {
+ var err error
+ l.length, err = pd.getVarint()
+ return err
+}
+
+func (l *varintLengthField) saveOffset(in int) {
+ l.startOffset = in
+}
+
+func (l *varintLengthField) adjustLength(currOffset int) int {
+ oldFieldSize := l.reserveLength()
+ l.length = int64(currOffset - l.startOffset - oldFieldSize)
+
+ return l.reserveLength() - oldFieldSize
+}
+
+func (l *varintLengthField) reserveLength() int {
+ var tmp [binary.MaxVarintLen64]byte
+ return binary.PutVarint(tmp[:], l.length)
+}
+
+func (l *varintLengthField) run(curOffset int, buf []byte) error {
+ binary.PutVarint(buf[l.startOffset:], l.length)
+ return nil
+}
+
+func (l *varintLengthField) check(curOffset int, buf []byte) error {
+ if int64(curOffset-l.startOffset-l.reserveLength()) != l.length {
+ return PacketDecodingError{"length field invalid"}
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go
new file mode 100644
index 0000000..3b16abf
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_groups_request.go
@@ -0,0 +1,24 @@
+package sarama
+
+type ListGroupsRequest struct {
+}
+
+func (r *ListGroupsRequest) encode(pe packetEncoder) error {
+ return nil
+}
+
+func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+ return nil
+}
+
+func (r *ListGroupsRequest) key() int16 {
+ return 16
+}
+
+func (r *ListGroupsRequest) version() int16 {
+ return 0
+}
+
+func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go
new file mode 100644
index 0000000..56115d4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_groups_response.go
@@ -0,0 +1,69 @@
+package sarama
+
+type ListGroupsResponse struct {
+ Err KError
+ Groups map[string]string
+}
+
+func (r *ListGroupsResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+
+ if err := pe.putArrayLength(len(r.Groups)); err != nil {
+ return err
+ }
+ for groupId, protocolType := range r.Groups {
+ if err := pe.putString(groupId); err != nil {
+ return err
+ }
+ if err := pe.putString(protocolType); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.Groups = make(map[string]string)
+ for i := 0; i < n; i++ {
+ groupId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ protocolType, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ r.Groups[groupId] = protocolType
+ }
+
+ return nil
+}
+
+func (r *ListGroupsResponse) key() int16 {
+ return 16
+}
+
+func (r *ListGroupsResponse) version() int16 {
+ return 0
+}
+
+func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
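Editorial note: this request/response pair is usually driven through the ClusterAdmin API rather than built by hand. A brief sketch (the broker address is a placeholder, and the Kafka version is set explicitly because ClusterAdmin requires one):

```go
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func listGroups() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V1_0_0_0 // ClusterAdmin needs a broker version set

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	// ListConsumerGroups returns group id -> protocol type, mirroring
	// ListGroupsResponse.Groups.
	groups, err := admin.ListConsumerGroups()
	if err != nil {
		log.Fatal(err)
	}
	for id, protocolType := range groups {
		fmt.Println(id, protocolType)
	}
}
```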
diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go
new file mode 100644
index 0000000..7c54748
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/message.go
@@ -0,0 +1,175 @@
+package sarama
+
+import (
+ "fmt"
+ "time"
+)
+
+const (
+ //CompressionNone no compression
+ CompressionNone CompressionCodec = iota
+ //CompressionGZIP compression using GZIP
+ CompressionGZIP
+ //CompressionSnappy compression using snappy
+ CompressionSnappy
+ //CompressionLZ4 compression using LZ4
+ CompressionLZ4
+ //CompressionZSTD compression using ZSTD
+ CompressionZSTD
+
+ // The lowest 3 bits contain the compression codec used for the message
+ compressionCodecMask int8 = 0x07
+
+ // Bit 3 set for "LogAppend" timestamps
+ timestampTypeMask = 0x08
+
+ // CompressionLevelDefault is the constant to use in CompressionLevel
+ // to select the default compression level for any codec. The value is
+ // chosen so that it does not collide with any valid compression level.
+ CompressionLevelDefault = -1000
+)
+
+// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
+type CompressionCodec int8
+
+func (cc CompressionCodec) String() string {
+ return []string{
+ "none",
+ "gzip",
+ "snappy",
+ "lz4",
+ "zstd",
+ }[int(cc)]
+}
+
+//Message is a kafka message type
+type Message struct {
+ Codec CompressionCodec // codec used to compress the message contents
+ CompressionLevel int // compression level
+ LogAppendTime bool // the used timestamp is LogAppendTime
+ Key []byte // the message key, may be nil
+ Value []byte // the message contents
+ Set *MessageSet // the message set a message might wrap
+ Version int8 // v1 requires Kafka 0.10
+ Timestamp time.Time // the timestamp of the message (version 1+ only)
+
+ compressedCache []byte
+ compressedSize int // used for computing the compression ratio metrics
+}
+
+func (m *Message) encode(pe packetEncoder) error {
+ pe.push(newCRC32Field(crcIEEE))
+
+ pe.putInt8(m.Version)
+
+ attributes := int8(m.Codec) & compressionCodecMask
+ if m.LogAppendTime {
+ attributes |= timestampTypeMask
+ }
+ pe.putInt8(attributes)
+
+ if m.Version >= 1 {
+ if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil {
+ return err
+ }
+ }
+
+ err := pe.putBytes(m.Key)
+ if err != nil {
+ return err
+ }
+
+ var payload []byte
+
+ if m.compressedCache != nil {
+ payload = m.compressedCache
+ m.compressedCache = nil
+ } else if m.Value != nil {
+
+ payload, err = compress(m.Codec, m.CompressionLevel, m.Value)
+ if err != nil {
+ return err
+ }
+ m.compressedCache = payload
+ // Keep in mind the compressed payload size for metric gathering
+ m.compressedSize = len(payload)
+ }
+
+ if err = pe.putBytes(payload); err != nil {
+ return err
+ }
+
+ return pe.pop()
+}
+
+func (m *Message) decode(pd packetDecoder) (err error) {
+ crc32Decoder := acquireCrc32Field(crcIEEE)
+ defer releaseCrc32Field(crc32Decoder)
+
+ err = pd.push(crc32Decoder)
+ if err != nil {
+ return err
+ }
+
+ m.Version, err = pd.getInt8()
+ if err != nil {
+ return err
+ }
+
+ if m.Version > 1 {
+ return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)}
+ }
+
+ attribute, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ m.Codec = CompressionCodec(attribute & compressionCodecMask)
+ m.LogAppendTime = attribute&timestampTypeMask == timestampTypeMask
+
+ if m.Version == 1 {
+ if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil {
+ return err
+ }
+ }
+
+ m.Key, err = pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ m.Value, err = pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ // Required for deep equal assertion during tests but might be useful
+ // for future metrics about the compression ratio in fetch requests
+ m.compressedSize = len(m.Value)
+
+ switch m.Codec {
+ case CompressionNone:
+ // nothing to do
+ default:
+ if m.Value == nil {
+ break
+ }
+
+ m.Value, err = decompress(m.Codec, m.Value)
+ if err != nil {
+ return err
+ }
+ if err := m.decodeSet(); err != nil {
+ return err
+ }
+ }
+
+ return pd.pop()
+}
+
+// decodes a message set from a previously encoded bulk-message
+func (m *Message) decodeSet() (err error) {
+ pd := realDecoder{raw: m.Value}
+ m.Set = &MessageSet{}
+ return m.Set.decode(&pd)
+}
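Editorial note: the compression constants defined in this file are normally set on a producer configuration rather than on Message values directly. A small usage sketch:

```go
package main

import "github.com/Shopify/sarama"

// compressedProducerConfig enables gzip compression with the codec's
// default compression level.
func compressedProducerConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Producer.Compression = sarama.CompressionGZIP
	cfg.Producer.CompressionLevel = sarama.CompressionLevelDefault
	return cfg
}
```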
diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go
new file mode 100644
index 0000000..6523ec2
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/message_set.go
@@ -0,0 +1,111 @@
+package sarama
+
+type MessageBlock struct {
+ Offset int64
+ Msg *Message
+}
+
+// Messages is a convenience helper that returns all the
+// messages wrapped in this block.
+func (msb *MessageBlock) Messages() []*MessageBlock {
+ if msb.Msg.Set != nil {
+ return msb.Msg.Set.Messages
+ }
+ return []*MessageBlock{msb}
+}
+
+func (msb *MessageBlock) encode(pe packetEncoder) error {
+ pe.putInt64(msb.Offset)
+ pe.push(&lengthField{})
+ err := msb.Msg.encode(pe)
+ if err != nil {
+ return err
+ }
+ return pe.pop()
+}
+
+func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
+ if msb.Offset, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ lengthDecoder := acquireLengthField()
+ defer releaseLengthField(lengthDecoder)
+
+ if err = pd.push(lengthDecoder); err != nil {
+ return err
+ }
+
+ msb.Msg = new(Message)
+ if err = msb.Msg.decode(pd); err != nil {
+ return err
+ }
+
+ if err = pd.pop(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type MessageSet struct {
+ PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
+ OverflowMessage bool // whether the set on the wire contained an overflow message
+ Messages []*MessageBlock
+}
+
+func (ms *MessageSet) encode(pe packetEncoder) error {
+ for i := range ms.Messages {
+ err := ms.Messages[i].encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (ms *MessageSet) decode(pd packetDecoder) (err error) {
+ ms.Messages = nil
+
+ for pd.remaining() > 0 {
+ magic, err := magicValue(pd)
+ if err != nil {
+ if err == ErrInsufficientData {
+ ms.PartialTrailingMessage = true
+ return nil
+ }
+ return err
+ }
+
+ if magic > 1 {
+ return nil
+ }
+
+ msb := new(MessageBlock)
+ err = msb.decode(pd)
+ switch err {
+ case nil:
+ ms.Messages = append(ms.Messages, msb)
+ case ErrInsufficientData:
+ // As an optimization the server is allowed to return a partial message at the
+ // end of the message set. Clients should handle this case. So we just ignore such things.
+ if msb.Offset == -1 {
+ // This is an overflow message caused by chunked down conversion
+ ms.OverflowMessage = true
+ } else {
+ ms.PartialTrailingMessage = true
+ }
+ return nil
+ default:
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (ms *MessageSet) addMessage(msg *Message) {
+ block := new(MessageBlock)
+ block.Msg = msg
+ ms.Messages = append(ms.Messages, block)
+}
diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go
new file mode 100644
index 0000000..1b590d3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metadata_request.go
@@ -0,0 +1,81 @@
+package sarama
+
+type MetadataRequest struct {
+ Version int16
+ Topics []string
+ AllowAutoTopicCreation bool
+}
+
+func (r *MetadataRequest) encode(pe packetEncoder) error {
+ if r.Version < 0 || r.Version > 5 {
+ return PacketEncodingError{"invalid or unsupported MetadataRequest version field"}
+ }
+ if r.Version == 0 || len(r.Topics) > 0 {
+ err := pe.putArrayLength(len(r.Topics))
+ if err != nil {
+ return err
+ }
+
+ for i := range r.Topics {
+ err = pe.putString(r.Topics[i])
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ pe.putInt32(-1)
+ }
+ if r.Version > 3 {
+ pe.putBool(r.AllowAutoTopicCreation)
+ }
+ return nil
+}
+
+func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
+ r.Version = version
+ size, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ if size > 0 {
+ r.Topics = make([]string, size)
+ for i := range r.Topics {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.Topics[i] = topic
+ }
+ }
+ if r.Version > 3 {
+ autoCreation, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+ r.AllowAutoTopicCreation = autoCreation
+ }
+ return nil
+}
+
+func (r *MetadataRequest) key() int16 {
+ return 3
+}
+
+func (r *MetadataRequest) version() int16 {
+ return r.Version
+}
+
+func (r *MetadataRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_10_0_0
+ case 2:
+ return V0_10_1_0
+ case 3, 4:
+ return V0_11_0_0
+ case 5:
+ return V1_0_0_0
+ default:
+ return MinVersion
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go
new file mode 100644
index 0000000..b2d532e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metadata_response.go
@@ -0,0 +1,322 @@
+package sarama
+
+type PartitionMetadata struct {
+ Err KError
+ ID int32
+ Leader int32
+ Replicas []int32
+ Isr []int32
+ OfflineReplicas []int32
+}
+
+func (pm *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ pm.Err = KError(tmp)
+
+ pm.ID, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ pm.Leader, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ pm.Replicas, err = pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+
+ pm.Isr, err = pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+
+ if version >= 5 {
+ pm.OfflineReplicas, err = pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (pm *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) {
+ pe.putInt16(int16(pm.Err))
+ pe.putInt32(pm.ID)
+ pe.putInt32(pm.Leader)
+
+ err = pe.putInt32Array(pm.Replicas)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putInt32Array(pm.Isr)
+ if err != nil {
+ return err
+ }
+
+ if version >= 5 {
+ err = pe.putInt32Array(pm.OfflineReplicas)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type TopicMetadata struct {
+ Err KError
+ Name string
+ IsInternal bool // Only valid for Version >= 1
+ Partitions []*PartitionMetadata
+}
+
+func (tm *TopicMetadata) decode(pd packetDecoder, version int16) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ tm.Err = KError(tmp)
+
+ tm.Name, err = pd.getString()
+ if err != nil {
+ return err
+ }
+
+ if version >= 1 {
+ tm.IsInternal, err = pd.getBool()
+ if err != nil {
+ return err
+ }
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ tm.Partitions = make([]*PartitionMetadata, n)
+ for i := 0; i < n; i++ {
+ tm.Partitions[i] = new(PartitionMetadata)
+ err = tm.Partitions[i].decode(pd, version)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (tm *TopicMetadata) encode(pe packetEncoder, version int16) (err error) {
+ pe.putInt16(int16(tm.Err))
+
+ err = pe.putString(tm.Name)
+ if err != nil {
+ return err
+ }
+
+ if version >= 1 {
+ pe.putBool(tm.IsInternal)
+ }
+
+ err = pe.putArrayLength(len(tm.Partitions))
+ if err != nil {
+ return err
+ }
+
+ for _, pm := range tm.Partitions {
+ err = pm.encode(pe, version)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type MetadataResponse struct {
+ Version int16
+ ThrottleTimeMs int32
+ Brokers []*Broker
+ ClusterID *string
+ ControllerID int32
+ Topics []*TopicMetadata
+}
+
+func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if version >= 3 {
+ r.ThrottleTimeMs, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Brokers = make([]*Broker, n)
+ for i := 0; i < n; i++ {
+ r.Brokers[i] = new(Broker)
+ err = r.Brokers[i].decode(pd, version)
+ if err != nil {
+ return err
+ }
+ }
+
+ if version >= 2 {
+ r.ClusterID, err = pd.getNullableString()
+ if err != nil {
+ return err
+ }
+ }
+
+ if version >= 1 {
+ r.ControllerID, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ } else {
+ r.ControllerID = -1
+ }
+
+ n, err = pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Topics = make([]*TopicMetadata, n)
+ for i := 0; i < n; i++ {
+ r.Topics[i] = new(TopicMetadata)
+ err = r.Topics[i].decode(pd, version)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *MetadataResponse) encode(pe packetEncoder) error {
+ if r.Version >= 3 {
+ pe.putInt32(r.ThrottleTimeMs)
+ }
+
+ err := pe.putArrayLength(len(r.Brokers))
+ if err != nil {
+ return err
+ }
+ for _, broker := range r.Brokers {
+ err = broker.encode(pe, r.Version)
+ if err != nil {
+ return err
+ }
+ }
+
+ if r.Version >= 2 {
+ err := pe.putNullableString(r.ClusterID)
+ if err != nil {
+ return err
+ }
+ }
+
+ if r.Version >= 1 {
+ pe.putInt32(r.ControllerID)
+ }
+
+ err = pe.putArrayLength(len(r.Topics))
+ if err != nil {
+ return err
+ }
+ for _, tm := range r.Topics {
+ err = tm.encode(pe, r.Version)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *MetadataResponse) key() int16 {
+ return 3
+}
+
+func (r *MetadataResponse) version() int16 {
+ return r.Version
+}
+
+func (r *MetadataResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_10_0_0
+ case 2:
+ return V0_10_1_0
+ case 3, 4:
+ return V0_11_0_0
+ case 5:
+ return V1_0_0_0
+ default:
+ return MinVersion
+ }
+}
+
+// testing API
+
+func (r *MetadataResponse) AddBroker(addr string, id int32) {
+ r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
+}
+
+func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
+ var tmatch *TopicMetadata
+
+ for _, tm := range r.Topics {
+ if tm.Name == topic {
+ tmatch = tm
+ goto foundTopic
+ }
+ }
+
+ tmatch = new(TopicMetadata)
+ tmatch.Name = topic
+ r.Topics = append(r.Topics, tmatch)
+
+foundTopic:
+
+ tmatch.Err = err
+ return tmatch
+}
+
+func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) {
+ tmatch := r.AddTopic(topic, ErrNoError)
+ var pmatch *PartitionMetadata
+
+ for _, pm := range tmatch.Partitions {
+ if pm.ID == partition {
+ pmatch = pm
+ goto foundPartition
+ }
+ }
+
+ pmatch = new(PartitionMetadata)
+ pmatch.ID = partition
+ tmatch.Partitions = append(tmatch.Partitions, pmatch)
+
+foundPartition:
+
+ pmatch.Leader = brokerID
+ pmatch.Replicas = replicas
+ pmatch.Isr = isr
+ pmatch.OfflineReplicas = offline
+ pmatch.Err = err
+
+}
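Editorial note: the "testing API" helpers at the end of this file are meant to be combined with the MockBroker added later in this change. A minimal sketch of fabricating a metadata response for a test, with a hypothetical topic name:

```go
package sarama_test

import (
	"testing"

	"github.com/Shopify/sarama"
)

func TestMetadataFixture(t *testing.T) {
	broker := sarama.NewMockBroker(t, 1)
	defer broker.Close()

	// Build a MetadataResponse that advertises the mock broker as the
	// leader for partition 0 of "my-topic" and queue it as the next reply.
	meta := new(sarama.MetadataResponse)
	meta.AddBroker(broker.Addr(), broker.BrokerID())
	meta.AddTopicPartition("my-topic", 0, broker.BrokerID(), nil, nil, nil, sarama.ErrNoError)
	broker.Returns(meta)

	client, err := sarama.NewClient([]string{broker.Addr()}, sarama.NewConfig())
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()
}
```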
diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go
new file mode 100644
index 0000000..90e5a87
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metrics.go
@@ -0,0 +1,43 @@
+package sarama
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
+// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
+// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
+// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
+const (
+ metricsReservoirSize = 1028
+ metricsAlphaFactor = 0.015
+)
+
+func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
+ return r.GetOrRegister(name, func() metrics.Histogram {
+ return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
+ }).(metrics.Histogram)
+}
+
+func getMetricNameForBroker(name string, broker *Broker) string {
+ // Use the broker id, like the Java client, as it does not contain '.' or ':' characters that
+ // can be interpreted as special characters by monitoring tools (e.g. Graphite)
+ return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
+}
+
+func getMetricNameForTopic(name string, topic string) string {
+ // Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
+ // cf. KAFKA-1902 and KAFKA-2337
+ return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
+}
+
+func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
+ return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
+}
+
+func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
+ return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
+}
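Editorial note: the meters and histograms registered by these helpers land in the go-metrics registry attached to the client configuration, so they can be read back in tests or exported to a reporter. A brief sketch:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// dumpMetrics prints every metric recorded in the registry shared by
// brokers, producers and consumers built from this config.
func dumpMetrics(cfg *sarama.Config) {
	for name, values := range cfg.MetricRegistry.GetAll() {
		fmt.Println(name, values)
	}
}
```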
diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go
new file mode 100644
index 0000000..4ed46a6
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockbroker.go
@@ -0,0 +1,403 @@
+package sarama
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "net"
+ "reflect"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+const (
+ expectationTimeout = 500 * time.Millisecond
+)
+
+type GSSApiHandlerFunc func([]byte) []byte
+
+type requestHandlerFunc func(req *request) (res encoder)
+
+// RequestNotifierFunc is invoked when a mock broker processes a request successfully
+// and provides the number of bytes read and written.
+type RequestNotifierFunc func(bytesRead, bytesWritten int)
+
+// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
+// to facilitate testing of higher level or specialized consumers and producers
+// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
+// but rather provides a facility to do that. It takes care of the TCP
+// transport, request unmarshaling, and response marshaling, and leaves it to
+// the test writer to program MockBroker behaviour that is correct according
+// to the Kafka API protocol.
+//
+// MockBroker is implemented as a TCP server listening on a kernel-selected
+// localhost port that can accept many connections. It reads Kafka requests
+// from that connection and returns responses programmed by the SetHandlerByMap
+// function. If a MockBroker receives a request that it has no programmed
+// response for, then it returns nothing and the request times out.
+//
+// A set of MockRequest builders to define mappings used by MockBroker is
+// provided by Sarama. But users can develop MockRequests of their own and use
+// them along with or instead of the standard ones.
+//
+// When running tests with MockBroker it is strongly recommended to specify
+// a timeout to `go test` so that if the broker hangs waiting for a response,
+// the test panics.
+//
+// It is not necessary to prefix message length or correlation ID to your
+// response bytes; the server does that automatically as a convenience.
+type MockBroker struct {
+ brokerID int32
+ port int32
+ closing chan none
+ stopper chan none
+ expectations chan encoder
+ listener net.Listener
+ t TestReporter
+ latency time.Duration
+ handler requestHandlerFunc
+ notifier RequestNotifierFunc
+ history []RequestResponse
+ lock sync.Mutex
+ gssApiHandler GSSApiHandlerFunc
+}
+
+// RequestResponse represents a Request/Response pair processed by MockBroker.
+type RequestResponse struct {
+ Request protocolBody
+ Response encoder
+}
+
+// SetLatency makes broker pause for the specified period every time before
+// replying.
+func (b *MockBroker) SetLatency(latency time.Duration) {
+ b.latency = latency
+}
+
+// SetHandlerByMap defines mapping of Request types to MockResponses. When a
+// request is received by the broker, it looks up the request type in the map
+// and uses the found MockResponse instance to generate an appropriate reply.
+// If the request type is not found in the map then nothing is sent.
+func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
+ b.setHandler(func(req *request) (res encoder) {
+ reqTypeName := reflect.TypeOf(req.body).Elem().Name()
+ mockResponse := handlerMap[reqTypeName]
+ if mockResponse == nil {
+ return nil
+ }
+ return mockResponse.For(req.body)
+ })
+}
+
+// SetNotifier sets a function that will be invoked whenever a request has been
+// processed successfully, providing the number of bytes read and written.
+func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
+ b.lock.Lock()
+ b.notifier = notifier
+ b.lock.Unlock()
+}
+
+// BrokerID returns broker ID assigned to the broker.
+func (b *MockBroker) BrokerID() int32 {
+ return b.brokerID
+}
+
+// History returns a slice of RequestResponse pairs in the order they were
+// processed by the broker. Note that in case of multiple connections to the
+// broker the order expected by a test can be different from the order recorded
+// in the history, unless some synchronization is implemented in the test.
+func (b *MockBroker) History() []RequestResponse {
+ b.lock.Lock()
+ history := make([]RequestResponse, len(b.history))
+ copy(history, b.history)
+ b.lock.Unlock()
+ return history
+}
+
+// Port returns the TCP port number the broker is listening for requests on.
+func (b *MockBroker) Port() int32 {
+ return b.port
+}
+
+// Addr returns the broker connection string in the form "<address>:<port>".
+func (b *MockBroker) Addr() string {
+ return b.listener.Addr().String()
+}
+
+// Close terminates the broker blocking until it stops internal goroutines and
+// releases all resources.
+func (b *MockBroker) Close() {
+ close(b.expectations)
+ if len(b.expectations) > 0 {
+ buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
+ for e := range b.expectations {
+ _, _ = buf.WriteString(spew.Sdump(e))
+ }
+ b.t.Error(buf.String())
+ }
+ close(b.closing)
+ <-b.stopper
+}
+
+// setHandler sets the specified function as the request handler. Whenever
+// a mock broker reads a request from the wire it passes the request to the
+// function and sends back whatever the handler function returns.
+func (b *MockBroker) setHandler(handler requestHandlerFunc) {
+ b.lock.Lock()
+ b.handler = handler
+ b.lock.Unlock()
+}
+
+func (b *MockBroker) serverLoop() {
+ defer close(b.stopper)
+ var err error
+ var conn net.Conn
+
+ go func() {
+ <-b.closing
+ err := b.listener.Close()
+ if err != nil {
+ b.t.Error(err)
+ }
+ }()
+
+ wg := &sync.WaitGroup{}
+ i := 0
+ for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
+ wg.Add(1)
+ go b.handleRequests(conn, i, wg)
+ i++
+ }
+ wg.Wait()
+ Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
+}
+
+func (b *MockBroker) SetGSSAPIHandler(handler GSSApiHandlerFunc) {
+ b.gssApiHandler = handler
+}
+
+func (b *MockBroker) readToBytes(r io.Reader) ([]byte, error) {
+ var (
+ bytesRead int
+ lengthBytes = make([]byte, 4)
+ )
+
+ if _, err := io.ReadFull(r, lengthBytes); err != nil {
+ return nil, err
+ }
+
+ bytesRead += len(lengthBytes)
+ length := int32(binary.BigEndian.Uint32(lengthBytes))
+
+ if length <= 4 || length > MaxRequestSize {
+ return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+ }
+
+ encodedReq := make([]byte, length)
+ if _, err := io.ReadFull(r, encodedReq); err != nil {
+ return nil, err
+ }
+
+ bytesRead += len(encodedReq)
+
+ fullBytes := append(lengthBytes, encodedReq...)
+
+ return fullBytes, nil
+}
+
+func (b *MockBroker) isGSSAPI(buffer []byte) bool {
+ return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04})
+}
+
+func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
+ defer wg.Done()
+ defer func() {
+ _ = conn.Close()
+ }()
+ Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
+ var err error
+
+ abort := make(chan none)
+ defer close(abort)
+ go func() {
+ select {
+ case <-b.closing:
+ _ = conn.Close()
+ case <-abort:
+ }
+ }()
+
+ resHeader := make([]byte, 8)
+ var bytesWritten int
+ var bytesRead int
+ for {
+
+ buffer, err := b.readToBytes(conn)
+ if err != nil {
+ Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer))
+ b.serverError(err)
+ break
+ }
+
+ bytesWritten = 0
+ if !b.isGSSAPI(buffer) {
+
+ req, br, err := decodeRequest(bytes.NewReader(buffer))
+ bytesRead = br
+ if err != nil {
+ Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
+ b.serverError(err)
+ break
+ }
+
+ if b.latency > 0 {
+ time.Sleep(b.latency)
+ }
+
+ b.lock.Lock()
+ res := b.handler(req)
+ b.history = append(b.history, RequestResponse{req.body, res})
+ b.lock.Unlock()
+
+ if res == nil {
+ Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
+ continue
+ }
+ Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
+
+ encodedRes, err := encode(res, nil)
+ if err != nil {
+ b.serverError(err)
+ break
+ }
+ if len(encodedRes) == 0 {
+ b.lock.Lock()
+ if b.notifier != nil {
+ b.notifier(bytesRead, 0)
+ }
+ b.lock.Unlock()
+ continue
+ }
+
+ binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
+ binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
+ if _, err = conn.Write(resHeader); err != nil {
+ b.serverError(err)
+ break
+ }
+ if _, err = conn.Write(encodedRes); err != nil {
+ b.serverError(err)
+ break
+ }
+ bytesWritten = len(resHeader) + len(encodedRes)
+
+ } else {
+ // GSSAPI is not part of the Kafka protocol, but is supported for authentication purposes.
+ // History is not kept for this kind of request as it is only used to test the GSSAPI authentication mechanism.
+ b.lock.Lock()
+ res := b.gssApiHandler(buffer)
+ b.lock.Unlock()
+ if res == nil {
+ Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(buffer))
+ continue
+ }
+ if _, err = conn.Write(res); err != nil {
+ b.serverError(err)
+ break
+ }
+ bytesWritten = len(res)
+ }
+
+ b.lock.Lock()
+ if b.notifier != nil {
+ b.notifier(bytesRead, bytesWritten)
+ }
+ b.lock.Unlock()
+
+ }
+ Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
+}
+
+func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) {
+ select {
+ case res, ok := <-b.expectations:
+ if !ok {
+ return nil
+ }
+ return res
+ case <-time.After(expectationTimeout):
+ return nil
+ }
+}
+
+func (b *MockBroker) serverError(err error) {
+ isConnectionClosedError := false
+ if _, ok := err.(*net.OpError); ok {
+ isConnectionClosedError = true
+ } else if err == io.EOF {
+ isConnectionClosedError = true
+ } else if err.Error() == "use of closed network connection" {
+ isConnectionClosedError = true
+ }
+
+ if isConnectionClosedError {
+ return
+ }
+
+ b.t.Errorf(err.Error())
+}
+
+// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
+// test framework and a broker ID. If an error occurs it is
+// simply logged to the TestReporter and the broker exits.
+func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
+ return NewMockBrokerAddr(t, brokerID, "localhost:0")
+}
+
+// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
+// it rather than just some ephemeral port.
+func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
+ listener, err := net.Listen("tcp", addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return NewMockBrokerListener(t, brokerID, listener)
+}
+
+// NewMockBrokerListener behaves like NewMockBrokerAddr but accepts connections on the listener specified.
+func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker {
+ var err error
+
+ broker := &MockBroker{
+ closing: make(chan none),
+ stopper: make(chan none),
+ t: t,
+ brokerID: brokerID,
+ expectations: make(chan encoder, 512),
+ listener: listener,
+ }
+ broker.handler = broker.defaultRequestHandler
+
+ Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
+ _, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ tmp, err := strconv.ParseInt(portStr, 10, 32)
+ if err != nil {
+ t.Fatal(err)
+ }
+ broker.port = int32(tmp)
+
+ go broker.serverLoop()
+
+ return broker
+}
+
+func (b *MockBroker) Returns(e encoder) {
+ b.expectations <- e
+}
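Editorial note: putting the pieces together, a sketch of a test that programs the broker with SetHandlerByMap using the mock response builders from mockresponses.go (the topic name is a placeholder):

```go
package sarama_test

import (
	"testing"

	"github.com/Shopify/sarama"
)

func TestClientAgainstMockBroker(t *testing.T) {
	broker := sarama.NewMockBroker(t, 1)
	defer broker.Close()

	// Map incoming MetadataRequests to a canned response that names the
	// mock broker as the leader for partition 0 of "my-topic".
	broker.SetHandlerByMap(map[string]sarama.MockResponse{
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(broker.Addr(), broker.BrokerID()).
			SetLeader("my-topic", 0, broker.BrokerID()),
	})

	client, err := sarama.NewClient([]string{broker.Addr()}, sarama.NewConfig())
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()
}
```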
diff --git a/vendor/github.com/Shopify/sarama/mockkerberos.go b/vendor/github.com/Shopify/sarama/mockkerberos.go
new file mode 100644
index 0000000..affeb2d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockkerberos.go
@@ -0,0 +1,123 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "gopkg.in/jcmturner/gokrb5.v7/credentials"
+ "gopkg.in/jcmturner/gokrb5.v7/gssapi"
+ "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
+ "gopkg.in/jcmturner/gokrb5.v7/messages"
+ "gopkg.in/jcmturner/gokrb5.v7/types"
+)
+
+type KafkaGSSAPIHandler struct {
+ client *MockKerberosClient
+ badResponse bool
+ badKeyChecksum bool
+}
+
+func (h *KafkaGSSAPIHandler) MockKafkaGSSAPI(buffer []byte) []byte {
+ // Default payload used for verify
+ err := h.client.Login() // Mock client construct keys when login
+ if err != nil {
+ return nil
+ }
+ if h.badResponse { // Returns trash
+ return []byte{0x00, 0x00, 0x00, 0x01, 0xAD}
+ }
+
+ var pack = gssapi.WrapToken{
+ Flags: KRB5_USER_AUTH,
+ EC: 12,
+ RRC: 0,
+ SndSeqNum: 3398292281,
+ Payload: []byte{0x11, 0x00}, // 1100
+ }
+ // Compute checksum
+ if h.badKeyChecksum {
+ pack.CheckSum = []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
+ } else {
+ err = pack.SetCheckSum(h.client.ASRep.DecryptedEncPart.Key, keyusage.GSSAPI_ACCEPTOR_SEAL)
+ if err != nil {
+ return nil
+ }
+ }
+
+ packBytes, err := pack.Marshal()
+ if err != nil {
+ return nil
+ }
+ lenBytes := len(packBytes)
+ response := make([]byte, lenBytes+4)
+ copy(response[4:], packBytes)
+ binary.BigEndian.PutUint32(response, uint32(lenBytes))
+ return response
+}
+
+type MockKerberosClient struct {
+ asReqBytes string
+ asRepBytes string
+ ASRep messages.ASRep
+ credentials *credentials.Credentials
+ mockError error
+ errorStage string
+}
+
+func (c *MockKerberosClient) Login() error {
+ if c.errorStage == "login" && c.mockError != nil {
+ return c.mockError
+ }
+ c.asRepBytes = "6b8202e9308202e5a003020105a10302010ba22b30293027a103020113a220041e301c301aa003020112a1131b114" +
+ "558414d504c452e434f4d636c69656e74a30d1b0b4558414d504c452e434f4da4133011a003020101a10a30081b06636c69656e7" +
+ "4a5820156618201523082014ea003020105a10d1b0b4558414d504c452e434f4da220301ea003020102a11730151b066b7262746" +
+ "7741b0b4558414d504c452e434f4da382011430820110a003020112a103020101a28201020481ffdb9891175d106818e61008c51" +
+ "d0b3462bca92f3bf9d4cfa82de4c4d7aff9994ec87c573e3a3d54dcb2bb79618c76f2bf4a3d006f90d5bdbd049bc18f48be39203" +
+ "549ca02acaf63f292b12404f9b74c34b83687119d8f56552ccc0c50ebee2a53bb114c1b4619bb1d5d31f0f49b4d40a08a9b4c046" +
+ "2e1398d0b648be1c0e50c552ad16e1d8d8e74263dd0bf0ec591e4797dfd40a9a1be4ae830d03a306e053fd7586fef84ffc5e4a83" +
+ "7c3122bf3e6a40fe87e84019f6283634461b955712b44a5f7386c278bff94ec2c2dc0403247e29c2450e853471ceababf9b8911f" +
+ "997f2e3010b046d2c49eb438afb0f4c210821e80d4ffa4c9521eb895dcd68610b3feaa682012c30820128a003020112a282011f0" +
+ "482011bce73cbce3f1dd17661c412005f0f2257c756fe8e98ff97e6ec24b7bab66e5fd3a3827aeeae4757af0c6e892948122d8b2" +
+ "03c8df48df0ef5d142d0e416d688f11daa0fcd63d96bdd431d02b8e951c664eeff286a2be62383d274a04016d5f0e141da58cb86" +
+ "331de64063062f4f885e8e9ce5b181ca2fdc67897c5995e0ae1ae0c171a64493ff7bd91bc6d89cd4fce1e2b3ea0a10e34b0d5eda" +
+ "aa38ee727b50c5632ed1d2f2b457908e616178d0d80b72af209fb8ac9dbaa1768fa45931392b36b6d8c12400f8ded2efaa0654d0" +
+ "da1db966e8b5aab4706c800f95d559664646041fdb38b411c62fc0fbe0d25083a28562b0e1c8df16e62e9d5626b0addee489835f" +
+ "eedb0f26c05baa596b69b17f47920aa64b29dc77cfcc97ba47885"
+ apRepBytes, err := hex.DecodeString(c.asRepBytes)
+ if err != nil {
+ return err
+ }
+ err = c.ASRep.Unmarshal(apRepBytes)
+ if err != nil {
+ return err
+ }
+ c.credentials = credentials.New("client", "EXAMPLE.COM").WithPassword("qwerty")
+ _, err = c.ASRep.DecryptEncPart(c.credentials)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (c *MockKerberosClient) GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) {
+ if c.errorStage == "service_ticket" && c.mockError != nil {
+ return messages.Ticket{}, types.EncryptionKey{}, c.mockError
+ }
+ return c.ASRep.Ticket, c.ASRep.DecryptedEncPart.Key, nil
+}
+
+func (c *MockKerberosClient) Domain() string {
+ return "EXAMPLE.COM"
+}
+func (c *MockKerberosClient) CName() types.PrincipalName {
+ var p = types.PrincipalName{
+ NameType: KRB5_USER_AUTH,
+ NameString: []string{
+ "kafka",
+ "kafka",
+ },
+ }
+ return p
+}
+func (c *MockKerberosClient) Destroy() {
+ // Do nothing.
+}
diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go
new file mode 100644
index 0000000..7dcc93e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockresponses.go
@@ -0,0 +1,941 @@
+package sarama
+
+import (
+ "fmt"
+ "strings"
+)
+
+// TestReporter has methods matching go's testing.T to avoid importing
+// `testing` in the main part of the library.
+type TestReporter interface {
+ Error(...interface{})
+ Errorf(string, ...interface{})
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+}
+
+// MockResponse is a response builder interface; it defines one method that
+// allows generating a response based on a request body. MockResponses are used
+// to program behavior of MockBroker in tests.
+type MockResponse interface {
+ For(reqBody versionedDecoder) (res encoder)
+}
+
+// MockWrapper is a mock response builder that returns a particular concrete
+// response regardless of the actual request passed to the `For` method.
+type MockWrapper struct {
+ res encoder
+}
+
+func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
+ return mw.res
+}
+
+func NewMockWrapper(res encoder) *MockWrapper {
+ return &MockWrapper{res: res}
+}
+
+// MockSequence is a mock response builder that is created from a sequence of
+// concrete responses. Each time a `MockBroker` calls its `For` method
+// the next response from the sequence is returned. When the end of the
+// sequence is reached the last element from the sequence is returned.
+type MockSequence struct {
+ responses []MockResponse
+}
+
+func NewMockSequence(responses ...interface{}) *MockSequence {
+ ms := &MockSequence{}
+ ms.responses = make([]MockResponse, len(responses))
+ for i, res := range responses {
+ switch res := res.(type) {
+ case MockResponse:
+ ms.responses[i] = res
+ case encoder:
+ ms.responses[i] = NewMockWrapper(res)
+ default:
+ panic(fmt.Sprintf("Unexpected response type: %T", res))
+ }
+ }
+ return ms
+}
+
+func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
+ res = mc.responses[0].For(reqBody)
+ if len(mc.responses) > 1 {
+ mc.responses = mc.responses[1:]
+ }
+ return res
+}
+
+type MockListGroupsResponse struct {
+ groups map[string]string
+ t TestReporter
+}
+
+func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse {
+ return &MockListGroupsResponse{
+ groups: make(map[string]string),
+ t: t,
+ }
+}
+
+func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoder {
+ request := reqBody.(*ListGroupsRequest)
+ _ = request
+ response := &ListGroupsResponse{
+ Groups: m.groups,
+ }
+ return response
+}
+
+func (m *MockListGroupsResponse) AddGroup(groupID, protocolType string) *MockListGroupsResponse {
+ m.groups[groupID] = protocolType
+ return m
+}
+
+type MockDescribeGroupsResponse struct {
+ groups map[string]*GroupDescription
+ t TestReporter
+}
+
+func NewMockDescribeGroupsResponse(t TestReporter) *MockDescribeGroupsResponse {
+ return &MockDescribeGroupsResponse{
+ t: t,
+ groups: make(map[string]*GroupDescription),
+ }
+}
+
+func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, description *GroupDescription) *MockDescribeGroupsResponse {
+ m.groups[groupID] = description
+ return m
+}
+
+func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoder {
+ request := reqBody.(*DescribeGroupsRequest)
+
+ response := &DescribeGroupsResponse{}
+ for _, requestedGroup := range request.Groups {
+ if group, ok := m.groups[requestedGroup]; ok {
+ response.Groups = append(response.Groups, group)
+ } else {
+ // Mimic real kafka - if a group doesn't exist, return
+ // an entry with state "Dead"
+ response.Groups = append(response.Groups, &GroupDescription{
+ GroupId: requestedGroup,
+ State: "Dead",
+ })
+ }
+ }
+
+ return response
+}
+
+// MockMetadataResponse is a `MetadataResponse` builder.
+type MockMetadataResponse struct {
+ controllerID int32
+ leaders map[string]map[int32]int32
+ brokers map[string]int32
+ t TestReporter
+}
+
+func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
+ return &MockMetadataResponse{
+ leaders: make(map[string]map[int32]int32),
+ brokers: make(map[string]int32),
+ t: t,
+ }
+}
+
+func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
+ partitions := mmr.leaders[topic]
+ if partitions == nil {
+ partitions = make(map[int32]int32)
+ mmr.leaders[topic] = partitions
+ }
+ partitions[partition] = brokerID
+ return mmr
+}
+
+func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
+ mmr.brokers[addr] = brokerID
+ return mmr
+}
+
+func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResponse {
+ mmr.controllerID = brokerID
+ return mmr
+}
+
+func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
+ metadataRequest := reqBody.(*MetadataRequest)
+ metadataResponse := &MetadataResponse{
+ Version: metadataRequest.version(),
+ ControllerID: mmr.controllerID,
+ }
+ for addr, brokerID := range mmr.brokers {
+ metadataResponse.AddBroker(addr, brokerID)
+ }
+
+ // Generate set of replicas
+ replicas := []int32{}
+ offlineReplicas := []int32{}
+ for _, brokerID := range mmr.brokers {
+ replicas = append(replicas, brokerID)
+ }
+
+ if len(metadataRequest.Topics) == 0 {
+ for topic, partitions := range mmr.leaders {
+ for partition, brokerID := range partitions {
+ metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
+ }
+ }
+ return metadataResponse
+ }
+ for _, topic := range metadataRequest.Topics {
+ for partition, brokerID := range mmr.leaders[topic] {
+ metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
+ }
+ }
+ return metadataResponse
+}
+
+// MockOffsetResponse is an `OffsetResponse` builder.
+type MockOffsetResponse struct {
+ offsets map[string]map[int32]map[int64]int64
+ t TestReporter
+ version int16
+}
+
+func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
+ return &MockOffsetResponse{
+ offsets: make(map[string]map[int32]map[int64]int64),
+ t: t,
+ }
+}
+
+func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse {
+ mor.version = version
+ return mor
+}
+
+func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
+ partitions := mor.offsets[topic]
+ if partitions == nil {
+ partitions = make(map[int32]map[int64]int64)
+ mor.offsets[topic] = partitions
+ }
+ times := partitions[partition]
+ if times == nil {
+ times = make(map[int64]int64)
+ partitions[partition] = times
+ }
+ times[time] = offset
+ return mor
+}
+
+func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
+ offsetRequest := reqBody.(*OffsetRequest)
+ offsetResponse := &OffsetResponse{Version: mor.version}
+ for topic, partitions := range offsetRequest.blocks {
+ for partition, block := range partitions {
+ offset := mor.getOffset(topic, partition, block.time)
+ offsetResponse.AddTopicPartition(topic, partition, offset)
+ }
+ }
+ return offsetResponse
+}
+
+func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
+ partitions := mor.offsets[topic]
+ if partitions == nil {
+ mor.t.Errorf("missing topic: %s", topic)
+ }
+ times := partitions[partition]
+ if times == nil {
+ mor.t.Errorf("missing partition: %d", partition)
+ }
+ offset, ok := times[time]
+ if !ok {
+ mor.t.Errorf("missing time: %d", time)
+ }
+ return offset
+}
+
+// MockFetchResponse is a `FetchResponse` builder.
+type MockFetchResponse struct {
+ messages map[string]map[int32]map[int64]Encoder
+ highWaterMarks map[string]map[int32]int64
+ t TestReporter
+ batchSize int
+ version int16
+}
+
+func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
+ return &MockFetchResponse{
+ messages: make(map[string]map[int32]map[int64]Encoder),
+ highWaterMarks: make(map[string]map[int32]int64),
+ t: t,
+ batchSize: batchSize,
+ }
+}
+
+func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse {
+ mfr.version = version
+ return mfr
+}
+
+func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
+ partitions := mfr.messages[topic]
+ if partitions == nil {
+ partitions = make(map[int32]map[int64]Encoder)
+ mfr.messages[topic] = partitions
+ }
+ messages := partitions[partition]
+ if messages == nil {
+ messages = make(map[int64]Encoder)
+ partitions[partition] = messages
+ }
+ messages[offset] = msg
+ return mfr
+}
+
+func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
+ partitions := mfr.highWaterMarks[topic]
+ if partitions == nil {
+ partitions = make(map[int32]int64)
+ mfr.highWaterMarks[topic] = partitions
+ }
+ partitions[partition] = offset
+ return mfr
+}
+
+func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
+ fetchRequest := reqBody.(*FetchRequest)
+ res := &FetchResponse{
+ Version: mfr.version,
+ }
+ for topic, partitions := range fetchRequest.blocks {
+ for partition, block := range partitions {
+ initialOffset := block.fetchOffset
+ offset := initialOffset
+ maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
+ for i := 0; i < mfr.batchSize && offset < maxOffset; {
+ msg := mfr.getMessage(topic, partition, offset)
+ if msg != nil {
+ res.AddMessage(topic, partition, nil, msg, offset)
+ i++
+ }
+ offset++
+ }
+ fb := res.GetBlock(topic, partition)
+ if fb == nil {
+ res.AddError(topic, partition, ErrNoError)
+ fb = res.GetBlock(topic, partition)
+ }
+ fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
+ }
+ }
+ return res
+}
+
+func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder {
+ partitions := mfr.messages[topic]
+ if partitions == nil {
+ return nil
+ }
+ messages := partitions[partition]
+ if messages == nil {
+ return nil
+ }
+ return messages[offset]
+}
+
+func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
+ partitions := mfr.messages[topic]
+ if partitions == nil {
+ return 0
+ }
+ messages := partitions[partition]
+ if messages == nil {
+ return 0
+ }
+ return len(messages)
+}
+
+func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
+ partitions := mfr.highWaterMarks[topic]
+ if partitions == nil {
+ return 0
+ }
+ return partitions[partition]
+}
+
+// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
+type MockConsumerMetadataResponse struct {
+ coordinators map[string]interface{}
+ t TestReporter
+}
+
+func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
+ return &MockConsumerMetadataResponse{
+ coordinators: make(map[string]interface{}),
+ t: t,
+ }
+}
+
+func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
+ mr.coordinators[group] = broker
+ return mr
+}
+
+func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
+ mr.coordinators[group] = kerror
+ return mr
+}
+
+func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*ConsumerMetadataRequest)
+ group := req.ConsumerGroup
+ res := &ConsumerMetadataResponse{}
+ v := mr.coordinators[group]
+ switch v := v.(type) {
+ case *MockBroker:
+ res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
+ case KError:
+ res.Err = v
+ }
+ return res
+}
+
+// MockFindCoordinatorResponse is a `FindCoordinatorResponse` builder.
+type MockFindCoordinatorResponse struct {
+ groupCoordinators map[string]interface{}
+ transCoordinators map[string]interface{}
+ t TestReporter
+}
+
+func NewMockFindCoordinatorResponse(t TestReporter) *MockFindCoordinatorResponse {
+ return &MockFindCoordinatorResponse{
+ groupCoordinators: make(map[string]interface{}),
+ transCoordinators: make(map[string]interface{}),
+ t: t,
+ }
+}
+
+func (mr *MockFindCoordinatorResponse) SetCoordinator(coordinatorType CoordinatorType, group string, broker *MockBroker) *MockFindCoordinatorResponse {
+ switch coordinatorType {
+ case CoordinatorGroup:
+ mr.groupCoordinators[group] = broker
+ case CoordinatorTransaction:
+ mr.transCoordinators[group] = broker
+ }
+ return mr
+}
+
+func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, group string, kerror KError) *MockFindCoordinatorResponse {
+ switch coordinatorType {
+ case CoordinatorGroup:
+ mr.groupCoordinators[group] = kerror
+ case CoordinatorTransaction:
+ mr.transCoordinators[group] = kerror
+ }
+ return mr
+}
+
+func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*FindCoordinatorRequest)
+ res := &FindCoordinatorResponse{}
+ var v interface{}
+ switch req.CoordinatorType {
+ case CoordinatorGroup:
+ v = mr.groupCoordinators[req.CoordinatorKey]
+ case CoordinatorTransaction:
+ v = mr.transCoordinators[req.CoordinatorKey]
+ }
+ switch v := v.(type) {
+ case *MockBroker:
+ res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
+ case KError:
+ res.Err = v
+ }
+ return res
+}
+
+// MockOffsetCommitResponse is a `OffsetCommitResponse` builder.
+type MockOffsetCommitResponse struct {
+ errors map[string]map[string]map[int32]KError
+ t TestReporter
+}
+
+func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
+ return &MockOffsetCommitResponse{t: t}
+}
+
+func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
+ if mr.errors == nil {
+ mr.errors = make(map[string]map[string]map[int32]KError)
+ }
+ topics := mr.errors[group]
+ if topics == nil {
+ topics = make(map[string]map[int32]KError)
+ mr.errors[group] = topics
+ }
+ partitions := topics[topic]
+ if partitions == nil {
+ partitions = make(map[int32]KError)
+ topics[topic] = partitions
+ }
+ partitions[partition] = kerror
+ return mr
+}
+
+func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*OffsetCommitRequest)
+ group := req.ConsumerGroup
+ res := &OffsetCommitResponse{}
+ for topic, partitions := range req.blocks {
+ for partition := range partitions {
+ res.AddError(topic, partition, mr.getError(group, topic, partition))
+ }
+ }
+ return res
+}
+
+func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
+ topics := mr.errors[group]
+ if topics == nil {
+ return ErrNoError
+ }
+ partitions := topics[topic]
+ if partitions == nil {
+ return ErrNoError
+ }
+ kerror, ok := partitions[partition]
+ if !ok {
+ return ErrNoError
+ }
+ return kerror
+}
+
+// MockProduceResponse is a `ProduceResponse` builder.
+type MockProduceResponse struct {
+ version int16
+ errors map[string]map[int32]KError
+ t TestReporter
+}
+
+func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
+ return &MockProduceResponse{t: t}
+}
+
+func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse {
+ mr.version = version
+ return mr
+}
+
+func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
+ if mr.errors == nil {
+ mr.errors = make(map[string]map[int32]KError)
+ }
+ partitions := mr.errors[topic]
+ if partitions == nil {
+ partitions = make(map[int32]KError)
+ mr.errors[topic] = partitions
+ }
+ partitions[partition] = kerror
+ return mr
+}
+
+func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*ProduceRequest)
+ res := &ProduceResponse{
+ Version: mr.version,
+ }
+ for topic, partitions := range req.records {
+ for partition := range partitions {
+ res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
+ }
+ }
+ return res
+}
+
+func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
+ partitions := mr.errors[topic]
+ if partitions == nil {
+ return ErrNoError
+ }
+ kerror, ok := partitions[partition]
+ if !ok {
+ return ErrNoError
+ }
+ return kerror
+}
+
+// MockOffsetFetchResponse is an `OffsetFetchResponse` builder.
+type MockOffsetFetchResponse struct {
+ offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
+ error KError
+ t TestReporter
+}
+
+func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
+ return &MockOffsetFetchResponse{t: t}
+}
+
+func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
+ if mr.offsets == nil {
+ mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
+ }
+ topics := mr.offsets[group]
+ if topics == nil {
+ topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
+ mr.offsets[group] = topics
+ }
+ partitions := topics[topic]
+ if partitions == nil {
+ partitions = make(map[int32]*OffsetFetchResponseBlock)
+ topics[topic] = partitions
+ }
+ partitions[partition] = &OffsetFetchResponseBlock{offset, 0, metadata, kerror}
+ return mr
+}
+
+func (mr *MockOffsetFetchResponse) SetError(kerror KError) *MockOffsetFetchResponse {
+ mr.error = kerror
+ return mr
+}
+
+func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*OffsetFetchRequest)
+ group := req.ConsumerGroup
+ res := &OffsetFetchResponse{Version: req.Version}
+
+ for topic, partitions := range mr.offsets[group] {
+ for partition, block := range partitions {
+ res.AddBlock(topic, partition, block)
+ }
+ }
+
+ if res.Version >= 2 {
+ res.Err = mr.error
+ }
+ return res
+}
+
+type MockCreateTopicsResponse struct {
+ t TestReporter
+}
+
+func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse {
+ return &MockCreateTopicsResponse{t: t}
+}
+
+func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*CreateTopicsRequest)
+ res := &CreateTopicsResponse{
+ Version: req.Version,
+ }
+ res.TopicErrors = make(map[string]*TopicError)
+
+ for topic := range req.TopicDetails {
+ if res.Version >= 1 && strings.HasPrefix(topic, "_") {
+ msg := "insufficient permissions to create topic with reserved prefix"
+ res.TopicErrors[topic] = &TopicError{
+ Err: ErrTopicAuthorizationFailed,
+ ErrMsg: &msg,
+ }
+ continue
+ }
+ res.TopicErrors[topic] = &TopicError{Err: ErrNoError}
+ }
+ return res
+}
+
+type MockDeleteTopicsResponse struct {
+ t TestReporter
+}
+
+func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse {
+ return &MockDeleteTopicsResponse{t: t}
+}
+
+func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*DeleteTopicsRequest)
+ res := &DeleteTopicsResponse{}
+ res.TopicErrorCodes = make(map[string]KError)
+
+ for _, topic := range req.Topics {
+ res.TopicErrorCodes[topic] = ErrNoError
+ }
+ res.Version = int16(req.Version)
+ return res
+}
+
+type MockCreatePartitionsResponse struct {
+ t TestReporter
+}
+
+func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsResponse {
+ return &MockCreatePartitionsResponse{t: t}
+}
+
+func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*CreatePartitionsRequest)
+ res := &CreatePartitionsResponse{}
+ res.TopicPartitionErrors = make(map[string]*TopicPartitionError)
+
+ for topic := range req.TopicPartitions {
+ if strings.HasPrefix(topic, "_") {
+ msg := "insufficient permissions to create partition on topic with reserved prefix"
+ res.TopicPartitionErrors[topic] = &TopicPartitionError{
+ Err: ErrTopicAuthorizationFailed,
+ ErrMsg: &msg,
+ }
+ continue
+ }
+ res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError}
+ }
+ return res
+}
+
+type MockDeleteRecordsResponse struct {
+ t TestReporter
+}
+
+func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse {
+ return &MockDeleteRecordsResponse{t: t}
+}
+
+func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*DeleteRecordsRequest)
+ res := &DeleteRecordsResponse{}
+ res.Topics = make(map[string]*DeleteRecordsResponseTopic)
+
+ for topic, deleteRecordRequestTopic := range req.Topics {
+ partitions := make(map[int32]*DeleteRecordsResponsePartition)
+ for partition := range deleteRecordRequestTopic.PartitionOffsets {
+ partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError}
+ }
+ res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions}
+ }
+ return res
+}
+
+type MockDescribeConfigsResponse struct {
+ t TestReporter
+}
+
+func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse {
+ return &MockDescribeConfigsResponse{t: t}
+}
+
+func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*DescribeConfigsRequest)
+ res := &DescribeConfigsResponse{}
+
+ for _, r := range req.Resources {
+ var configEntries []*ConfigEntry
+ switch r.Type {
+ case TopicResource:
+ configEntries = append(configEntries,
+ &ConfigEntry{Name: "max.message.bytes",
+ Value: "1000000",
+ ReadOnly: false,
+ Default: true,
+ Sensitive: false,
+ }, &ConfigEntry{Name: "retention.ms",
+ Value: "5000",
+ ReadOnly: false,
+ Default: false,
+ Sensitive: false,
+ }, &ConfigEntry{Name: "password",
+ Value: "12345",
+ ReadOnly: false,
+ Default: false,
+ Sensitive: true,
+ })
+ res.Resources = append(res.Resources, &ResourceResponse{
+ Name: r.Name,
+ Configs: configEntries,
+ })
+ }
+ }
+ return res
+}
+
+type MockAlterConfigsResponse struct {
+ t TestReporter
+}
+
+func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse {
+ return &MockAlterConfigsResponse{t: t}
+}
+
+func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*AlterConfigsRequest)
+ res := &AlterConfigsResponse{}
+
+ for _, r := range req.Resources {
+ res.Resources = append(res.Resources, &AlterConfigsResourceResponse{Name: r.Name,
+ Type: TopicResource,
+ ErrorMsg: "",
+ })
+ }
+ return res
+}
+
+type MockCreateAclsResponse struct {
+ t TestReporter
+}
+
+func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse {
+ return &MockCreateAclsResponse{t: t}
+}
+
+func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*CreateAclsRequest)
+ res := &CreateAclsResponse{}
+
+ for range req.AclCreations {
+ res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrNoError})
+ }
+ return res
+}
+
+type MockListAclsResponse struct {
+ t TestReporter
+}
+
+func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse {
+ return &MockListAclsResponse{t: t}
+}
+
+func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*DescribeAclsRequest)
+ res := &DescribeAclsResponse{}
+ res.Err = ErrNoError
+ acl := &ResourceAcls{}
+ if req.ResourceName != nil {
+ acl.Resource.ResourceName = *req.ResourceName
+ }
+ acl.Resource.ResourcePatternType = req.ResourcePatternTypeFilter
+ acl.Resource.ResourceType = req.ResourceType
+
+ host := "*"
+ if req.Host != nil {
+ host = *req.Host
+ }
+
+ principal := "User:test"
+ if req.Principal != nil {
+ principal = *req.Principal
+ }
+
+ permissionType := req.PermissionType
+ if permissionType == AclPermissionAny {
+ permissionType = AclPermissionAllow
+ }
+
+ acl.Acls = append(acl.Acls, &Acl{Operation: req.Operation, PermissionType: permissionType, Host: host, Principal: principal})
+ res.ResourceAcls = append(res.ResourceAcls, acl)
+ res.Version = int16(req.Version)
+ return res
+}
+
+type MockSaslAuthenticateResponse struct {
+ t TestReporter
+ kerror KError
+ saslAuthBytes []byte
+}
+
+func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateResponse {
+ return &MockSaslAuthenticateResponse{t: t}
+}
+
+func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoder {
+ res := &SaslAuthenticateResponse{}
+ res.Err = msar.kerror
+ res.SaslAuthBytes = msar.saslAuthBytes
+ return res
+}
+
+func (msar *MockSaslAuthenticateResponse) SetError(kerror KError) *MockSaslAuthenticateResponse {
+ msar.kerror = kerror
+ return msar
+}
+
+func (msar *MockSaslAuthenticateResponse) SetAuthBytes(saslAuthBytes []byte) *MockSaslAuthenticateResponse {
+ msar.saslAuthBytes = saslAuthBytes
+ return msar
+}
+
+type MockDeleteAclsResponse struct {
+ t TestReporter
+}
+
+type MockSaslHandshakeResponse struct {
+ enabledMechanisms []string
+ kerror KError
+ t TestReporter
+}
+
+func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse {
+ return &MockSaslHandshakeResponse{t: t}
+}
+
+func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoder {
+ res := &SaslHandshakeResponse{}
+ res.Err = mshr.kerror
+ res.EnabledMechanisms = mshr.enabledMechanisms
+ return res
+}
+
+func (mshr *MockSaslHandshakeResponse) SetError(kerror KError) *MockSaslHandshakeResponse {
+ mshr.kerror = kerror
+ return mshr
+}
+
+func (mshr *MockSaslHandshakeResponse) SetEnabledMechanisms(enabledMechanisms []string) *MockSaslHandshakeResponse {
+ mshr.enabledMechanisms = enabledMechanisms
+ return mshr
+}
+
+func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse {
+ return &MockDeleteAclsResponse{t: t}
+}
+
+func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*DeleteAclsRequest)
+ res := &DeleteAclsResponse{}
+
+ for range req.Filters {
+ response := &FilterResponse{Err: ErrNoError}
+ response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError})
+ res.FilterResponses = append(res.FilterResponses, response)
+ }
+ res.Version = int16(req.Version)
+ return res
+}
+
+type MockDeleteGroupsResponse struct {
+ deletedGroups []string
+}
+
+func NewMockDeleteGroupsRequest(t TestReporter) *MockDeleteGroupsResponse {
+ return &MockDeleteGroupsResponse{}
+}
+
+func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDeleteGroupsResponse {
+ m.deletedGroups = groups
+ return m
+}
+
+func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoder {
+ resp := &DeleteGroupsResponse{
+ GroupErrorCodes: map[string]KError{},
+ }
+ for _, group := range m.deletedGroups {
+ resp.GroupErrorCodes[group] = ErrNoError
+ }
+ return resp
+}
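The builders above are meant to be registered with sarama's in-process `MockBroker`, so a test can script exactly what the "broker" replies for each request type. A minimal sketch, assuming the `NewMockBroker`/`SetHandlerByMap` test helpers that ship in the same package; the group, topic, and error values are placeholders:

```go
package sarama_test

import (
	"testing"

	"github.com/Shopify/sarama"
)

// TestOffsetCommitAgainstMockBroker sketches how the mock response builders
// are wired up: each request type name is answered by the matching builder.
func TestOffsetCommitAgainstMockBroker(t *testing.T) {
	broker := sarama.NewMockBroker(t, 1)
	defer broker.Close()

	broker.SetHandlerByMap(map[string]sarama.MockResponse{
		// Pretend the commit for partition 0 of "my-topic" failed.
		"OffsetCommitRequest": sarama.NewMockOffsetCommitResponse(t).
			SetError("my-group", "my-topic", 0, sarama.ErrOffsetMetadataTooLarge),
		// Report a previously committed offset of 42 on fetch.
		"OffsetFetchRequest": sarama.NewMockOffsetFetchResponse(t).
			SetOffset("my-group", "my-topic", 0, 42, "", sarama.ErrNoError),
	})

	// A real test would now point a Client or OffsetManager at broker.Addr()
	// and assert on the behaviour driven by these canned responses.
	_ = broker.Addr()
}
```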
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go
new file mode 100644
index 0000000..5732ed9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go
@@ -0,0 +1,210 @@
+package sarama
+
+import "errors"
+
+// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
+// tells the broker to set the timestamp to the time at which the request was received.
+// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
+const ReceiveTime int64 = -1
+
+// GroupGenerationUndefined is a special value for the group generation field of
+// Offset Commit Requests that should be used when a consumer group does not rely
+// on Kafka for partition management.
+const GroupGenerationUndefined = -1
+
+type offsetCommitRequestBlock struct {
+ offset int64
+ timestamp int64
+ metadata string
+}
+
+func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
+ pe.putInt64(b.offset)
+ if version == 1 {
+ pe.putInt64(b.timestamp)
+ } else if b.timestamp != 0 {
+ Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
+ }
+
+ return pe.putString(b.metadata)
+}
+
+func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+ if b.offset, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if version == 1 {
+ if b.timestamp, err = pd.getInt64(); err != nil {
+ return err
+ }
+ }
+ b.metadata, err = pd.getString()
+ return err
+}
+
+type OffsetCommitRequest struct {
+ ConsumerGroup string
+ ConsumerGroupGeneration int32 // v1 or later
+ ConsumerID string // v1 or later
+ RetentionTime int64 // v2 or later
+
+ // Version can be:
+ // - 0 (kafka 0.8.1 and later)
+ // - 1 (kafka 0.8.2 and later)
+ // - 2 (kafka 0.9.0 and later)
+ // - 3 (kafka 0.11.0 and later)
+ // - 4 (kafka 2.0.0 and later)
+ Version int16
+ blocks map[string]map[int32]*offsetCommitRequestBlock
+}
+
+func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
+ if r.Version < 0 || r.Version > 4 {
+ return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
+ }
+
+ if err := pe.putString(r.ConsumerGroup); err != nil {
+ return err
+ }
+
+ if r.Version >= 1 {
+ pe.putInt32(r.ConsumerGroupGeneration)
+ if err := pe.putString(r.ConsumerID); err != nil {
+ return err
+ }
+ } else {
+ if r.ConsumerGroupGeneration != 0 {
+ Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
+ }
+ if r.ConsumerID != "" {
+ Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
+ }
+ }
+
+ if r.Version >= 2 {
+ pe.putInt64(r.RetentionTime)
+ } else if r.RetentionTime != 0 {
+ Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
+ }
+
+ if err := pe.putArrayLength(len(r.blocks)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.blocks {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err := block.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if r.ConsumerGroup, err = pd.getString(); err != nil {
+ return err
+ }
+
+ if r.Version >= 1 {
+ if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.ConsumerID, err = pd.getString(); err != nil {
+ return err
+ }
+ }
+
+ if r.Version >= 2 {
+ if r.RetentionTime, err = pd.getInt64(); err != nil {
+ return err
+ }
+ }
+
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+ r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ block := &offsetCommitRequestBlock{}
+ if err := block.decode(pd, r.Version); err != nil {
+ return err
+ }
+ r.blocks[topic][partition] = block
+ }
+ }
+ return nil
+}
+
+func (r *OffsetCommitRequest) key() int16 {
+ return 8
+}
+
+func (r *OffsetCommitRequest) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_8_2_0
+ case 2:
+ return V0_9_0_0
+ case 3:
+ return V0_11_0_0
+ case 4:
+ return V2_0_0_0
+ default:
+ return MinVersion
+ }
+}
+
+func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
+ if r.blocks == nil {
+ r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+ }
+
+ if r.blocks[topic] == nil {
+ r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+ }
+
+ r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
+}
+
+func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) {
+ partitions := r.blocks[topic]
+ if partitions == nil {
+ return 0, "", errors.New("no such offset")
+ }
+ block := partitions[partitionID]
+ if block == nil {
+ return 0, "", errors.New("no such offset")
+ }
+ return block.offset, block.metadata, nil
+}
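Since the request's fields and `AddBlock` are exported, a commit request can also be assembled by hand when the higher-level offset manager is not used. A minimal sketch (group, topic, and offset values are placeholders; a real caller would send the request through the group coordinator with `Broker.CommitOffset`, as offset_manager.go does):

```go
package main

import (
	"fmt"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	// Build a v2 commit request (Kafka 0.9 and later) for one partition.
	// The per-block timestamp argument only matters for v1 requests.
	req := &sarama.OffsetCommitRequest{
		Version:                 2,
		ConsumerGroup:           "my-group",
		ConsumerID:              "consumer-1",
		ConsumerGroupGeneration: sarama.GroupGenerationUndefined,
		RetentionTime:           int64(24 * time.Hour / time.Millisecond),
	}
	req.AddBlock("my-topic", 0, 42, 0, "checkpoint metadata")

	// Blocks can be read back, e.g. to verify what will be committed.
	offset, metadata, err := req.Offset("my-topic", 0)
	fmt.Println(offset, metadata, err)
}
```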
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go
new file mode 100644
index 0000000..e842298
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go
@@ -0,0 +1,110 @@
+package sarama
+
+type OffsetCommitResponse struct {
+ Version int16
+ ThrottleTimeMs int32
+ Errors map[string]map[int32]KError
+}
+
+func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
+ if r.Errors == nil {
+ r.Errors = make(map[string]map[int32]KError)
+ }
+ partitions := r.Errors[topic]
+ if partitions == nil {
+ partitions = make(map[int32]KError)
+ r.Errors[topic] = partitions
+ }
+ partitions[partition] = kerror
+}
+
+func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
+ if r.Version >= 3 {
+ pe.putInt32(r.ThrottleTimeMs)
+ }
+ if err := pe.putArrayLength(len(r.Errors)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.Errors {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, kerror := range partitions {
+ pe.putInt32(partition)
+ pe.putInt16(int16(kerror))
+ }
+ }
+ return nil
+}
+
+func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if version >= 3 {
+ r.ThrottleTimeMs, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ }
+
+ numTopics, err := pd.getArrayLength()
+ if err != nil || numTopics == 0 {
+ return err
+ }
+
+ r.Errors = make(map[string]map[int32]KError, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numErrors, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Errors[name] = make(map[int32]KError, numErrors)
+
+ for j := 0; j < numErrors; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Errors[name][id] = KError(tmp)
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetCommitResponse) key() int16 {
+ return 8
+}
+
+func (r *OffsetCommitResponse) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_8_2_0
+ case 2:
+ return V0_9_0_0
+ case 3:
+ return V0_11_0_0
+ case 4:
+ return V2_0_0_0
+ default:
+ return MinVersion
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
new file mode 100644
index 0000000..6860824
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
@@ -0,0 +1,100 @@
+package sarama
+
+type OffsetFetchRequest struct {
+ Version int16
+ ConsumerGroup string
+ partitions map[string][]int32
+}
+
+func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
+ if r.Version < 0 || r.Version > 5 {
+ return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
+ }
+
+ if err = pe.putString(r.ConsumerGroup); err != nil {
+ return err
+ }
+
+ if r.Version >= 2 && r.partitions == nil {
+ pe.putInt32(-1)
+ } else {
+ if err = pe.putArrayLength(len(r.partitions)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.partitions {
+ if err = pe.putString(topic); err != nil {
+ return err
+ }
+ if err = pe.putInt32Array(partitions); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if r.ConsumerGroup, err = pd.getString(); err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if (partitionCount == 0 && version < 2) || partitionCount < 0 {
+ return nil
+ }
+ r.partitions = make(map[string][]int32)
+ for i := 0; i < partitionCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitions, err := pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+ r.partitions[topic] = partitions
+ }
+ return nil
+}
+
+func (r *OffsetFetchRequest) key() int16 {
+ return 9
+}
+
+func (r *OffsetFetchRequest) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_8_2_0
+ case 2:
+ return V0_10_2_0
+ case 3:
+ return V0_11_0_0
+ case 4:
+ return V2_0_0_0
+ case 5:
+ return V2_1_0_0
+ default:
+ return MinVersion
+ }
+}
+
+func (r *OffsetFetchRequest) ZeroPartitions() {
+ if r.partitions == nil && r.Version >= 2 {
+ r.partitions = make(map[string][]int32)
+ }
+}
+
+func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
+ if r.partitions == nil {
+ r.partitions = make(map[string][]int32)
+ }
+
+ r.partitions[topic] = append(r.partitions[topic], partitionID)
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
new file mode 100644
index 0000000..9e25702
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
@@ -0,0 +1,197 @@
+package sarama
+
+type OffsetFetchResponseBlock struct {
+ Offset int64
+ LeaderEpoch int32
+ Metadata string
+ Err KError
+}
+
+func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+ b.Offset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ if version >= 5 {
+ b.LeaderEpoch, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ }
+
+ b.Metadata, err = pd.getString()
+ if err != nil {
+ return err
+ }
+
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Err = KError(tmp)
+
+ return nil
+}
+
+func (b *OffsetFetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+ pe.putInt64(b.Offset)
+
+ if version >= 5 {
+ pe.putInt32(b.LeaderEpoch)
+ }
+
+ err = pe.putString(b.Metadata)
+ if err != nil {
+ return err
+ }
+
+ pe.putInt16(int16(b.Err))
+
+ return nil
+}
+
+type OffsetFetchResponse struct {
+ Version int16
+ ThrottleTimeMs int32
+ Blocks map[string]map[int32]*OffsetFetchResponseBlock
+ Err KError
+}
+
+func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
+ if r.Version >= 3 {
+ pe.putInt32(r.ThrottleTimeMs)
+ }
+
+ if err := pe.putArrayLength(len(r.Blocks)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.Blocks {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err := block.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+ }
+ if r.Version >= 2 {
+ pe.putInt16(int16(r.Err))
+ }
+ return nil
+}
+
+func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if version >= 3 {
+ r.ThrottleTimeMs, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ }
+
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if numTopics > 0 {
+ r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if numBlocks == 0 {
+ r.Blocks[name] = nil
+ continue
+ }
+ r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(OffsetFetchResponseBlock)
+ err = block.decode(pd, version)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+ }
+
+ if version >= 2 {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Err = KError(kerr)
+ }
+
+ return nil
+}
+
+func (r *OffsetFetchResponse) key() int16 {
+ return 9
+}
+
+func (r *OffsetFetchResponse) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_8_2_0
+ case 2:
+ return V0_10_2_0
+ case 3:
+ return V0_11_0_0
+ case 4:
+ return V2_0_0_0
+ case 5:
+ return V2_1_0_0
+ default:
+ return MinVersion
+ }
+}
+
+func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
+ }
+ partitions := r.Blocks[topic]
+ if partitions == nil {
+ partitions = make(map[int32]*OffsetFetchResponseBlock)
+ r.Blocks[topic] = partitions
+ }
+ partitions[partition] = block
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go
new file mode 100644
index 0000000..923972f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_manager.go
@@ -0,0 +1,580 @@
+package sarama
+
+import (
+ "sync"
+ "time"
+)
+
+// Offset Manager
+
+// OffsetManager uses Kafka to store and fetch consumed partition offsets.
+type OffsetManager interface {
+ // ManagePartition creates a PartitionOffsetManager on the given topic/partition.
+ // It will return an error if this OffsetManager is already managing the given
+ // topic/partition.
+ ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
+
+ // Close stops the OffsetManager from managing offsets. It is required to call
+ // this function before an OffsetManager object passes out of scope, as it
+ // will otherwise leak memory. You must call this after all the
+ // PartitionOffsetManagers are closed.
+ Close() error
+}
+
+type offsetManager struct {
+ client Client
+ conf *Config
+ group string
+ ticker *time.Ticker
+
+ memberID string
+ generation int32
+
+ broker *Broker
+ brokerLock sync.RWMutex
+
+ poms map[string]map[int32]*partitionOffsetManager
+ pomsLock sync.RWMutex
+
+ closeOnce sync.Once
+ closing chan none
+ closed chan none
+}
+
+// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
+// It is still necessary to call Close() on the underlying client when finished with the partition manager.
+func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
+ return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client)
+}
+
+func newOffsetManagerFromClient(group, memberID string, generation int32, client Client) (*offsetManager, error) {
+ // Check that we are not dealing with a closed Client before processing any other arguments
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ conf := client.Config()
+ om := &offsetManager{
+ client: client,
+ conf: conf,
+ group: group,
+ ticker: time.NewTicker(conf.Consumer.Offsets.CommitInterval),
+ poms: make(map[string]map[int32]*partitionOffsetManager),
+
+ memberID: memberID,
+ generation: generation,
+
+ closing: make(chan none),
+ closed: make(chan none),
+ }
+ go withRecover(om.mainLoop)
+
+ return om, nil
+}
+
+func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
+ pom, err := om.newPartitionOffsetManager(topic, partition)
+ if err != nil {
+ return nil, err
+ }
+
+ om.pomsLock.Lock()
+ defer om.pomsLock.Unlock()
+
+ topicManagers := om.poms[topic]
+ if topicManagers == nil {
+ topicManagers = make(map[int32]*partitionOffsetManager)
+ om.poms[topic] = topicManagers
+ }
+
+ if topicManagers[partition] != nil {
+ return nil, ConfigurationError("That topic/partition is already being managed")
+ }
+
+ topicManagers[partition] = pom
+ return pom, nil
+}
+
+func (om *offsetManager) Close() error {
+ om.closeOnce.Do(func() {
+ // exit the mainLoop
+ close(om.closing)
+ <-om.closed
+
+ // mark all POMs as closed
+ om.asyncClosePOMs()
+
+ // flush one last time
+ for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ {
+ om.flushToBroker()
+ if om.releasePOMs(false) == 0 {
+ break
+ }
+ }
+
+ om.releasePOMs(true)
+ om.brokerLock.Lock()
+ om.broker = nil
+ om.brokerLock.Unlock()
+ })
+ return nil
+}
+
+func (om *offsetManager) computeBackoff(retries int) time.Duration {
+ if om.conf.Metadata.Retry.BackoffFunc != nil {
+ return om.conf.Metadata.Retry.BackoffFunc(retries, om.conf.Metadata.Retry.Max)
+ } else {
+ return om.conf.Metadata.Retry.Backoff
+ }
+}
+
+func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, string, error) {
+ broker, err := om.coordinator()
+ if err != nil {
+ if retries <= 0 {
+ return 0, "", err
+ }
+ return om.fetchInitialOffset(topic, partition, retries-1)
+ }
+
+ req := new(OffsetFetchRequest)
+ req.Version = 1
+ req.ConsumerGroup = om.group
+ req.AddPartition(topic, partition)
+
+ resp, err := broker.FetchOffset(req)
+ if err != nil {
+ if retries <= 0 {
+ return 0, "", err
+ }
+ om.releaseCoordinator(broker)
+ return om.fetchInitialOffset(topic, partition, retries-1)
+ }
+
+ block := resp.GetBlock(topic, partition)
+ if block == nil {
+ return 0, "", ErrIncompleteResponse
+ }
+
+ switch block.Err {
+ case ErrNoError:
+ return block.Offset, block.Metadata, nil
+ case ErrNotCoordinatorForConsumer:
+ if retries <= 0 {
+ return 0, "", block.Err
+ }
+ om.releaseCoordinator(broker)
+ return om.fetchInitialOffset(topic, partition, retries-1)
+ case ErrOffsetsLoadInProgress:
+ if retries <= 0 {
+ return 0, "", block.Err
+ }
+ backoff := om.computeBackoff(retries)
+ select {
+ case <-om.closing:
+ return 0, "", block.Err
+ case <-time.After(backoff):
+ }
+ return om.fetchInitialOffset(topic, partition, retries-1)
+ default:
+ return 0, "", block.Err
+ }
+}
+
+func (om *offsetManager) coordinator() (*Broker, error) {
+ om.brokerLock.RLock()
+ broker := om.broker
+ om.brokerLock.RUnlock()
+
+ if broker != nil {
+ return broker, nil
+ }
+
+ om.brokerLock.Lock()
+ defer om.brokerLock.Unlock()
+
+ if broker := om.broker; broker != nil {
+ return broker, nil
+ }
+
+ if err := om.client.RefreshCoordinator(om.group); err != nil {
+ return nil, err
+ }
+
+ broker, err := om.client.Coordinator(om.group)
+ if err != nil {
+ return nil, err
+ }
+
+ om.broker = broker
+ return broker, nil
+}
+
+func (om *offsetManager) releaseCoordinator(b *Broker) {
+ om.brokerLock.Lock()
+ if om.broker == b {
+ om.broker = nil
+ }
+ om.brokerLock.Unlock()
+}
+
+func (om *offsetManager) mainLoop() {
+ defer om.ticker.Stop()
+ defer close(om.closed)
+
+ for {
+ select {
+ case <-om.ticker.C:
+ om.flushToBroker()
+ om.releasePOMs(false)
+ case <-om.closing:
+ return
+ }
+ }
+}
+
+func (om *offsetManager) flushToBroker() {
+ req := om.constructRequest()
+ if req == nil {
+ return
+ }
+
+ broker, err := om.coordinator()
+ if err != nil {
+ om.handleError(err)
+ return
+ }
+
+ resp, err := broker.CommitOffset(req)
+ if err != nil {
+ om.handleError(err)
+ om.releaseCoordinator(broker)
+ _ = broker.Close()
+ return
+ }
+
+ om.handleResponse(broker, req, resp)
+}
+
+func (om *offsetManager) constructRequest() *OffsetCommitRequest {
+ var r *OffsetCommitRequest
+ var perPartitionTimestamp int64
+ if om.conf.Consumer.Offsets.Retention == 0 {
+ perPartitionTimestamp = ReceiveTime
+ r = &OffsetCommitRequest{
+ Version: 1,
+ ConsumerGroup: om.group,
+ ConsumerID: om.memberID,
+ ConsumerGroupGeneration: om.generation,
+ }
+ } else {
+ r = &OffsetCommitRequest{
+ Version: 2,
+ RetentionTime: int64(om.conf.Consumer.Offsets.Retention / time.Millisecond),
+ ConsumerGroup: om.group,
+ ConsumerID: om.memberID,
+ ConsumerGroupGeneration: om.generation,
+ }
+
+ }
+
+ om.pomsLock.RLock()
+ defer om.pomsLock.RUnlock()
+
+ for _, topicManagers := range om.poms {
+ for _, pom := range topicManagers {
+ pom.lock.Lock()
+ if pom.dirty {
+ r.AddBlock(pom.topic, pom.partition, pom.offset, perPartitionTimestamp, pom.metadata)
+ }
+ pom.lock.Unlock()
+ }
+ }
+
+ if len(r.blocks) > 0 {
+ return r
+ }
+
+ return nil
+}
+
+func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest, resp *OffsetCommitResponse) {
+ om.pomsLock.RLock()
+ defer om.pomsLock.RUnlock()
+
+ for _, topicManagers := range om.poms {
+ for _, pom := range topicManagers {
+ if req.blocks[pom.topic] == nil || req.blocks[pom.topic][pom.partition] == nil {
+ continue
+ }
+
+ var err KError
+ var ok bool
+
+ if resp.Errors[pom.topic] == nil {
+ pom.handleError(ErrIncompleteResponse)
+ continue
+ }
+ if err, ok = resp.Errors[pom.topic][pom.partition]; !ok {
+ pom.handleError(ErrIncompleteResponse)
+ continue
+ }
+
+ switch err {
+ case ErrNoError:
+ block := req.blocks[pom.topic][pom.partition]
+ pom.updateCommitted(block.offset, block.metadata)
+ case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
+ ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
+ // not a critical error, we just need to redispatch
+ om.releaseCoordinator(broker)
+ case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
+ // nothing we can do about this, just tell the user and carry on
+ pom.handleError(err)
+ case ErrOffsetsLoadInProgress:
+ // nothing wrong but we didn't commit, we'll get it next time round
+ case ErrUnknownTopicOrPartition:
+ // let the user know *and* try redispatching - if topic-auto-create is
+ // enabled, redispatching should trigger a metadata req and create the
+ // topic; if not then re-dispatching won't help, but we've let the user
+ // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706)
+ fallthrough
+ default:
+ // dunno, tell the user and try redispatching
+ pom.handleError(err)
+ om.releaseCoordinator(broker)
+ }
+ }
+ }
+}
+
+func (om *offsetManager) handleError(err error) {
+ om.pomsLock.RLock()
+ defer om.pomsLock.RUnlock()
+
+ for _, topicManagers := range om.poms {
+ for _, pom := range topicManagers {
+ pom.handleError(err)
+ }
+ }
+}
+
+func (om *offsetManager) asyncClosePOMs() {
+ om.pomsLock.RLock()
+ defer om.pomsLock.RUnlock()
+
+ for _, topicManagers := range om.poms {
+ for _, pom := range topicManagers {
+ pom.AsyncClose()
+ }
+ }
+}
+
+// Releases/removes closed POMs once they are clean (or when forced)
+func (om *offsetManager) releasePOMs(force bool) (remaining int) {
+ om.pomsLock.Lock()
+ defer om.pomsLock.Unlock()
+
+ for topic, topicManagers := range om.poms {
+ for partition, pom := range topicManagers {
+ pom.lock.Lock()
+ releaseDue := pom.done && (force || !pom.dirty)
+ pom.lock.Unlock()
+
+ if releaseDue {
+ pom.release()
+
+ delete(om.poms[topic], partition)
+ if len(om.poms[topic]) == 0 {
+ delete(om.poms, topic)
+ }
+ }
+ }
+ remaining += len(om.poms[topic])
+ }
+ return
+}
+
+func (om *offsetManager) findPOM(topic string, partition int32) *partitionOffsetManager {
+ om.pomsLock.RLock()
+ defer om.pomsLock.RUnlock()
+
+ if partitions, ok := om.poms[topic]; ok {
+ if pom, ok := partitions[partition]; ok {
+ return pom
+ }
+ }
+ return nil
+}
+
+// Partition Offset Manager
+
+// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
+// on a partition offset manager to avoid leaks; it will not be garbage-collected automatically when it passes
+// out of scope.
+type PartitionOffsetManager interface {
+ // NextOffset returns the next offset that should be consumed for the managed
+ // partition, accompanied by metadata which can be used to reconstruct the state
+ // of the partition consumer when it resumes. NextOffset() will return
+ // `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
+ // was committed for this partition yet.
+ NextOffset() (int64, string)
+
+ // MarkOffset marks the provided offset, alongside a metadata string
+ // that represents the state of the partition consumer at that point in time. The
+ // metadata string can be used by another consumer to restore that state, so it
+ // can resume consumption.
+ //
+ // To follow upstream conventions, you are expected to mark the offset of the
+ // next message to read, not the last message read. Thus, when calling `MarkOffset`
+ // you should typically add one to the offset of the last consumed message.
+ //
+ // Note: calling MarkOffset does not necessarily commit the offset to the backend
+ // store immediately for efficiency reasons, and it may never be committed if
+ // your application crashes. This means that you may end up processing the same
+ // message twice, and your processing should ideally be idempotent.
+ MarkOffset(offset int64, metadata string)
+
+ // ResetOffset resets to the provided offset, alongside a metadata string that
+ // represents the state of the partition consumer at that point in time. Reset
+ // acts as a counterpart to MarkOffset, the difference being that it allows
+ // resetting an offset to an earlier or smaller value, whereas MarkOffset only
+ // allows incrementing the offset. See MarkOffset for more details.
+ ResetOffset(offset int64, metadata string)
+
+ // Errors returns a read channel of errors that occur during offset management, if
+ // enabled. By default, errors are logged and not returned over this channel. If
+ // you want to implement any custom error handling, set your config's
+ // Consumer.Return.Errors setting to true, and read from this channel.
+ Errors() <-chan *ConsumerError
+
+ // AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
+ // return immediately, after which you should wait until the 'errors' channel has
+ // been drained and closed. It is required to call this function, or Close, before
+ // a PartitionOffsetManager object passes out of scope, as it will otherwise leak
+ // memory. You must call this before calling Close on the underlying client.
+ AsyncClose()
+
+ // Close stops the PartitionOffsetManager from managing offsets. It is required to
+ // call this function (or AsyncClose) before a PartitionOffsetManager object
+ // passes out of scope, as it will otherwise leak memory. You must call this
+ // before calling Close on the underlying client.
+ Close() error
+}
+
+type partitionOffsetManager struct {
+ parent *offsetManager
+ topic string
+ partition int32
+
+ lock sync.Mutex
+ offset int64
+ metadata string
+ dirty bool
+ done bool
+
+ releaseOnce sync.Once
+ errors chan *ConsumerError
+}
+
+func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
+ offset, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max)
+ if err != nil {
+ return nil, err
+ }
+
+ return &partitionOffsetManager{
+ parent: om,
+ topic: topic,
+ partition: partition,
+ errors: make(chan *ConsumerError, om.conf.ChannelBufferSize),
+ offset: offset,
+ metadata: metadata,
+ }, nil
+}
+
+func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
+ return pom.errors
+}
+
+func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if offset > pom.offset {
+ pom.offset = offset
+ pom.metadata = metadata
+ pom.dirty = true
+ }
+}
+
+func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if offset <= pom.offset {
+ pom.offset = offset
+ pom.metadata = metadata
+ pom.dirty = true
+ }
+}
+
+func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if pom.offset == offset && pom.metadata == metadata {
+ pom.dirty = false
+ }
+}
+
+func (pom *partitionOffsetManager) NextOffset() (int64, string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if pom.offset >= 0 {
+ return pom.offset, pom.metadata
+ }
+
+ return pom.parent.conf.Consumer.Offsets.Initial, ""
+}
+
+func (pom *partitionOffsetManager) AsyncClose() {
+ pom.lock.Lock()
+ pom.done = true
+ pom.lock.Unlock()
+}
+
+func (pom *partitionOffsetManager) Close() error {
+ pom.AsyncClose()
+
+ var errors ConsumerErrors
+ for err := range pom.errors {
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+func (pom *partitionOffsetManager) handleError(err error) {
+ cErr := &ConsumerError{
+ Topic: pom.topic,
+ Partition: pom.partition,
+ Err: err,
+ }
+
+ if pom.parent.conf.Consumer.Return.Errors {
+ pom.errors <- cErr
+ } else {
+ Logger.Println(cErr)
+ }
+}
+
+func (pom *partitionOffsetManager) release() {
+ pom.releaseOnce.Do(func() {
+ close(pom.errors)
+ })
+}
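Putting the two interfaces together: an OffsetManager is created from a Client, a PartitionOffsetManager is claimed per topic/partition, the starting position comes from NextOffset, and progress is recorded with MarkOffset. A minimal sketch, assuming a broker at localhost:9092 and placeholder group/topic names:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Consumer.Offsets.Initial = sarama.OffsetOldest

	client, err := sarama.NewClient([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	om, err := sarama.NewOffsetManagerFromClient("my-group", client)
	if err != nil {
		log.Fatal(err)
	}
	defer om.Close() // runs before client.Close(), as required above

	pom, err := om.ManagePartition("my-topic", 0)
	if err != nil {
		log.Fatal(err)
	}
	defer pom.AsyncClose() // runs before om.Close(), which flushes and releases it

	next, meta := pom.NextOffset()
	log.Printf("resuming at offset %d (metadata %q)", next, meta)

	// Suppose the message at offset 41 was just processed: mark 42, the next
	// offset to read, following the MarkOffset convention documented above.
	pom.MarkOffset(42, "")
}
```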
diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go
new file mode 100644
index 0000000..326c372
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_request.go
@@ -0,0 +1,156 @@
+package sarama
+
+type offsetRequestBlock struct {
+ time int64
+ maxOffsets int32 // Only used in version 0
+}
+
+func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
+ pe.putInt64(int64(b.time))
+ if version == 0 {
+ pe.putInt32(b.maxOffsets)
+ }
+
+ return nil
+}
+
+func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+ if b.time, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if version == 0 {
+ if b.maxOffsets, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type OffsetRequest struct {
+ Version int16
+ replicaID int32
+ isReplicaIDSet bool
+ blocks map[string]map[int32]*offsetRequestBlock
+}
+
+func (r *OffsetRequest) encode(pe packetEncoder) error {
+ if r.isReplicaIDSet {
+ pe.putInt32(r.replicaID)
+ } else {
+ // default replica ID is always -1 for clients
+ pe.putInt32(-1)
+ }
+
+ err := pe.putArrayLength(len(r.blocks))
+ if err != nil {
+ return err
+ }
+ for topic, partitions := range r.blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err = block.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
+ r.Version = version
+
+ replicaID, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ if replicaID >= 0 {
+ r.SetReplicaID(replicaID)
+ }
+
+ blockCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if blockCount == 0 {
+ return nil
+ }
+ r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+ for i := 0; i < blockCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ block := &offsetRequestBlock{}
+ if err := block.decode(pd, version); err != nil {
+ return err
+ }
+ r.blocks[topic][partition] = block
+ }
+ }
+ return nil
+}
+
+func (r *OffsetRequest) key() int16 {
+ return 2
+}
+
+func (r *OffsetRequest) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_10_1_0
+ default:
+ return MinVersion
+ }
+}
+
+func (r *OffsetRequest) SetReplicaID(id int32) {
+ r.replicaID = id
+ r.isReplicaIDSet = true
+}
+
+func (r *OffsetRequest) ReplicaID() int32 {
+ if r.isReplicaIDSet {
+ return r.replicaID
+ }
+ return -1
+}
+
+func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
+ if r.blocks == nil {
+ r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+ }
+
+ if r.blocks[topic] == nil {
+ r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+ }
+
+ tmp := new(offsetRequestBlock)
+ tmp.time = time
+ if r.Version == 0 {
+ tmp.maxOffsets = maxOffsets
+ }
+
+ r.blocks[topic][partitionID] = tmp
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go
new file mode 100644
index 0000000..8b2193f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_response.go
@@ -0,0 +1,174 @@
+package sarama
+
+type OffsetResponseBlock struct {
+ Err KError
+ Offsets []int64 // Version 0
+ Offset int64 // Version 1
+ Timestamp int64 // Version 1
+}
+
+func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Err = KError(tmp)
+
+ if version == 0 {
+ b.Offsets, err = pd.getInt64Array()
+
+ return err
+ }
+
+ b.Timestamp, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ b.Offset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ // For backwards compatibility put the offset in the offsets array too
+ b.Offsets = []int64{b.Offset}
+
+ return nil
+}
+
+func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+ pe.putInt16(int16(b.Err))
+
+ if version == 0 {
+ return pe.putInt64Array(b.Offsets)
+ }
+
+ pe.putInt64(b.Timestamp)
+ pe.putInt64(b.Offset)
+
+ return nil
+}
+
+type OffsetResponse struct {
+ Version int16
+ Blocks map[string]map[int32]*OffsetResponseBlock
+}
+
+func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(OffsetResponseBlock)
+ err = block.decode(pd, version)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+/*
+// [0 0 0 1 ntopics
+0 8 109 121 95 116 111 112 105 99 topic
+0 0 0 1 npartitions
+0 0 0 0 id
+0 0
+
+0 0 0 1 0 0 0 0
+0 1 1 1 0 0 0 1
+0 8 109 121 95 116 111 112
+105 99 0 0 0 1 0 0
+0 0 0 0 0 0 0 1
+0 0 0 0 0 1 1 1] <nil>
+
+*/
+func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
+ if err = pe.putArrayLength(len(r.Blocks)); err != nil {
+ return err
+ }
+
+ for topic, partitions := range r.Blocks {
+ if err = pe.putString(topic); err != nil {
+ return err
+ }
+ if err = pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err = block.encode(pe, r.version()); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetResponse) key() int16 {
+ return 2
+}
+
+func (r *OffsetResponse) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_10_1_0
+ default:
+ return MinVersion
+ }
+}
+
+// testing API
+
+func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
+ }
+ byTopic, ok := r.Blocks[topic]
+ if !ok {
+ byTopic = make(map[int32]*OffsetResponseBlock)
+ r.Blocks[topic] = byTopic
+ }
+ byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
+}
diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go
new file mode 100644
index 0000000..9be854c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/packet_decoder.go
@@ -0,0 +1,61 @@
+package sarama
+
+// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
+// Types implementing Decoder only need to worry about calling methods like GetString,
+// not about how a string is represented in Kafka.
+type packetDecoder interface {
+ // Primitives
+ getInt8() (int8, error)
+ getInt16() (int16, error)
+ getInt32() (int32, error)
+ getInt64() (int64, error)
+ getVarint() (int64, error)
+ getArrayLength() (int, error)
+ getBool() (bool, error)
+
+ // Collections
+ getBytes() ([]byte, error)
+ getVarintBytes() ([]byte, error)
+ getRawBytes(length int) ([]byte, error)
+ getString() (string, error)
+ getNullableString() (*string, error)
+ getInt32Array() ([]int32, error)
+ getInt64Array() ([]int64, error)
+ getStringArray() ([]string, error)
+
+ // Subsets
+ remaining() int
+ getSubset(length int) (packetDecoder, error)
+ peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset
+ peekInt8(offset int) (int8, error) // similar to peek, but just one byte
+
+ // Stacks, see PushDecoder
+ push(in pushDecoder) error
+ pop() error
+}
+
+// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
+// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
+// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
+// depend upon have been decoded.
+type pushDecoder interface {
+ // Saves the offset into the input buffer as the location to actually read the calculated value when able.
+ saveOffset(in int)
+
+ // Returns the length of data to reserve for the input of this decoder (e.g. 4 bytes for a CRC32).
+ reserveLength() int
+
+ // Indicates that all required data is now available to calculate and check the field.
+ // SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
+ // of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
+ check(curOffset int, buf []byte) error
+}
+
+// dynamicPushDecoder extends the interface of pushDecoder for use cases where the length of the
+// field itself is unknown until its value has been decoded (for instance varint-encoded length
+// fields).
+// During push, the dynamicPushDecoder.decode() method will be called instead of reserveLength().
+type dynamicPushDecoder interface {
+ pushDecoder
+ decoder
+}
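Concretely, these helpers hide Kafka's wire encoding: a protocol STRING, for example, is a big-endian int16 length followed by that many bytes, with -1 marking a null string. The following standalone illustration of that rule uses only the standard library; it mirrors the idea behind getString/getNullableString but is not sarama's actual decoder, which additionally tracks offsets for the push/pop fields described above:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// readString decodes a Kafka-style STRING: an int16 big-endian length followed
// by that many bytes. A length of -1 encodes a null string.
func readString(buf []byte) (s string, rest []byte, err error) {
	if len(buf) < 2 {
		return "", nil, errors.New("insufficient data for string length")
	}
	n := int16(binary.BigEndian.Uint16(buf))
	buf = buf[2:]
	if n < 0 {
		return "", buf, nil // null string
	}
	if len(buf) < int(n) {
		return "", nil, errors.New("insufficient data for string body")
	}
	return string(buf[:n]), buf[n:], nil
}

func main() {
	raw := []byte{0x00, 0x05, 'h', 'e', 'l', 'l', 'o'}
	s, rest, err := readString(raw)
	fmt.Println(s, len(rest), err) // hello 0 <nil>
}
```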
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go
new file mode 100644
index 0000000..67b8dae
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/packet_encoder.go
@@ -0,0 +1,65 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
+// Types implementing Encoder only need to worry about calling methods like PutString,
+// not about how a string is represented in Kafka.
+type packetEncoder interface {
+ // Primitives
+ putInt8(in int8)
+ putInt16(in int16)
+ putInt32(in int32)
+ putInt64(in int64)
+ putVarint(in int64)
+ putArrayLength(in int) error
+ putBool(in bool)
+
+ // Collections
+ putBytes(in []byte) error
+ putVarintBytes(in []byte) error
+ putRawBytes(in []byte) error
+ putString(in string) error
+ putNullableString(in *string) error
+ putStringArray(in []string) error
+ putInt32Array(in []int32) error
+ putInt64Array(in []int64) error
+
+ // Provide the current offset to record the batch size metric
+ offset() int
+
+ // Stacks, see PushEncoder
+ push(in pushEncoder)
+ pop() error
+
+ // To record metrics when provided
+ metricRegistry() metrics.Registry
+}
+
+// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
+// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
+// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
+// depend upon have been written.
+type pushEncoder interface {
+ // Saves the offset into the input buffer as the location to actually write the calculated value when able.
+ saveOffset(in int)
+
+ // Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32).
+ // Returns the length of data to reserve for the output of this encoder (e.g. 4 bytes for a CRC32).
+
+ // Indicates that all required data is now available to calculate and write the field.
+ // SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
+ // of data to the saved offset, based on the data between the saved offset and curOffset.
+ run(curOffset int, buf []byte) error
+}
+
+// dynamicPushEncoder extends the interface of pushEncoder for use cases where the length of the
+// field itself is unknown until its value has been computed (for instance varint-encoded length
+// fields).
+type dynamicPushEncoder interface {
+ pushEncoder
+
+ // Called during pop() to adjust the length of the field.
+ // It should return the difference in bytes between the last computed length and current length.
+ adjustLength(currOffset int) int
+}
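The push/pop pair exists because fields such as a length prefix or CRC appear before the bytes they describe, so an encoder reserves space, keeps writing, and back-fills the reserved slot once the enclosed data is known. A standalone sketch of that reserve-and-backfill idea for a 4-byte length prefix (illustrative only; sarama's real pushEncoder implementations also cover CRC fields and record metrics):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var buf []byte

	// "push": reserve 4 bytes for a length that is not known yet.
	lengthOffset := len(buf)
	buf = append(buf, 0, 0, 0, 0)

	// ...encode the enclosed data...
	payload := []byte("some enclosed records")
	buf = append(buf, payload...)

	// "pop": back-fill the reserved slot now that the enclosed size is known.
	binary.BigEndian.PutUint32(buf[lengthOffset:], uint32(len(buf)-lengthOffset-4))

	fmt.Printf("% x\n", buf[:4]) // 00 00 00 15 -> 21 bytes of payload follow
}
```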
diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go
new file mode 100644
index 0000000..6a708e7
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/partitioner.go
@@ -0,0 +1,217 @@
+package sarama
+
+import (
+ "hash"
+ "hash/fnv"
+ "math/rand"
+ "time"
+)
+
+// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
+// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
+// as simple default implementations.
+type Partitioner interface {
+ // Partition takes a message and partition count and chooses a partition
+ Partition(message *ProducerMessage, numPartitions int32) (int32, error)
+
+ // RequiresConsistency indicates to the user of the partitioner whether the
+ // mapping of key->partition is consistent or not. Specifically, if a
+ // partitioner requires consistency then it must be allowed to choose from all
+ // partitions (even ones known to be unavailable), and its choice must be
+ // respected by the caller. The obvious example is the HashPartitioner.
+ RequiresConsistency() bool
+}
+
+// DynamicConsistencyPartitioner can optionally be implemented by Partitioners
+// in order to allow more flexibility than is originally allowed by the
+// RequiresConsistency method in the Partitioner interface. This allows
+// partitioners to require consistency sometimes, but not at all times. It's useful
+// for, e.g., the HashPartitioner, which does not require consistency if the
+// message key is nil.
+type DynamicConsistencyPartitioner interface {
+ Partitioner
+
+ // MessageRequiresConsistency is similar to Partitioner.RequiresConsistency,
+ // but takes in the message being partitioned so that the partitioner can
+ // make a per-message determination.
+ MessageRequiresConsistency(message *ProducerMessage) bool
+}
+
+// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
+type PartitionerConstructor func(topic string) Partitioner
+
+type manualPartitioner struct{}
+
+// HashPartitionOption lets you modify default values of the partitioner
+type HashPartitionerOption func(*hashPartitioner)
+
+// WithAbsFirst means that the partitioner handles absolute values
+// in the same way as the reference Java implementation
+func WithAbsFirst() HashPartitionerOption {
+ return func(hp *hashPartitioner) {
+ hp.referenceAbs = true
+ }
+}
+
+// WithCustomHashFunction lets you specify what hash function to use for the partitioning
+func WithCustomHashFunction(hasher func() hash.Hash32) HashPartitionerOption {
+ return func(hp *hashPartitioner) {
+ hp.hasher = hasher()
+ }
+}
+
+// WithCustomFallbackPartitioner lets you specify what HashPartitioner should be used in case a distribution key is empty
+func WithCustomFallbackPartitioner(randomHP *hashPartitioner) HashPartitionerOption {
+ return func(hp *hashPartitioner) {
+ hp.random = randomHP
+ }
+}
+
+// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
+// ProducerMessage's Partition field as the partition to produce to.
+func NewManualPartitioner(topic string) Partitioner {
+ return new(manualPartitioner)
+}
+
+func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ return message.Partition, nil
+}
+
+func (p *manualPartitioner) RequiresConsistency() bool {
+ return true
+}
+
+type randomPartitioner struct {
+ generator *rand.Rand
+}
+
+// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
+func NewRandomPartitioner(topic string) Partitioner {
+ p := new(randomPartitioner)
+ p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
+ return p
+}
+
+func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ return int32(p.generator.Intn(int(numPartitions))), nil
+}
+
+func (p *randomPartitioner) RequiresConsistency() bool {
+ return false
+}
+
+type roundRobinPartitioner struct {
+ partition int32
+}
+
+// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
+func NewRoundRobinPartitioner(topic string) Partitioner {
+ return &roundRobinPartitioner{}
+}
+
+func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ if p.partition >= numPartitions {
+ p.partition = 0
+ }
+ ret := p.partition
+ p.partition++
+ return ret, nil
+}
+
+func (p *roundRobinPartitioner) RequiresConsistency() bool {
+ return false
+}
+
+type hashPartitioner struct {
+ random Partitioner
+ hasher hash.Hash32
+ referenceAbs bool
+}
+
+// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of a custom hasher.
+// The argument is a function providing an instance that implements the hash.Hash32 interface. This ensures that
+// each partition dispatcher gets its own hasher, avoiding concurrency issues caused by sharing a single instance.
+func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
+ return func(topic string) Partitioner {
+ p := new(hashPartitioner)
+ p.random = NewRandomPartitioner(topic)
+ p.hasher = hasher()
+ p.referenceAbs = false
+ return p
+ }
+}
+
+// NewCustomPartitioner creates a default Partitioner but lets you specify the behavior of each component via options
+func NewCustomPartitioner(options ...HashPartitionerOption) PartitionerConstructor {
+ return func(topic string) Partitioner {
+ p := new(hashPartitioner)
+ p.random = NewRandomPartitioner(topic)
+ p.hasher = fnv.New32a()
+ p.referenceAbs = false
+ for _, option := range options {
+ option(p)
+ }
+ return p
+ }
+}
+
+// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
+// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
+// modulo the number of partitions. This ensures that messages with the same key always end up on the
+// same partition.
+func NewHashPartitioner(topic string) Partitioner {
+ p := new(hashPartitioner)
+ p.random = NewRandomPartitioner(topic)
+ p.hasher = fnv.New32a()
+ p.referenceAbs = false
+ return p
+}
+
+// NewReferenceHashPartitioner is like NewHashPartitioner except that it handles absolute values
+// in the same way as the reference Java implementation. NewHashPartitioner was supposed to do
+// that but it had a mistake and now there are people depending on both behaviours. This will
+// all go away on the next major version bump.
+func NewReferenceHashPartitioner(topic string) Partitioner {
+ p := new(hashPartitioner)
+ p.random = NewRandomPartitioner(topic)
+ p.hasher = fnv.New32a()
+ p.referenceAbs = true
+ return p
+}
+
+func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ if message.Key == nil {
+ return p.random.Partition(message, numPartitions)
+ }
+ bytes, err := message.Key.Encode()
+ if err != nil {
+ return -1, err
+ }
+ p.hasher.Reset()
+ _, err = p.hasher.Write(bytes)
+ if err != nil {
+ return -1, err
+ }
+ var partition int32
+ // Turns out we were doing our absolute value in a subtly different way from the upstream
+ // implementation, but now we need to maintain backwards compat for people who started using
+ // the old version; if referenceAbs is set we are compatible with the reference java client
+ // but not past Sarama versions
+ if p.referenceAbs {
+ partition = (int32(p.hasher.Sum32()) & 0x7fffffff) % numPartitions
+ } else {
+ partition = int32(p.hasher.Sum32()) % numPartitions
+ if partition < 0 {
+ partition = -partition
+ }
+ }
+ return partition, nil
+}
+
+func (p *hashPartitioner) RequiresConsistency() bool {
+ return true
+}
+
+func (p *hashPartitioner) MessageRequiresConsistency(message *ProducerMessage) bool {
+ return message.Key != nil
+}
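Editor's note: applications normally select one of these partitioners (or supply their own) through the producer configuration. The following is a minimal, hypothetical sketch (not part of the vendored code) showing how a custom Partitioner implementing the interface above might be wired in; the constant-partition logic is purely illustrative.

    package main

    import "github.com/Shopify/sarama"

    // constPartitioner always picks partition 0; illustrative only.
    type constPartitioner struct{}

    func (constPartitioner) Partition(msg *sarama.ProducerMessage, numPartitions int32) (int32, error) {
        return 0, nil
    }

    func (constPartitioner) RequiresConsistency() bool { return true }

    func main() {
        cfg := sarama.NewConfig()
        // Producer.Partitioner expects a PartitionerConstructor.
        cfg.Producer.Partitioner = func(topic string) sarama.Partitioner { return constPartitioner{} }
        _ = cfg // pass cfg to sarama.NewAsyncProducer / NewSyncProducer as usual
    }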
diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go
new file mode 100644
index 0000000..b633cd1
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/prep_encoder.go
@@ -0,0 +1,153 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+type prepEncoder struct {
+ stack []pushEncoder
+ length int
+}
+
+// primitives
+
+func (pe *prepEncoder) putInt8(in int8) {
+ pe.length++
+}
+
+func (pe *prepEncoder) putInt16(in int16) {
+ pe.length += 2
+}
+
+func (pe *prepEncoder) putInt32(in int32) {
+ pe.length += 4
+}
+
+func (pe *prepEncoder) putInt64(in int64) {
+ pe.length += 8
+}
+
+func (pe *prepEncoder) putVarint(in int64) {
+ var buf [binary.MaxVarintLen64]byte
+ pe.length += binary.PutVarint(buf[:], in)
+}
+
+func (pe *prepEncoder) putArrayLength(in int) error {
+ if in > math.MaxInt32 {
+ return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
+ }
+ pe.length += 4
+ return nil
+}
+
+func (pe *prepEncoder) putBool(in bool) {
+ pe.length++
+}
+
+// arrays
+
+func (pe *prepEncoder) putBytes(in []byte) error {
+ pe.length += 4
+ if in == nil {
+ return nil
+ }
+ return pe.putRawBytes(in)
+}
+
+func (pe *prepEncoder) putVarintBytes(in []byte) error {
+ if in == nil {
+ pe.putVarint(-1)
+ return nil
+ }
+ pe.putVarint(int64(len(in)))
+ return pe.putRawBytes(in)
+}
+
+func (pe *prepEncoder) putRawBytes(in []byte) error {
+ if len(in) > math.MaxInt32 {
+ return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
+ }
+ pe.length += len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putNullableString(in *string) error {
+ if in == nil {
+ pe.length += 2
+ return nil
+ }
+ return pe.putString(*in)
+}
+
+func (pe *prepEncoder) putString(in string) error {
+ pe.length += 2
+ if len(in) > math.MaxInt16 {
+ return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
+ }
+ pe.length += len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putStringArray(in []string) error {
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+
+ for _, str := range in {
+ if err := pe.putString(str); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (pe *prepEncoder) putInt32Array(in []int32) error {
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ pe.length += 4 * len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putInt64Array(in []int64) error {
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ pe.length += 8 * len(in)
+ return nil
+}
+
+func (pe *prepEncoder) offset() int {
+ return pe.length
+}
+
+// stackable
+
+func (pe *prepEncoder) push(in pushEncoder) {
+ in.saveOffset(pe.length)
+ pe.length += in.reserveLength()
+ pe.stack = append(pe.stack, in)
+}
+
+func (pe *prepEncoder) pop() error {
+ in := pe.stack[len(pe.stack)-1]
+ pe.stack = pe.stack[:len(pe.stack)-1]
+ if dpe, ok := in.(dynamicPushEncoder); ok {
+ pe.length += dpe.adjustLength(pe.length)
+ }
+
+ return nil
+}
+
+// we do not record metrics during the prep encoder pass
+func (pe *prepEncoder) metricRegistry() metrics.Registry {
+ return nil
+}
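Editor's note: the prep encoder exists so that encoding can happen in two passes — a sizing pass that only counts bytes, followed by a real pass that writes into a single preallocated buffer. The encoder types themselves are unexported, so the sketch below only illustrates the general size-then-write pattern with a length-prefixed string; it is not sarama's API.

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // sizeOf mirrors the write pass exactly: an int16 length prefix plus the payload.
    func sizeOf(s string) int { return 2 + len(s) }

    // encodeRecord allocates once, using the size computed by the prep pass.
    func encodeRecord(s string) []byte {
        buf := make([]byte, sizeOf(s))
        binary.BigEndian.PutUint16(buf, uint16(len(s)))
        copy(buf[2:], s)
        return buf
    }

    func main() {
        fmt.Printf("%x\n", encodeRecord("hi")) // prints 00026869
    }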
diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go
new file mode 100644
index 0000000..0c755d0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_request.go
@@ -0,0 +1,252 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
+// it must see before responding. Any of the constants defined here are valid. On broker versions
+// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
+// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced
+// by setting the `min.insync.replicas` value in the broker's configuration).
+type RequiredAcks int16
+
+const (
+ // NoResponse doesn't send any response, the TCP ACK is all you get.
+ NoResponse RequiredAcks = 0
+ // WaitForLocal waits for only the local commit to succeed before responding.
+ WaitForLocal RequiredAcks = 1
+ // WaitForAll waits for all in-sync replicas to commit before responding.
+ // The minimum number of in-sync replicas is configured on the broker via
+ // the `min.insync.replicas` configuration key.
+ WaitForAll RequiredAcks = -1
+)
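Editor's note: these constants are normally consumed through the producer configuration rather than set on requests directly. A brief sketch, assuming the usual sarama and time imports:

    cfg := sarama.NewConfig()
    cfg.Producer.RequiredAcks = sarama.WaitForAll // wait for all in-sync replicas
    cfg.Producer.Timeout = 10 * time.Second       // how long the broker may wait for those acks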
+
+type ProduceRequest struct {
+ TransactionalID *string
+ RequiredAcks RequiredAcks
+ Timeout int32
+ Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11
+ records map[string]map[int32]Records
+}
+
+func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram,
+ topicCompressionRatioMetric metrics.Histogram) int64 {
+ var topicRecordCount int64
+ for _, messageBlock := range msgSet.Messages {
+ // Is this a fake "message" wrapping real messages?
+ if messageBlock.Msg.Set != nil {
+ topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
+ } else {
+ // A single uncompressed message
+ topicRecordCount++
+ }
+ // Better safe than sorry when computing the compression ratio
+ if messageBlock.Msg.compressedSize != 0 {
+ compressionRatio := float64(len(messageBlock.Msg.Value)) /
+ float64(messageBlock.Msg.compressedSize)
+ // Histograms do not support decimal values, so multiply by 100 for better precision
+ intCompressionRatio := int64(100 * compressionRatio)
+ compressionRatioMetric.Update(intCompressionRatio)
+ topicCompressionRatioMetric.Update(intCompressionRatio)
+ }
+ }
+ return topicRecordCount
+}
+
+func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram,
+ topicCompressionRatioMetric metrics.Histogram) int64 {
+ if recordBatch.compressedRecords != nil {
+ compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100)
+ compressionRatioMetric.Update(compressionRatio)
+ topicCompressionRatioMetric.Update(compressionRatio)
+ }
+
+ return int64(len(recordBatch.Records))
+}
+
+func (r *ProduceRequest) encode(pe packetEncoder) error {
+ if r.Version >= 3 {
+ if err := pe.putNullableString(r.TransactionalID); err != nil {
+ return err
+ }
+ }
+ pe.putInt16(int16(r.RequiredAcks))
+ pe.putInt32(r.Timeout)
+ metricRegistry := pe.metricRegistry()
+ var batchSizeMetric metrics.Histogram
+ var compressionRatioMetric metrics.Histogram
+ if metricRegistry != nil {
+ batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
+ compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
+ }
+ totalRecordCount := int64(0)
+
+ err := pe.putArrayLength(len(r.records))
+ if err != nil {
+ return err
+ }
+
+ for topic, partitions := range r.records {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ topicRecordCount := int64(0)
+ var topicCompressionRatioMetric metrics.Histogram
+ if metricRegistry != nil {
+ topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
+ }
+ for id, records := range partitions {
+ startOffset := pe.offset()
+ pe.putInt32(id)
+ pe.push(&lengthField{})
+ err = records.encode(pe)
+ if err != nil {
+ return err
+ }
+ err = pe.pop()
+ if err != nil {
+ return err
+ }
+ if metricRegistry != nil {
+ if r.Version >= 3 {
+ topicRecordCount += updateBatchMetrics(records.RecordBatch, compressionRatioMetric, topicCompressionRatioMetric)
+ } else {
+ topicRecordCount += updateMsgSetMetrics(records.MsgSet, compressionRatioMetric, topicCompressionRatioMetric)
+ }
+ batchSize := int64(pe.offset() - startOffset)
+ batchSizeMetric.Update(batchSize)
+ getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
+ }
+ }
+ if topicRecordCount > 0 {
+ getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
+ getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
+ totalRecordCount += topicRecordCount
+ }
+ }
+ if totalRecordCount > 0 {
+ metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
+ getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
+ }
+
+ return nil
+}
+
+func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
+ r.Version = version
+
+ if version >= 3 {
+ id, err := pd.getNullableString()
+ if err != nil {
+ return err
+ }
+ r.TransactionalID = id
+ }
+ requiredAcks, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.RequiredAcks = RequiredAcks(requiredAcks)
+ if r.Timeout, err = pd.getInt32(); err != nil {
+ return err
+ }
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+
+ r.records = make(map[string]map[int32]Records)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.records[topic] = make(map[int32]Records)
+
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ size, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ recordsDecoder, err := pd.getSubset(int(size))
+ if err != nil {
+ return err
+ }
+ var records Records
+ if err := records.decode(recordsDecoder); err != nil {
+ return err
+ }
+ r.records[topic][partition] = records
+ }
+ }
+
+ return nil
+}
+
+func (r *ProduceRequest) key() int16 {
+ return 0
+}
+
+func (r *ProduceRequest) version() int16 {
+ return r.Version
+}
+
+func (r *ProduceRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_9_0_0
+ case 2:
+ return V0_10_0_0
+ case 3:
+ return V0_11_0_0
+ default:
+ return MinVersion
+ }
+}
+
+func (r *ProduceRequest) ensureRecords(topic string, partition int32) {
+ if r.records == nil {
+ r.records = make(map[string]map[int32]Records)
+ }
+
+ if r.records[topic] == nil {
+ r.records[topic] = make(map[int32]Records)
+ }
+}
+
+func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
+ r.ensureRecords(topic, partition)
+ set := r.records[topic][partition].MsgSet
+
+ if set == nil {
+ set = new(MessageSet)
+ r.records[topic][partition] = newLegacyRecords(set)
+ }
+
+ set.addMessage(msg)
+}
+
+func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
+ r.ensureRecords(topic, partition)
+ r.records[topic][partition] = newLegacyRecords(set)
+}
+
+func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) {
+ r.ensureRecords(topic, partition)
+ r.records[topic][partition] = newDefaultRecords(batch)
+}
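Editor's note: most applications never build ProduceRequests by hand — the async and sync producers do it internally — but the exported helpers above can be useful in tests or a bespoke broker client. A hedged sketch; topic name and timeout are placeholders:

    req := &sarama.ProduceRequest{
        RequiredAcks: sarama.WaitForLocal,
        Timeout:      10000, // milliseconds
    }
    req.AddMessage("my-topic", 0, &sarama.Message{Value: []byte("hello")})
    // A connected *sarama.Broker could then send it via broker.Produce(req).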
diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go
new file mode 100644
index 0000000..4c5cd35
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_response.go
@@ -0,0 +1,189 @@
+package sarama
+
+import (
+ "fmt"
+ "time"
+)
+
+type ProduceResponseBlock struct {
+ Err KError
+ Offset int64
+ // only provided if Version >= 2 and the broker is configured with `LogAppendTime`
+ Timestamp time.Time
+}
+
+func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Err = KError(tmp)
+
+ b.Offset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ if version >= 2 {
+ if millis, err := pd.getInt64(); err != nil {
+ return err
+ } else if millis != -1 {
+ b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+ }
+ }
+
+ return nil
+}
+
+func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+ pe.putInt16(int16(b.Err))
+ pe.putInt64(b.Offset)
+
+ if version >= 2 {
+ timestamp := int64(-1)
+ if !b.Timestamp.Before(time.Unix(0, 0)) {
+ timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond)
+ } else if !b.Timestamp.IsZero() {
+ return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)}
+ }
+ pe.putInt64(timestamp)
+ }
+
+ return nil
+}
+
+type ProduceResponse struct {
+ Blocks map[string]map[int32]*ProduceResponseBlock
+ Version int16
+ ThrottleTime time.Duration // only provided if Version >= 1
+}
+
+func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(ProduceResponseBlock)
+ err = block.decode(pd, version)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ if r.Version >= 1 {
+ millis, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ r.ThrottleTime = time.Duration(millis) * time.Millisecond
+ }
+
+ return nil
+}
+
+func (r *ProduceResponse) encode(pe packetEncoder) error {
+ err := pe.putArrayLength(len(r.Blocks))
+ if err != nil {
+ return err
+ }
+ for topic, partitions := range r.Blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ for id, prb := range partitions {
+ pe.putInt32(id)
+ err = prb.encode(pe, r.Version)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ if r.Version >= 1 {
+ pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+ }
+ return nil
+}
+
+func (r *ProduceResponse) key() int16 {
+ return 0
+}
+
+func (r *ProduceResponse) version() int16 {
+ return r.Version
+}
+
+func (r *ProduceResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_9_0_0
+ case 2:
+ return V0_10_0_0
+ case 3:
+ return V0_11_0_0
+ default:
+ return MinVersion
+ }
+}
+
+func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+// Testing API
+
+func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
+ }
+ byTopic, ok := r.Blocks[topic]
+ if !ok {
+ byTopic = make(map[int32]*ProduceResponseBlock)
+ r.Blocks[topic] = byTopic
+ }
+ block := &ProduceResponseBlock{
+ Err: err,
+ }
+ if r.Version >= 2 {
+ block.Timestamp = time.Now()
+ }
+ byTopic[partition] = block
+}
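Editor's note: AddTopicPartition and GetBlock are mainly useful for building canned responses in tests (for example against sarama's MockBroker). A small illustrative sketch, assuming the fmt and sarama imports:

    resp := &sarama.ProduceResponse{Version: 2}
    resp.AddTopicPartition("my-topic", 0, sarama.ErrNoError)
    if block := resp.GetBlock("my-topic", 0); block != nil {
        fmt.Println(block.Err, block.Timestamp)
    }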
diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go
new file mode 100644
index 0000000..bba0f7e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_set.go
@@ -0,0 +1,258 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "errors"
+ "time"
+)
+
+type partitionSet struct {
+ msgs []*ProducerMessage
+ recordsToSend Records
+ bufferBytes int
+}
+
+type produceSet struct {
+ parent *asyncProducer
+ msgs map[string]map[int32]*partitionSet
+
+ bufferBytes int
+ bufferCount int
+}
+
+func newProduceSet(parent *asyncProducer) *produceSet {
+ return &produceSet{
+ msgs: make(map[string]map[int32]*partitionSet),
+ parent: parent,
+ }
+}
+
+func (ps *produceSet) add(msg *ProducerMessage) error {
+ var err error
+ var key, val []byte
+
+ if msg.Key != nil {
+ if key, err = msg.Key.Encode(); err != nil {
+ return err
+ }
+ }
+
+ if msg.Value != nil {
+ if val, err = msg.Value.Encode(); err != nil {
+ return err
+ }
+ }
+
+ timestamp := msg.Timestamp
+ if msg.Timestamp.IsZero() {
+ timestamp = time.Now()
+ }
+
+ partitions := ps.msgs[msg.Topic]
+ if partitions == nil {
+ partitions = make(map[int32]*partitionSet)
+ ps.msgs[msg.Topic] = partitions
+ }
+
+ var size int
+
+ set := partitions[msg.Partition]
+ if set == nil {
+ if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+ batch := &RecordBatch{
+ FirstTimestamp: timestamp,
+ Version: 2,
+ Codec: ps.parent.conf.Producer.Compression,
+ CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
+ ProducerID: ps.parent.txnmgr.producerID,
+ ProducerEpoch: ps.parent.txnmgr.producerEpoch,
+ }
+ if ps.parent.conf.Producer.Idempotent {
+ batch.FirstSequence = msg.sequenceNumber
+ }
+ set = &partitionSet{recordsToSend: newDefaultRecords(batch)}
+ size = recordBatchOverhead
+ } else {
+ set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))}
+ }
+ partitions[msg.Partition] = set
+ }
+ set.msgs = append(set.msgs, msg)
+
+ if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+ if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence {
+ return errors.New("assertion failed: message out of sequence added to a batch")
+ }
+ // We are being conservative here to avoid having to prep encode the record
+ size += maximumRecordOverhead
+ rec := &Record{
+ Key: key,
+ Value: val,
+ TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp),
+ }
+ size += len(key) + len(val)
+ if len(msg.Headers) > 0 {
+ rec.Headers = make([]*RecordHeader, len(msg.Headers))
+ for i := range msg.Headers {
+ rec.Headers[i] = &msg.Headers[i]
+ size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32
+ }
+ }
+ set.recordsToSend.RecordBatch.addRecord(rec)
+ } else {
+ msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
+ if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+ msgToSend.Timestamp = timestamp
+ msgToSend.Version = 1
+ }
+ set.recordsToSend.MsgSet.addMessage(msgToSend)
+ size = producerMessageOverhead + len(key) + len(val)
+ }
+
+ set.bufferBytes += size
+ ps.bufferBytes += size
+ ps.bufferCount++
+
+ return nil
+}
+
+func (ps *produceSet) buildRequest() *ProduceRequest {
+ req := &ProduceRequest{
+ RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
+ Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
+ }
+ if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+ req.Version = 2
+ }
+ if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+ req.Version = 3
+ }
+
+ for topic, partitionSets := range ps.msgs {
+ for partition, set := range partitionSets {
+ if req.Version >= 3 {
+ // If the API version we're hitting is 3 or greater, we need to calculate
+ // offsets for each record in the batch relative to FirstOffset.
+ // Additionally, we must set LastOffsetDelta to the value of the last offset
+ // in the batch. Since the OffsetDelta of the first record is 0, we know that the
+ // final record of any batch will have an offset of (# of records in batch) - 1.
+ // (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets
+ // under the RecordBatch section for details.)
+ rb := set.recordsToSend.RecordBatch
+ if len(rb.Records) > 0 {
+ rb.LastOffsetDelta = int32(len(rb.Records) - 1)
+ for i, record := range rb.Records {
+ record.OffsetDelta = int64(i)
+ }
+ }
+ req.AddBatch(topic, partition, rb)
+ continue
+ }
+ if ps.parent.conf.Producer.Compression == CompressionNone {
+ req.AddSet(topic, partition, set.recordsToSend.MsgSet)
+ } else {
+ // When compression is enabled, the entire set for each partition is compressed
+ // and sent as the payload of a single fake "message" with the appropriate codec
+ // set and no key. When the server sees a message with a compression codec, it
+ // decompresses the payload and treats the result as its message set.
+
+ if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+ // If our version is 0.10 or later, assign relative offsets
+ // to the inner messages. This lets the broker avoid
+ // recompressing the message set.
+ // (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets
+ // for details on relative offsets.)
+ for i, msg := range set.recordsToSend.MsgSet.Messages {
+ msg.Offset = int64(i)
+ }
+ }
+ payload, err := encode(set.recordsToSend.MsgSet, ps.parent.conf.MetricRegistry)
+ if err != nil {
+ Logger.Println(err) // if this happens, it's basically our fault.
+ panic(err)
+ }
+ compMsg := &Message{
+ Codec: ps.parent.conf.Producer.Compression,
+ CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
+ Key: nil,
+ Value: payload,
+ Set: set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics
+ }
+ if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+ compMsg.Version = 1
+ compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp
+ }
+ req.AddMessage(topic, partition, compMsg)
+ }
+ }
+ }
+
+ return req
+}
+
+func (ps *produceSet) eachPartition(cb func(topic string, partition int32, pSet *partitionSet)) {
+ for topic, partitionSet := range ps.msgs {
+ for partition, set := range partitionSet {
+ cb(topic, partition, set)
+ }
+ }
+}
+
+func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
+ if ps.msgs[topic] == nil {
+ return nil
+ }
+ set := ps.msgs[topic][partition]
+ if set == nil {
+ return nil
+ }
+ ps.bufferBytes -= set.bufferBytes
+ ps.bufferCount -= len(set.msgs)
+ delete(ps.msgs[topic], partition)
+ return set.msgs
+}
+
+func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
+ version := 1
+ if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+ version = 2
+ }
+
+ switch {
+ // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
+ case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)):
+ return true
+ // Would we overflow the size-limit of a message-batch for this partition?
+ case ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
+ ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes:
+ return true
+ // Would we overflow simply in number of messages?
+ case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
+ return true
+ default:
+ return false
+ }
+}
+
+func (ps *produceSet) readyToFlush() bool {
+ switch {
+ // If we don't have any messages, nothing else matters
+ case ps.empty():
+ return false
+ // If all three config values are 0, we always flush as-fast-as-possible
+ case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
+ return true
+ // If we've passed the message trigger-point
+ case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
+ return true
+ // If we've passed the byte trigger-point
+ case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
+ return true
+ default:
+ return false
+ }
+}
+
+func (ps *produceSet) empty() bool {
+ return ps.bufferCount == 0
+}
diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go
new file mode 100644
index 0000000..085cbb3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/real_decoder.go
@@ -0,0 +1,332 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "math"
+)
+
+var errInvalidArrayLength = PacketDecodingError{"invalid array length"}
+var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
+var errInvalidByteSliceLengthType = PacketDecodingError{"invalid byteslice length type"}
+var errInvalidStringLength = PacketDecodingError{"invalid string length"}
+var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"}
+var errVarintOverflow = PacketDecodingError{"varint overflow"}
+var errInvalidBool = PacketDecodingError{"invalid bool"}
+
+type realDecoder struct {
+ raw []byte
+ off int
+ stack []pushDecoder
+}
+
+// primitives
+
+func (rd *realDecoder) getInt8() (int8, error) {
+ if rd.remaining() < 1 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int8(rd.raw[rd.off])
+ rd.off++
+ return tmp, nil
+}
+
+func (rd *realDecoder) getInt16() (int16, error) {
+ if rd.remaining() < 2 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
+ rd.off += 2
+ return tmp, nil
+}
+
+func (rd *realDecoder) getInt32() (int32, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+ return tmp, nil
+}
+
+func (rd *realDecoder) getInt64() (int64, error) {
+ if rd.remaining() < 8 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+ rd.off += 8
+ return tmp, nil
+}
+
+func (rd *realDecoder) getVarint() (int64, error) {
+ tmp, n := binary.Varint(rd.raw[rd.off:])
+ if n == 0 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ if n < 0 {
+ rd.off -= n
+ return -1, errVarintOverflow
+ }
+ rd.off += n
+ return tmp, nil
+}
+
+func (rd *realDecoder) getArrayLength() (int, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:])))
+ rd.off += 4
+ if tmp > rd.remaining() {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ } else if tmp > 2*math.MaxUint16 {
+ return -1, errInvalidArrayLength
+ }
+ return tmp, nil
+}
+
+func (rd *realDecoder) getBool() (bool, error) {
+ b, err := rd.getInt8()
+ if err != nil || b == 0 {
+ return false, err
+ }
+ if b != 1 {
+ return false, errInvalidBool
+ }
+ return true, nil
+}
+
+// collections
+
+func (rd *realDecoder) getBytes() ([]byte, error) {
+ tmp, err := rd.getInt32()
+ if err != nil {
+ return nil, err
+ }
+ if tmp == -1 {
+ return nil, nil
+ }
+
+ return rd.getRawBytes(int(tmp))
+}
+
+func (rd *realDecoder) getVarintBytes() ([]byte, error) {
+ tmp, err := rd.getVarint()
+ if err != nil {
+ return nil, err
+ }
+ if tmp == -1 {
+ return nil, nil
+ }
+
+ return rd.getRawBytes(int(tmp))
+}
+
+func (rd *realDecoder) getStringLength() (int, error) {
+ length, err := rd.getInt16()
+ if err != nil {
+ return 0, err
+ }
+
+ n := int(length)
+
+ switch {
+ case n < -1:
+ return 0, errInvalidStringLength
+ case n > rd.remaining():
+ rd.off = len(rd.raw)
+ return 0, ErrInsufficientData
+ }
+
+ return n, nil
+}
+
+func (rd *realDecoder) getString() (string, error) {
+ n, err := rd.getStringLength()
+ if err != nil || n == -1 {
+ return "", err
+ }
+
+ tmpStr := string(rd.raw[rd.off : rd.off+n])
+ rd.off += n
+ return tmpStr, nil
+}
+
+func (rd *realDecoder) getNullableString() (*string, error) {
+ n, err := rd.getStringLength()
+ if err != nil || n == -1 {
+ return nil, err
+ }
+
+ tmpStr := string(rd.raw[rd.off : rd.off+n])
+ rd.off += n
+ return &tmpStr, err
+}
+
+func (rd *realDecoder) getInt32Array() ([]int32, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+ n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+
+ if rd.remaining() < 4*n {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ if n == 0 {
+ return nil, nil
+ }
+
+ if n < 0 {
+ return nil, errInvalidArrayLength
+ }
+
+ ret := make([]int32, n)
+ for i := range ret {
+ ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+ }
+ return ret, nil
+}
+
+func (rd *realDecoder) getInt64Array() ([]int64, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+ n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+
+ if rd.remaining() < 8*n {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ if n == 0 {
+ return nil, nil
+ }
+
+ if n < 0 {
+ return nil, errInvalidArrayLength
+ }
+
+ ret := make([]int64, n)
+ for i := range ret {
+ ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+ rd.off += 8
+ }
+ return ret, nil
+}
+
+func (rd *realDecoder) getStringArray() ([]string, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+ n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+
+ if n == 0 {
+ return nil, nil
+ }
+
+ if n < 0 {
+ return nil, errInvalidArrayLength
+ }
+
+ ret := make([]string, n)
+ for i := range ret {
+ str, err := rd.getString()
+ if err != nil {
+ return nil, err
+ }
+
+ ret[i] = str
+ }
+ return ret, nil
+}
+
+// subsets
+
+func (rd *realDecoder) remaining() int {
+ return len(rd.raw) - rd.off
+}
+
+func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
+ buf, err := rd.getRawBytes(length)
+ if err != nil {
+ return nil, err
+ }
+ return &realDecoder{raw: buf}, nil
+}
+
+func (rd *realDecoder) getRawBytes(length int) ([]byte, error) {
+ if length < 0 {
+ return nil, errInvalidByteSliceLength
+ } else if length > rd.remaining() {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ start := rd.off
+ rd.off += length
+ return rd.raw[start:rd.off], nil
+}
+
+func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) {
+ if rd.remaining() < offset+length {
+ return nil, ErrInsufficientData
+ }
+ off := rd.off + offset
+ return &realDecoder{raw: rd.raw[off : off+length]}, nil
+}
+
+func (rd *realDecoder) peekInt8(offset int) (int8, error) {
+ const byteLen = 1
+ if rd.remaining() < offset+byteLen {
+ return -1, ErrInsufficientData
+ }
+ return int8(rd.raw[rd.off+offset]), nil
+}
+
+// stacks
+
+func (rd *realDecoder) push(in pushDecoder) error {
+ in.saveOffset(rd.off)
+
+ var reserve int
+ if dpd, ok := in.(dynamicPushDecoder); ok {
+ if err := dpd.decode(rd); err != nil {
+ return err
+ }
+ } else {
+ reserve = in.reserveLength()
+ if rd.remaining() < reserve {
+ rd.off = len(rd.raw)
+ return ErrInsufficientData
+ }
+ }
+
+ rd.stack = append(rd.stack, in)
+
+ rd.off += reserve
+
+ return nil
+}
+
+func (rd *realDecoder) pop() error {
+ // this is go's ugly pop pattern (the inverse of append)
+ in := rd.stack[len(rd.stack)-1]
+ rd.stack = rd.stack[:len(rd.stack)-1]
+
+ return in.check(rd.off, rd.raw)
+}
diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go
new file mode 100644
index 0000000..3c75387
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/real_encoder.go
@@ -0,0 +1,156 @@
+package sarama
+
+import (
+ "encoding/binary"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+type realEncoder struct {
+ raw []byte
+ off int
+ stack []pushEncoder
+ registry metrics.Registry
+}
+
+// primitives
+
+func (re *realEncoder) putInt8(in int8) {
+ re.raw[re.off] = byte(in)
+ re.off++
+}
+
+func (re *realEncoder) putInt16(in int16) {
+ binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
+ re.off += 2
+}
+
+func (re *realEncoder) putInt32(in int32) {
+ binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
+ re.off += 4
+}
+
+func (re *realEncoder) putInt64(in int64) {
+ binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
+ re.off += 8
+}
+
+func (re *realEncoder) putVarint(in int64) {
+ re.off += binary.PutVarint(re.raw[re.off:], in)
+}
+
+func (re *realEncoder) putArrayLength(in int) error {
+ re.putInt32(int32(in))
+ return nil
+}
+
+func (re *realEncoder) putBool(in bool) {
+ if in {
+ re.putInt8(1)
+ return
+ }
+ re.putInt8(0)
+}
+
+// collection
+
+func (re *realEncoder) putRawBytes(in []byte) error {
+ copy(re.raw[re.off:], in)
+ re.off += len(in)
+ return nil
+}
+
+func (re *realEncoder) putBytes(in []byte) error {
+ if in == nil {
+ re.putInt32(-1)
+ return nil
+ }
+ re.putInt32(int32(len(in)))
+ return re.putRawBytes(in)
+}
+
+func (re *realEncoder) putVarintBytes(in []byte) error {
+ if in == nil {
+ re.putVarint(-1)
+ return nil
+ }
+ re.putVarint(int64(len(in)))
+ return re.putRawBytes(in)
+}
+
+func (re *realEncoder) putString(in string) error {
+ re.putInt16(int16(len(in)))
+ copy(re.raw[re.off:], in)
+ re.off += len(in)
+ return nil
+}
+
+func (re *realEncoder) putNullableString(in *string) error {
+ if in == nil {
+ re.putInt16(-1)
+ return nil
+ }
+ return re.putString(*in)
+}
+
+func (re *realEncoder) putStringArray(in []string) error {
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+
+ for _, val := range in {
+ if err := re.putString(val); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (re *realEncoder) putInt32Array(in []int32) error {
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ for _, val := range in {
+ re.putInt32(val)
+ }
+ return nil
+}
+
+func (re *realEncoder) putInt64Array(in []int64) error {
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ for _, val := range in {
+ re.putInt64(val)
+ }
+ return nil
+}
+
+func (re *realEncoder) offset() int {
+ return re.off
+}
+
+// stacks
+
+func (re *realEncoder) push(in pushEncoder) {
+ in.saveOffset(re.off)
+ re.off += in.reserveLength()
+ re.stack = append(re.stack, in)
+}
+
+func (re *realEncoder) pop() error {
+ // this is go's ugly pop pattern (the inverse of append)
+ in := re.stack[len(re.stack)-1]
+ re.stack = re.stack[:len(re.stack)-1]
+
+ return in.run(re.off, re.raw)
+}
+
+// we do record metrics during the real encoder pass
+func (re *realEncoder) metricRegistry() metrics.Registry {
+ return re.registry
+}
diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go
new file mode 100644
index 0000000..cdccfe3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/record.go
@@ -0,0 +1,116 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "time"
+)
+
+const (
+ isTransactionalMask = 0x10
+ controlMask = 0x20
+ maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1
+)
+
+// RecordHeader stores key and value for a record header
+type RecordHeader struct {
+ Key []byte
+ Value []byte
+}
+
+func (h *RecordHeader) encode(pe packetEncoder) error {
+ if err := pe.putVarintBytes(h.Key); err != nil {
+ return err
+ }
+ return pe.putVarintBytes(h.Value)
+}
+
+func (h *RecordHeader) decode(pd packetDecoder) (err error) {
+ if h.Key, err = pd.getVarintBytes(); err != nil {
+ return err
+ }
+
+ if h.Value, err = pd.getVarintBytes(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Record is a Kafka record type
+type Record struct {
+ Headers []*RecordHeader
+
+ Attributes int8
+ TimestampDelta time.Duration
+ OffsetDelta int64
+ Key []byte
+ Value []byte
+ length varintLengthField
+}
+
+func (r *Record) encode(pe packetEncoder) error {
+ pe.push(&r.length)
+ pe.putInt8(r.Attributes)
+ pe.putVarint(int64(r.TimestampDelta / time.Millisecond))
+ pe.putVarint(r.OffsetDelta)
+ if err := pe.putVarintBytes(r.Key); err != nil {
+ return err
+ }
+ if err := pe.putVarintBytes(r.Value); err != nil {
+ return err
+ }
+ pe.putVarint(int64(len(r.Headers)))
+
+ for _, h := range r.Headers {
+ if err := h.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return pe.pop()
+}
+
+func (r *Record) decode(pd packetDecoder) (err error) {
+ if err = pd.push(&r.length); err != nil {
+ return err
+ }
+
+ if r.Attributes, err = pd.getInt8(); err != nil {
+ return err
+ }
+
+ timestamp, err := pd.getVarint()
+ if err != nil {
+ return err
+ }
+ r.TimestampDelta = time.Duration(timestamp) * time.Millisecond
+
+ if r.OffsetDelta, err = pd.getVarint(); err != nil {
+ return err
+ }
+
+ if r.Key, err = pd.getVarintBytes(); err != nil {
+ return err
+ }
+
+ if r.Value, err = pd.getVarintBytes(); err != nil {
+ return err
+ }
+
+ numHeaders, err := pd.getVarint()
+ if err != nil {
+ return err
+ }
+
+ if numHeaders >= 0 {
+ r.Headers = make([]*RecordHeader, numHeaders)
+ }
+ for i := int64(0); i < numHeaders; i++ {
+ hdr := new(RecordHeader)
+ if err := hdr.decode(pd); err != nil {
+ return err
+ }
+ r.Headers[i] = hdr
+ }
+
+ return pd.pop()
+}
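Editor's note: RecordHeader is also the type applications use when attaching headers to produced messages (headers require brokers and a config Version of 0.11 or newer). A hedged sketch with a hypothetical topic and header:

    msg := &sarama.ProducerMessage{
        Topic: "my-topic",
        Value: sarama.StringEncoder("payload"),
        Headers: []sarama.RecordHeader{
            {Key: []byte("trace-id"), Value: []byte("abc123")},
        },
    }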
diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go
new file mode 100644
index 0000000..c653763
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/record_batch.go
@@ -0,0 +1,225 @@
+package sarama
+
+import (
+ "fmt"
+ "time"
+)
+
+const recordBatchOverhead = 49
+
+type recordsArray []*Record
+
+func (e recordsArray) encode(pe packetEncoder) error {
+ for _, r := range e {
+ if err := r.encode(pe); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (e recordsArray) decode(pd packetDecoder) error {
+ for i := range e {
+ rec := &Record{}
+ if err := rec.decode(pd); err != nil {
+ return err
+ }
+ e[i] = rec
+ }
+ return nil
+}
+
+type RecordBatch struct {
+ FirstOffset int64
+ PartitionLeaderEpoch int32
+ Version int8
+ Codec CompressionCodec
+ CompressionLevel int
+ Control bool
+ LogAppendTime bool
+ LastOffsetDelta int32
+ FirstTimestamp time.Time
+ MaxTimestamp time.Time
+ ProducerID int64
+ ProducerEpoch int16
+ FirstSequence int32
+ Records []*Record
+ PartialTrailingRecord bool
+ IsTransactional bool
+
+ compressedRecords []byte
+ recordsLen int // uncompressed records size
+}
+
+func (b *RecordBatch) LastOffset() int64 {
+ return b.FirstOffset + int64(b.LastOffsetDelta)
+}
+
+func (b *RecordBatch) encode(pe packetEncoder) error {
+ if b.Version != 2 {
+ return PacketEncodingError{fmt.Sprintf("unsupported record batch version (%d)", b.Version)}
+ }
+ pe.putInt64(b.FirstOffset)
+ pe.push(&lengthField{})
+ pe.putInt32(b.PartitionLeaderEpoch)
+ pe.putInt8(b.Version)
+ pe.push(newCRC32Field(crcCastagnoli))
+ pe.putInt16(b.computeAttributes())
+ pe.putInt32(b.LastOffsetDelta)
+
+ if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil {
+ return err
+ }
+
+ if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil {
+ return err
+ }
+
+ pe.putInt64(b.ProducerID)
+ pe.putInt16(b.ProducerEpoch)
+ pe.putInt32(b.FirstSequence)
+
+ if err := pe.putArrayLength(len(b.Records)); err != nil {
+ return err
+ }
+
+ if b.compressedRecords == nil {
+ if err := b.encodeRecords(pe); err != nil {
+ return err
+ }
+ }
+ if err := pe.putRawBytes(b.compressedRecords); err != nil {
+ return err
+ }
+
+ if err := pe.pop(); err != nil {
+ return err
+ }
+ return pe.pop()
+}
+
+func (b *RecordBatch) decode(pd packetDecoder) (err error) {
+ if b.FirstOffset, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ batchLen, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ if b.Version, err = pd.getInt8(); err != nil {
+ return err
+ }
+
+ crc32Decoder := acquireCrc32Field(crcCastagnoli)
+ defer releaseCrc32Field(crc32Decoder)
+
+ if err = pd.push(crc32Decoder); err != nil {
+ return err
+ }
+
+ attributes, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask)
+ b.Control = attributes&controlMask == controlMask
+ b.LogAppendTime = attributes&timestampTypeMask == timestampTypeMask
+ b.IsTransactional = attributes&isTransactionalMask == isTransactionalMask
+
+ if b.LastOffsetDelta, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil {
+ return err
+ }
+
+ if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil {
+ return err
+ }
+
+ if b.ProducerID, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ if b.ProducerEpoch, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ if b.FirstSequence, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ numRecs, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if numRecs >= 0 {
+ b.Records = make([]*Record, numRecs)
+ }
+
+ bufSize := int(batchLen) - recordBatchOverhead
+ recBuffer, err := pd.getRawBytes(bufSize)
+ if err != nil {
+ if err == ErrInsufficientData {
+ b.PartialTrailingRecord = true
+ b.Records = nil
+ return nil
+ }
+ return err
+ }
+
+ if err = pd.pop(); err != nil {
+ return err
+ }
+
+ recBuffer, err = decompress(b.Codec, recBuffer)
+ if err != nil {
+ return err
+ }
+
+ b.recordsLen = len(recBuffer)
+ err = decode(recBuffer, recordsArray(b.Records))
+ if err == ErrInsufficientData {
+ b.PartialTrailingRecord = true
+ b.Records = nil
+ return nil
+ }
+ return err
+}
+
+func (b *RecordBatch) encodeRecords(pe packetEncoder) error {
+ var raw []byte
+ var err error
+ if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil {
+ return err
+ }
+ b.recordsLen = len(raw)
+
+ b.compressedRecords, err = compress(b.Codec, b.CompressionLevel, raw)
+ return err
+}
+
+func (b *RecordBatch) computeAttributes() int16 {
+ attr := int16(b.Codec) & int16(compressionCodecMask)
+ if b.Control {
+ attr |= controlMask
+ }
+ if b.LogAppendTime {
+ attr |= timestampTypeMask
+ }
+ if b.IsTransactional {
+ attr |= isTransactionalMask
+ }
+ return attr
+}
+
+func (b *RecordBatch) addRecord(r *Record) {
+ b.Records = append(b.Records, r)
+}
diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go
new file mode 100644
index 0000000..98160c7
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/records.go
@@ -0,0 +1,204 @@
+package sarama
+
+import "fmt"
+
+const (
+ unknownRecords = iota
+ legacyRecords
+ defaultRecords
+
+ magicOffset = 16
+ magicLength = 1
+)
+
+// Records implements a union type containing either a RecordBatch or a legacy MessageSet.
+type Records struct {
+ recordsType int
+ MsgSet *MessageSet
+ RecordBatch *RecordBatch
+}
+
+func newLegacyRecords(msgSet *MessageSet) Records {
+ return Records{recordsType: legacyRecords, MsgSet: msgSet}
+}
+
+func newDefaultRecords(batch *RecordBatch) Records {
+ return Records{recordsType: defaultRecords, RecordBatch: batch}
+}
+
+// setTypeFromFields sets type of Records depending on which of MsgSet or RecordBatch is not nil.
+// The first return value indicates whether both fields are nil (and the type is not set).
+// If both fields are not nil, it returns an error.
+func (r *Records) setTypeFromFields() (bool, error) {
+ if r.MsgSet == nil && r.RecordBatch == nil {
+ return true, nil
+ }
+ if r.MsgSet != nil && r.RecordBatch != nil {
+ return false, fmt.Errorf("both MsgSet and RecordBatch are set, but record type is unknown")
+ }
+ r.recordsType = defaultRecords
+ if r.MsgSet != nil {
+ r.recordsType = legacyRecords
+ }
+ return false, nil
+}
+
+func (r *Records) encode(pe packetEncoder) error {
+ if r.recordsType == unknownRecords {
+ if empty, err := r.setTypeFromFields(); err != nil || empty {
+ return err
+ }
+ }
+
+ switch r.recordsType {
+ case legacyRecords:
+ if r.MsgSet == nil {
+ return nil
+ }
+ return r.MsgSet.encode(pe)
+ case defaultRecords:
+ if r.RecordBatch == nil {
+ return nil
+ }
+ return r.RecordBatch.encode(pe)
+ }
+
+ return fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) setTypeFromMagic(pd packetDecoder) error {
+ magic, err := magicValue(pd)
+ if err != nil {
+ return err
+ }
+
+ r.recordsType = defaultRecords
+ if magic < 2 {
+ r.recordsType = legacyRecords
+ }
+
+ return nil
+}
+
+func (r *Records) decode(pd packetDecoder) error {
+ if r.recordsType == unknownRecords {
+ if err := r.setTypeFromMagic(pd); err != nil {
+ return err
+ }
+ }
+
+ switch r.recordsType {
+ case legacyRecords:
+ r.MsgSet = &MessageSet{}
+ return r.MsgSet.decode(pd)
+ case defaultRecords:
+ r.RecordBatch = &RecordBatch{}
+ return r.RecordBatch.decode(pd)
+ }
+ return fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) numRecords() (int, error) {
+ if r.recordsType == unknownRecords {
+ if empty, err := r.setTypeFromFields(); err != nil || empty {
+ return 0, err
+ }
+ }
+
+ switch r.recordsType {
+ case legacyRecords:
+ if r.MsgSet == nil {
+ return 0, nil
+ }
+ return len(r.MsgSet.Messages), nil
+ case defaultRecords:
+ if r.RecordBatch == nil {
+ return 0, nil
+ }
+ return len(r.RecordBatch.Records), nil
+ }
+ return 0, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isPartial() (bool, error) {
+ if r.recordsType == unknownRecords {
+ if empty, err := r.setTypeFromFields(); err != nil || empty {
+ return false, err
+ }
+ }
+
+ switch r.recordsType {
+ case unknownRecords:
+ return false, nil
+ case legacyRecords:
+ if r.MsgSet == nil {
+ return false, nil
+ }
+ return r.MsgSet.PartialTrailingMessage, nil
+ case defaultRecords:
+ if r.RecordBatch == nil {
+ return false, nil
+ }
+ return r.RecordBatch.PartialTrailingRecord, nil
+ }
+ return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isControl() (bool, error) {
+ if r.recordsType == unknownRecords {
+ if empty, err := r.setTypeFromFields(); err != nil || empty {
+ return false, err
+ }
+ }
+
+ switch r.recordsType {
+ case legacyRecords:
+ return false, nil
+ case defaultRecords:
+ if r.RecordBatch == nil {
+ return false, nil
+ }
+ return r.RecordBatch.Control, nil
+ }
+ return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isOverflow() (bool, error) {
+ if r.recordsType == unknownRecords {
+ if empty, err := r.setTypeFromFields(); err != nil || empty {
+ return false, err
+ }
+ }
+
+ switch r.recordsType {
+ case unknownRecords:
+ return false, nil
+ case legacyRecords:
+ if r.MsgSet == nil {
+ return false, nil
+ }
+ return r.MsgSet.OverflowMessage, nil
+ case defaultRecords:
+ return false, nil
+ }
+ return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func magicValue(pd packetDecoder) (int8, error) {
+ return pd.peekInt8(magicOffset)
+}
+
+func (r *Records) getControlRecord() (ControlRecord, error) {
+ if r.RecordBatch == nil || len(r.RecordBatch.Records) <= 0 {
+ return ControlRecord{}, fmt.Errorf("cannot get control record, record batch is empty")
+ }
+
+ firstRecord := r.RecordBatch.Records[0]
+ controlRecord := ControlRecord{}
+ err := controlRecord.decode(&realDecoder{raw: firstRecord.Key}, &realDecoder{raw: firstRecord.Value})
+ if err != nil {
+ return ControlRecord{}, err
+ }
+
+ return controlRecord, nil
+}
diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go
new file mode 100644
index 0000000..97437d6
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/request.go
@@ -0,0 +1,171 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
+type protocolBody interface {
+ encoder
+ versionedDecoder
+ key() int16
+ version() int16
+ requiredVersion() KafkaVersion
+}
+
+type request struct {
+ correlationID int32
+ clientID string
+ body protocolBody
+}
+
+func (r *request) encode(pe packetEncoder) error {
+ pe.push(&lengthField{})
+ pe.putInt16(r.body.key())
+ pe.putInt16(r.body.version())
+ pe.putInt32(r.correlationID)
+
+ err := pe.putString(r.clientID)
+ if err != nil {
+ return err
+ }
+
+ err = r.body.encode(pe)
+ if err != nil {
+ return err
+ }
+
+ return pe.pop()
+}
+
+func (r *request) decode(pd packetDecoder) (err error) {
+ key, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ version, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.correlationID, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ r.clientID, err = pd.getString()
+ if err != nil {
+ return err
+ }
+
+ r.body = allocateBody(key, version)
+ if r.body == nil {
+ return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
+ }
+
+ return r.body.decode(pd, version)
+}
+
+func decodeRequest(r io.Reader) (*request, int, error) {
+ var (
+ bytesRead int
+ lengthBytes = make([]byte, 4)
+ )
+
+ if _, err := io.ReadFull(r, lengthBytes); err != nil {
+ return nil, bytesRead, err
+ }
+
+ bytesRead += len(lengthBytes)
+ length := int32(binary.BigEndian.Uint32(lengthBytes))
+
+ if length <= 4 || length > MaxRequestSize {
+ return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+ }
+
+ encodedReq := make([]byte, length)
+ if _, err := io.ReadFull(r, encodedReq); err != nil {
+ return nil, bytesRead, err
+ }
+
+ bytesRead += len(encodedReq)
+
+ req := &request{}
+ if err := decode(encodedReq, req); err != nil {
+ return nil, bytesRead, err
+ }
+
+ return req, bytesRead, nil
+}
+
+func allocateBody(key, version int16) protocolBody {
+ switch key {
+ case 0:
+ return &ProduceRequest{}
+ case 1:
+ return &FetchRequest{}
+ case 2:
+ return &OffsetRequest{Version: version}
+ case 3:
+ return &MetadataRequest{}
+ case 8:
+ return &OffsetCommitRequest{Version: version}
+ case 9:
+ return &OffsetFetchRequest{}
+ case 10:
+ return &FindCoordinatorRequest{}
+ case 11:
+ return &JoinGroupRequest{}
+ case 12:
+ return &HeartbeatRequest{}
+ case 13:
+ return &LeaveGroupRequest{}
+ case 14:
+ return &SyncGroupRequest{}
+ case 15:
+ return &DescribeGroupsRequest{}
+ case 16:
+ return &ListGroupsRequest{}
+ case 17:
+ return &SaslHandshakeRequest{}
+ case 18:
+ return &ApiVersionsRequest{}
+ case 19:
+ return &CreateTopicsRequest{}
+ case 20:
+ return &DeleteTopicsRequest{}
+ case 21:
+ return &DeleteRecordsRequest{}
+ case 22:
+ return &InitProducerIDRequest{}
+ case 24:
+ return &AddPartitionsToTxnRequest{}
+ case 25:
+ return &AddOffsetsToTxnRequest{}
+ case 26:
+ return &EndTxnRequest{}
+ case 28:
+ return &TxnOffsetCommitRequest{}
+ case 29:
+ return &DescribeAclsRequest{}
+ case 30:
+ return &CreateAclsRequest{}
+ case 31:
+ return &DeleteAclsRequest{}
+ case 32:
+ return &DescribeConfigsRequest{}
+ case 33:
+ return &AlterConfigsRequest{}
+ case 35:
+ return &DescribeLogDirsRequest{}
+ case 36:
+ return &SaslAuthenticateRequest{}
+ case 37:
+ return &CreatePartitionsRequest{}
+ case 42:
+ return &DeleteGroupsRequest{}
+ }
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go
new file mode 100644
index 0000000..7a75918
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/response_header.go
@@ -0,0 +1,24 @@
+package sarama
+
+import "fmt"
+
+const responseLengthSize = 4
+const correlationIDSize = 4
+
+type responseHeader struct {
+ length int32
+ correlationID int32
+}
+
+func (r *responseHeader) decode(pd packetDecoder) (err error) {
+ r.length, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ if r.length <= 4 || r.length > MaxResponseSize {
+ return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
+ }
+
+ r.correlationID, err = pd.getInt32()
+ return err
+}
diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go
new file mode 100644
index 0000000..1e0277a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sarama.go
@@ -0,0 +1,106 @@
+/*
+Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
+API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
+API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.
+
+To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
+and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
+The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
+useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
+depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
+SyncProducer can still sometimes be lost.
+
+To consume messages, use the Consumer or the ConsumerGroup API.
+
+For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
+and message sent on the wire; the Client provides higher-level metadata management that is shared between
+the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
+exactly with the protocol fields documented by Kafka at
+https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
+
+Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry.
+
+Broker related metrics:
+
+ +----------------------------------------------+------------+---------------------------------------------------------------+
+ | Name | Type | Description |
+ +----------------------------------------------+------------+---------------------------------------------------------------+
+ | incoming-byte-rate | meter | Bytes/second read off all brokers |
+ | incoming-byte-rate-for-broker-<broker-id> | meter | Bytes/second read off a given broker |
+ | outgoing-byte-rate | meter | Bytes/second written off all brokers |
+ | outgoing-byte-rate-for-broker-<broker-id> | meter | Bytes/second written off a given broker |
+ | request-rate | meter | Requests/second sent to all brokers |
+ | request-rate-for-broker-<broker-id> | meter | Requests/second sent to a given broker |
+ | request-size | histogram | Distribution of the request size in bytes for all brokers |
+ | request-size-for-broker-<broker-id> | histogram | Distribution of the request size in bytes for a given broker |
+ | request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers |
+ | request-latency-in-ms-for-broker-<broker-id> | histogram | Distribution of the request latency in ms for a given broker |
+ | response-rate | meter | Responses/second received from all brokers |
+ | response-rate-for-broker-<broker-id> | meter | Responses/second received from a given broker |
+ | response-size | histogram | Distribution of the response size in bytes for all brokers |
+ | response-size-for-broker-<broker-id> | histogram | Distribution of the response size in bytes for a given broker |
+ +----------------------------------------------+------------+---------------------------------------------------------------+
+
+Note that we do not gather specific metrics for seed brokers, but they are part of the "all brokers" metrics.
+
+Producer related metrics:
+
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+ | Name | Type | Description |
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+ | batch-size | histogram | Distribution of the number of bytes sent per partition per request for all topics |
+ | batch-size-for-topic-<topic> | histogram | Distribution of the number of bytes sent per partition per request for a given topic |
+ | record-send-rate | meter | Records/second sent to all topics |
+ | record-send-rate-for-topic-<topic> | meter | Records/second sent to a given topic |
+ | records-per-request | histogram | Distribution of the number of records sent per request for all topics |
+ | records-per-request-for-topic-<topic> | histogram | Distribution of the number of records sent per request for a given topic |
+ | compression-ratio | histogram | Distribution of the compression ratio times 100 of record batches for all topics |
+ | compression-ratio-for-topic-<topic> | histogram | Distribution of the compression ratio times 100 of record batches for a given topic |
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+
+Consumer related metrics:
+
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+ | Name | Type | Description |
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+ | consumer-batch-size | histogram | Distribution of the number of messages in a batch |
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+
+*/
+package sarama
+
+import (
+ "io/ioutil"
+ "log"
+)
+
+var (
+ // Logger is the instance of a StdLogger interface that Sarama writes connection
+ // management events to. By default it is set to discard all log messages via ioutil.Discard,
+ // but you can set it to redirect wherever you want.
+ Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
+
+ // PanicHandler is called for recovering from panics spawned internally to the library (and thus
+ // not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
+ PanicHandler func(interface{})
+
+ // MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
+ // to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
+ // with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
+ // to process.
+ MaxRequestSize int32 = 100 * 1024 * 1024
+
+ // MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
+ // a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
+ // protect the client from running out of memory. Please note that brokers do not have any natural limit on
+ // the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
+ // (see https://issues.apache.org/jira/browse/KAFKA-2063).
+ MaxResponseSize int32 = 100 * 1024 * 1024
+)
+
+// StdLogger is used to log error messages.
+type StdLogger interface {
+ Print(v ...interface{})
+ Printf(format string, v ...interface{})
+ Println(v ...interface{})
+}
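
For orientation, a minimal usage sketch (not part of the patch) tying the package-level knobs above -- Logger, PanicHandler, MaxResponseSize -- to a SyncProducer. The broker address and topic name are placeholders; all identifiers come from sarama's public API.

    package main

    import (
        "log"
        "os"

        "github.com/Shopify/sarama"
    )

    func main() {
        // Redirect Sarama's connection-management logging away from ioutil.Discard.
        sarama.Logger = log.New(os.Stderr, "[Sarama] ", log.LstdFlags)

        // Recover panics raised on Sarama's internal goroutines.
        sarama.PanicHandler = func(v interface{}) { log.Printf("sarama panic: %v", v) }

        // Tighten the response-size guard below the 100 MiB default.
        sarama.MaxResponseSize = 10 * 1024 * 1024

        cfg := sarama.NewConfig()
        cfg.Producer.Return.Successes = true // required by the SyncProducer

        producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
        if err != nil {
            log.Fatal(err)
        }
        defer producer.Close()

        partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
            Topic: "example-topic",
            Value: sarama.StringEncoder("hello"),
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("stored at partition=%d offset=%d", partition, offset)
    }
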
diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go
new file mode 100644
index 0000000..54c8b09
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go
@@ -0,0 +1,29 @@
+package sarama
+
+type SaslAuthenticateRequest struct {
+ SaslAuthBytes []byte
+}
+
+// APIKeySASLAuth is the API key for the SaslAuthenticate Kafka API
+const APIKeySASLAuth = 36
+
+func (r *SaslAuthenticateRequest) encode(pe packetEncoder) error {
+ return pe.putBytes(r.SaslAuthBytes)
+}
+
+func (r *SaslAuthenticateRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.SaslAuthBytes, err = pd.getBytes()
+ return err
+}
+
+func (r *SaslAuthenticateRequest) key() int16 {
+ return APIKeySASLAuth
+}
+
+func (r *SaslAuthenticateRequest) version() int16 {
+ return 0
+}
+
+func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion {
+ return V1_0_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go
new file mode 100644
index 0000000..0038c3f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go
@@ -0,0 +1,44 @@
+package sarama
+
+type SaslAuthenticateResponse struct {
+ Err KError
+ ErrorMessage *string
+ SaslAuthBytes []byte
+}
+
+func (r *SaslAuthenticateResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ if err := pe.putNullableString(r.ErrorMessage); err != nil {
+ return err
+ }
+ return pe.putBytes(r.SaslAuthBytes)
+}
+
+func (r *SaslAuthenticateResponse) decode(pd packetDecoder, version int16) error {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ if r.ErrorMessage, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ r.SaslAuthBytes, err = pd.getBytes()
+
+ return err
+}
+
+func (r *SaslAuthenticateResponse) key() int16 {
+ return APIKeySASLAuth
+}
+
+func (r *SaslAuthenticateResponse) version() int16 {
+ return 0
+}
+
+func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion {
+ return V1_0_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
new file mode 100644
index 0000000..fe5ba05
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
@@ -0,0 +1,34 @@
+package sarama
+
+type SaslHandshakeRequest struct {
+ Mechanism string
+ Version int16
+}
+
+func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.Mechanism); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.Mechanism, err = pd.getString(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *SaslHandshakeRequest) key() int16 {
+ return 17
+}
+
+func (r *SaslHandshakeRequest) version() int16 {
+ return r.Version
+}
+
+func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
+ return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
new file mode 100644
index 0000000..ef290d4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
@@ -0,0 +1,38 @@
+package sarama
+
+type SaslHandshakeResponse struct {
+ Err KError
+ EnabledMechanisms []string
+}
+
+func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ return pe.putStringArray(r.EnabledMechanisms)
+}
+
+func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *SaslHandshakeResponse) key() int16 {
+ return 17
+}
+
+func (r *SaslHandshakeResponse) version() int16 {
+ return 0
+}
+
+func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
+ return V0_10_0_0
+}
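
The four SASL request/response types above back the client-side SASL flow; applications normally never build them directly and instead set the SASL fields on the client Config. A short sketch follows, assuming the Config.Net.SASL fields and the SASLTypePlaintext constant exported elsewhere in this package; credentials are placeholders.

    package example

    import "github.com/Shopify/sarama"

    // newSASLConfig returns a client config that performs a SASL handshake
    // (SaslHandshakeRequest) and then authenticates with SASL/PLAIN on connect.
    func newSASLConfig(user, pass string) *sarama.Config {
        cfg := sarama.NewConfig()
        cfg.Net.SASL.Enable = true
        cfg.Net.SASL.Handshake = true
        cfg.Net.SASL.Mechanism = sarama.SASLTypePlaintext
        cfg.Net.SASL.User = user
        cfg.Net.SASL.Password = pass
        return cfg
    }
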
diff --git a/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go b/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go
new file mode 100644
index 0000000..bb0c82c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go
@@ -0,0 +1,124 @@
+package sarama
+
+type topicPartitionAssignment struct {
+ Topic string
+ Partition int32
+}
+
+type StickyAssignorUserData interface {
+ partitions() []topicPartitionAssignment
+ hasGeneration() bool
+ generation() int
+}
+
+// StickyAssignorUserDataV0 holds topic partition information for an assignment
+type StickyAssignorUserDataV0 struct {
+ Topics map[string][]int32
+
+ topicPartitions []topicPartitionAssignment
+}
+
+func (m *StickyAssignorUserDataV0) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(m.Topics)); err != nil {
+ return err
+ }
+
+ for topic, partitions := range m.Topics {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putInt32Array(partitions); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (m *StickyAssignorUserDataV0) decode(pd packetDecoder) (err error) {
+ var topicLen int
+ if topicLen, err = pd.getArrayLength(); err != nil {
+ return
+ }
+
+ m.Topics = make(map[string][]int32, topicLen)
+ for i := 0; i < topicLen; i++ {
+ var topic string
+ if topic, err = pd.getString(); err != nil {
+ return
+ }
+ if m.Topics[topic], err = pd.getInt32Array(); err != nil {
+ return
+ }
+ }
+ m.topicPartitions = populateTopicPartitions(m.Topics)
+ return nil
+}
+
+func (m *StickyAssignorUserDataV0) partitions() []topicPartitionAssignment { return m.topicPartitions }
+func (m *StickyAssignorUserDataV0) hasGeneration() bool { return false }
+func (m *StickyAssignorUserDataV0) generation() int { return defaultGeneration }
+
+// StickyAssignorUserDataV1 holds topic partition information for an assignment
+type StickyAssignorUserDataV1 struct {
+ Topics map[string][]int32
+ Generation int32
+
+ topicPartitions []topicPartitionAssignment
+}
+
+func (m *StickyAssignorUserDataV1) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(m.Topics)); err != nil {
+ return err
+ }
+
+ for topic, partitions := range m.Topics {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putInt32Array(partitions); err != nil {
+ return err
+ }
+ }
+
+ pe.putInt32(m.Generation)
+ return nil
+}
+
+func (m *StickyAssignorUserDataV1) decode(pd packetDecoder) (err error) {
+ var topicLen int
+ if topicLen, err = pd.getArrayLength(); err != nil {
+ return
+ }
+
+ m.Topics = make(map[string][]int32, topicLen)
+ for i := 0; i < topicLen; i++ {
+ var topic string
+ if topic, err = pd.getString(); err != nil {
+ return
+ }
+ if m.Topics[topic], err = pd.getInt32Array(); err != nil {
+ return
+ }
+ }
+
+ m.Generation, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ m.topicPartitions = populateTopicPartitions(m.Topics)
+ return nil
+}
+
+func (m *StickyAssignorUserDataV1) partitions() []topicPartitionAssignment { return m.topicPartitions }
+func (m *StickyAssignorUserDataV1) hasGeneration() bool { return true }
+func (m *StickyAssignorUserDataV1) generation() int { return int(m.Generation) }
+
+func populateTopicPartitions(topics map[string][]int32) []topicPartitionAssignment {
+ topicPartitions := make([]topicPartitionAssignment, 0)
+ for topic, partitions := range topics {
+ for _, partition := range partitions {
+ topicPartitions = append(topicPartitions, topicPartitionAssignment{Topic: topic, Partition: partition})
+ }
+ }
+ return topicPartitions
+}
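
The user-data types above carry the per-member state of the sticky partition assignor. A consumer application opts into that assignor through configuration rather than by touching these types; a sketch follows, assuming the BalanceStrategySticky value and Consumer.Group.Rebalance.Strategy field exported elsewhere in this package.

    package example

    import "github.com/Shopify/sarama"

    // newStickyGroupConfig configures a consumer group to use the sticky
    // partition assignor, whose per-member state is carried by the
    // StickyAssignorUserData types above.
    func newStickyGroupConfig() *sarama.Config {
        cfg := sarama.NewConfig()
        cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky
        return cfg
    }
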
diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go
new file mode 100644
index 0000000..fe20708
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_group_request.go
@@ -0,0 +1,100 @@
+package sarama
+
+type SyncGroupRequest struct {
+ GroupId string
+ GenerationId int32
+ MemberId string
+ GroupAssignments map[string][]byte
+}
+
+func (r *SyncGroupRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+
+ pe.putInt32(r.GenerationId)
+
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil {
+ return err
+ }
+ for memberId, memberAssignment := range r.GroupAssignments {
+ if err := pe.putString(memberId); err != nil {
+ return err
+ }
+ if err := pe.putBytes(memberAssignment); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if r.GenerationId, err = pd.getInt32(); err != nil {
+ return
+ }
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.GroupAssignments = make(map[string][]byte)
+ for i := 0; i < n; i++ {
+ memberId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ memberAssignment, err := pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ r.GroupAssignments[memberId] = memberAssignment
+ }
+
+ return nil
+}
+
+func (r *SyncGroupRequest) key() int16 {
+ return 14
+}
+
+func (r *SyncGroupRequest) version() int16 {
+ return 0
+}
+
+func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
+
+func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
+ if r.GroupAssignments == nil {
+ r.GroupAssignments = make(map[string][]byte)
+ }
+
+ r.GroupAssignments[memberId] = memberAssignment
+}
+
+func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error {
+ bin, err := encode(memberAssignment, nil)
+ if err != nil {
+ return err
+ }
+
+ r.AddGroupAssignment(memberId, bin)
+ return nil
+}
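
A sketch of how a group leader could assemble a SyncGroupRequest using AddGroupAssignmentMember; the group, member and topic names are placeholders, and most applications use the higher-level ConsumerGroup API instead of issuing this request themselves.

    package example

    import "github.com/Shopify/sarama"

    // buildSyncGroup assembles a SyncGroupRequest, serializing each member's
    // assignment via AddGroupAssignmentMember.
    func buildSyncGroup(group, member string, generation int32) (*sarama.SyncGroupRequest, error) {
        req := &sarama.SyncGroupRequest{
            GroupId:      group,
            GenerationId: generation,
            MemberId:     member,
        }
        assignment := &sarama.ConsumerGroupMemberAssignment{
            Version: 1,
            Topics:  map[string][]int32{"example-topic": {0, 1, 2}},
        }
        if err := req.AddGroupAssignmentMember(member, assignment); err != nil {
            return nil, err
        }
        return req, nil
    }
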
diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go
new file mode 100644
index 0000000..194b382
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_group_response.go
@@ -0,0 +1,41 @@
+package sarama
+
+type SyncGroupResponse struct {
+ Err KError
+ MemberAssignment []byte
+}
+
+func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+ assignment := new(ConsumerGroupMemberAssignment)
+ err := decode(r.MemberAssignment, assignment)
+ return assignment, err
+}
+
+func (r *SyncGroupResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ return pe.putBytes(r.MemberAssignment)
+}
+
+func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ r.MemberAssignment, err = pd.getBytes()
+ return
+}
+
+func (r *SyncGroupResponse) key() int16 {
+ return 14
+}
+
+func (r *SyncGroupResponse) version() int16 {
+ return 0
+}
+
+func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go
new file mode 100644
index 0000000..021c5a0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_producer.go
@@ -0,0 +1,149 @@
+package sarama
+
+import "sync"
+
+// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
+// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
+// to avoid leaks; it may not be garbage-collected automatically when it passes out of scope.
+//
+// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
+// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
+// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
+//
+// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
+// be set to true in its configuration.
+type SyncProducer interface {
+
+ // SendMessage produces a given message, and returns only when it either has
+ // succeeded or failed to produce. It will return the partition and the offset
+ // of the produced message, or an error if the message failed to produce.
+ SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
+
+ // SendMessages produces a given set of messages, and returns only when all
+ // messages in the set have either succeeded or failed. Note that messages
+ // can succeed and fail individually; if some succeed and some fail,
+ // SendMessages will return an error.
+ SendMessages(msgs []*ProducerMessage) error
+
+ // Close shuts down the producer and waits for any buffered messages to be
+ // flushed. You must call this function before a producer object passes out of
+ // scope, as it may otherwise leak memory. You must call this before calling
+ // Close on the underlying client.
+ Close() error
+}
+
+type syncProducer struct {
+ producer *asyncProducer
+ wg sync.WaitGroup
+}
+
+// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
+func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
+ if config == nil {
+ config = NewConfig()
+ config.Producer.Return.Successes = true
+ }
+
+ if err := verifyProducerConfig(config); err != nil {
+ return nil, err
+ }
+
+ p, err := NewAsyncProducer(addrs, config)
+ if err != nil {
+ return nil, err
+ }
+ return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
+ if err := verifyProducerConfig(client.Config()); err != nil {
+ return nil, err
+ }
+
+ p, err := NewAsyncProducerFromClient(client)
+ if err != nil {
+ return nil, err
+ }
+ return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
+ sp := &syncProducer{producer: p}
+
+ sp.wg.Add(2)
+ go withRecover(sp.handleSuccesses)
+ go withRecover(sp.handleErrors)
+
+ return sp
+}
+
+func verifyProducerConfig(config *Config) error {
+ if !config.Producer.Return.Errors {
+ return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
+ }
+ if !config.Producer.Return.Successes {
+ return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
+ }
+ return nil
+}
+
+func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
+ expectation := make(chan *ProducerError, 1)
+ msg.expectation = expectation
+ sp.producer.Input() <- msg
+
+ if err := <-expectation; err != nil {
+ return -1, -1, err.Err
+ }
+
+ return msg.Partition, msg.Offset, nil
+}
+
+func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
+ expectations := make(chan chan *ProducerError, len(msgs))
+ go func() {
+ for _, msg := range msgs {
+ expectation := make(chan *ProducerError, 1)
+ msg.expectation = expectation
+ sp.producer.Input() <- msg
+ expectations <- expectation
+ }
+ close(expectations)
+ }()
+
+ var errors ProducerErrors
+ for expectation := range expectations {
+ if err := <-expectation; err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+func (sp *syncProducer) handleSuccesses() {
+ defer sp.wg.Done()
+ for msg := range sp.producer.Successes() {
+ expectation := msg.expectation
+ expectation <- nil
+ }
+}
+
+func (sp *syncProducer) handleErrors() {
+ defer sp.wg.Done()
+ for err := range sp.producer.Errors() {
+ expectation := err.Msg.expectation
+ expectation <- err
+ }
+}
+
+func (sp *syncProducer) Close() error {
+ sp.producer.AsyncClose()
+ sp.wg.Wait()
+ return nil
+}
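
Since SendMessages reports per-message failures through a single error value, a caller that wants the individual results can type-assert it to ProducerErrors. A short sketch, with the topic and values as placeholders:

    package example

    import (
        "log"

        "github.com/Shopify/sarama"
    )

    // sendBatch illustrates SendMessages: messages succeed or fail individually,
    // and a non-nil error can be unwrapped into sarama.ProducerErrors.
    func sendBatch(p sarama.SyncProducer, topic string, values []string) {
        msgs := make([]*sarama.ProducerMessage, 0, len(values))
        for _, v := range values {
            msgs = append(msgs, &sarama.ProducerMessage{Topic: topic, Value: sarama.StringEncoder(v)})
        }
        if err := p.SendMessages(msgs); err != nil {
            if errs, ok := err.(sarama.ProducerErrors); ok {
                for _, e := range errs {
                    log.Printf("failed to produce %v: %v", e.Msg.Value, e.Err)
                }
                return
            }
            log.Printf("send failed: %v", err)
        }
    }
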
diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/Shopify/sarama/timestamp.go
new file mode 100644
index 0000000..372278d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/timestamp.go
@@ -0,0 +1,40 @@
+package sarama
+
+import (
+ "fmt"
+ "time"
+)
+
+type Timestamp struct {
+ *time.Time
+}
+
+func (t Timestamp) encode(pe packetEncoder) error {
+ timestamp := int64(-1)
+
+ if !t.Before(time.Unix(0, 0)) {
+ timestamp = t.UnixNano() / int64(time.Millisecond)
+ } else if !t.IsZero() {
+ return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", t)}
+ }
+
+ pe.putInt64(timestamp)
+ return nil
+}
+
+func (t Timestamp) decode(pd packetDecoder) error {
+ millis, err := pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ // negative timestamps are invalid; in these cases we return
+ // a zero time
+ timestamp := time.Time{}
+ if millis >= 0 {
+ timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+ }
+
+ *t.Time = timestamp
+ return nil
+}
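
A standalone sketch of the millisecond conversion used by Timestamp, for readers checking the rounding behaviour; it is not part of the library API, and it simplifies the encode path by mapping all pre-epoch times to -1 instead of rejecting non-zero ones.

    package example

    import "time"

    // toMillis mirrors the happy path of Timestamp.encode: -1 signals "no timestamp".
    func toMillis(t time.Time) int64 {
        if t.Before(time.Unix(0, 0)) {
            return -1
        }
        return t.UnixNano() / int64(time.Millisecond)
    }

    // fromMillis mirrors Timestamp.decode: negative values decode to the zero time.
    func fromMillis(ms int64) time.Time {
        if ms < 0 {
            return time.Time{}
        }
        return time.Unix(ms/1000, (ms%1000)*int64(time.Millisecond))
    }
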
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
new file mode 100644
index 0000000..71e95b8
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
@@ -0,0 +1,126 @@
+package sarama
+
+type TxnOffsetCommitRequest struct {
+ TransactionalID string
+ GroupID string
+ ProducerID int64
+ ProducerEpoch int16
+ Topics map[string][]*PartitionOffsetMetadata
+}
+
+func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(t.TransactionalID); err != nil {
+ return err
+ }
+ if err := pe.putString(t.GroupID); err != nil {
+ return err
+ }
+ pe.putInt64(t.ProducerID)
+ pe.putInt16(t.ProducerEpoch)
+
+ if err := pe.putArrayLength(len(t.Topics)); err != nil {
+ return err
+ }
+ for topic, partitions := range t.Topics {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for _, partition := range partitions {
+ if err := partition.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
+ if t.TransactionalID, err = pd.getString(); err != nil {
+ return err
+ }
+ if t.GroupID, err = pd.getString(); err != nil {
+ return err
+ }
+ if t.ProducerID, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if t.ProducerEpoch, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ t.Topics = make(map[string][]*PartitionOffsetMetadata)
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ m, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ t.Topics[topic] = make([]*PartitionOffsetMetadata, m)
+
+ for j := 0; j < m; j++ {
+ partitionOffsetMetadata := new(PartitionOffsetMetadata)
+ if err := partitionOffsetMetadata.decode(pd, version); err != nil {
+ return err
+ }
+ t.Topics[topic][j] = partitionOffsetMetadata
+ }
+ }
+
+ return nil
+}
+
+func (a *TxnOffsetCommitRequest) key() int16 {
+ return 28
+}
+
+func (a *TxnOffsetCommitRequest) version() int16 {
+ return 0
+}
+
+func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion {
+ return V0_11_0_0
+}
+
+type PartitionOffsetMetadata struct {
+ Partition int32
+ Offset int64
+ Metadata *string
+}
+
+func (p *PartitionOffsetMetadata) encode(pe packetEncoder) error {
+ pe.putInt32(p.Partition)
+ pe.putInt64(p.Offset)
+ if err := pe.putNullableString(p.Metadata); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) {
+ if p.Partition, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if p.Offset, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if p.Metadata, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ return nil
+}
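
To illustrate the field layout only, a sketch of a populated TxnOffsetCommitRequest; all identifiers and values are placeholders, and in practice this request is issued by higher-level transactional code rather than constructed by hand.

    package example

    import "github.com/Shopify/sarama"

    // exampleTxnOffsetCommit shows the shape of a TxnOffsetCommitRequest.
    func exampleTxnOffsetCommit() *sarama.TxnOffsetCommitRequest {
        meta := "checkpoint-1"
        return &sarama.TxnOffsetCommitRequest{
            TransactionalID: "txn-producer-1",
            GroupID:         "example-group",
            ProducerID:      1000,
            ProducerEpoch:   0,
            Topics: map[string][]*sarama.PartitionOffsetMetadata{
                "example-topic": {
                    {Partition: 0, Offset: 42, Metadata: &meta},
                },
            },
        }
    }
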
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
new file mode 100644
index 0000000..6c980f4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
@@ -0,0 +1,83 @@
+package sarama
+
+import (
+ "time"
+)
+
+type TxnOffsetCommitResponse struct {
+ ThrottleTime time.Duration
+ Topics map[string][]*PartitionError
+}
+
+func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error {
+ pe.putInt32(int32(t.ThrottleTime / time.Millisecond))
+ if err := pe.putArrayLength(len(t.Topics)); err != nil {
+ return err
+ }
+
+ for topic, e := range t.Topics {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(e)); err != nil {
+ return err
+ }
+ for _, partitionError := range e {
+ if err := partitionError.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
+ throttleTime, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ t.Topics = make(map[string][]*PartitionError)
+
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ m, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ t.Topics[topic] = make([]*PartitionError, m)
+
+ for j := 0; j < m; j++ {
+ t.Topics[topic][j] = new(PartitionError)
+ if err := t.Topics[topic][j].decode(pd, version); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (a *TxnOffsetCommitResponse) key() int16 {
+ return 28
+}
+
+func (a *TxnOffsetCommitResponse) version() int16 {
+ return 0
+}
+
+func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion {
+ return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go
new file mode 100644
index 0000000..7c815cd
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/utils.go
@@ -0,0 +1,225 @@
+package sarama
+
+import (
+ "bufio"
+ "fmt"
+ "net"
+ "regexp"
+)
+
+type none struct{}
+
+// make []int32 sortable so we can sort partition numbers
+type int32Slice []int32
+
+func (slice int32Slice) Len() int {
+ return len(slice)
+}
+
+func (slice int32Slice) Less(i, j int) bool {
+ return slice[i] < slice[j]
+}
+
+func (slice int32Slice) Swap(i, j int) {
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+func dupInt32Slice(input []int32) []int32 {
+ ret := make([]int32, 0, len(input))
+ for _, val := range input {
+ ret = append(ret, val)
+ }
+ return ret
+}
+
+func withRecover(fn func()) {
+ defer func() {
+ handler := PanicHandler
+ if handler != nil {
+ if err := recover(); err != nil {
+ handler(err)
+ }
+ }
+ }()
+
+ fn()
+}
+
+func safeAsyncClose(b *Broker) {
+ tmp := b // local var prevents clobbering in goroutine
+ go withRecover(func() {
+ if connected, _ := tmp.Connected(); connected {
+ if err := tmp.Close(); err != nil {
+ Logger.Println("Error closing broker", tmp.ID(), ":", err)
+ }
+ }
+ })
+}
+
+// Encoder is a simple interface for any type that can be encoded as an array of bytes
+// in order to be sent as the key or value of a Kafka message. Length() is provided as an
+// optimization, and must return the same as len() on the result of Encode().
+type Encoder interface {
+ Encode() ([]byte, error)
+ Length() int
+}
+
+// make strings and byte slices encodable for convenience so they can be used as keys
+// and/or values in kafka messages
+
+// StringEncoder implements the Encoder interface for Go strings so that they can be used
+// as the Key or Value in a ProducerMessage.
+type StringEncoder string
+
+func (s StringEncoder) Encode() ([]byte, error) {
+ return []byte(s), nil
+}
+
+func (s StringEncoder) Length() int {
+ return len(s)
+}
+
+// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
+// as the Key or Value in a ProducerMessage.
+type ByteEncoder []byte
+
+func (b ByteEncoder) Encode() ([]byte, error) {
+ return b, nil
+}
+
+func (b ByteEncoder) Length() int {
+ return len(b)
+}
+
+// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
+// reads that trigger syscalls.
+type bufConn struct {
+ net.Conn
+ buf *bufio.Reader
+}
+
+func newBufConn(conn net.Conn) *bufConn {
+ return &bufConn{
+ Conn: conn,
+ buf: bufio.NewReader(conn),
+ }
+}
+
+func (bc *bufConn) Read(b []byte) (n int, err error) {
+ return bc.buf.Read(b)
+}
+
+// KafkaVersion instances represent versions of the upstream Kafka broker.
+type KafkaVersion struct {
+ // it's a struct rather than just typing the array directly to make it opaque and stop people
+ // generating their own arbitrary versions
+ version [4]uint
+}
+
+func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
+ return KafkaVersion{
+ version: [4]uint{major, minor, veryMinor, patch},
+ }
+}
+
+// IsAtLeast returns true if and only if the version it is called on is
+// greater than or equal to the version passed in:
+// V1.IsAtLeast(V2) // false
+// V2.IsAtLeast(V1) // true
+func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
+ for i := range v.version {
+ if v.version[i] > other.version[i] {
+ return true
+ } else if v.version[i] < other.version[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// Effective constants defining the supported Kafka versions.
+var (
+ V0_8_2_0 = newKafkaVersion(0, 8, 2, 0)
+ V0_8_2_1 = newKafkaVersion(0, 8, 2, 1)
+ V0_8_2_2 = newKafkaVersion(0, 8, 2, 2)
+ V0_9_0_0 = newKafkaVersion(0, 9, 0, 0)
+ V0_9_0_1 = newKafkaVersion(0, 9, 0, 1)
+ V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
+ V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
+ V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
+ V0_10_1_1 = newKafkaVersion(0, 10, 1, 1)
+ V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
+ V0_10_2_1 = newKafkaVersion(0, 10, 2, 1)
+ V0_11_0_0 = newKafkaVersion(0, 11, 0, 0)
+ V0_11_0_1 = newKafkaVersion(0, 11, 0, 1)
+ V0_11_0_2 = newKafkaVersion(0, 11, 0, 2)
+ V1_0_0_0 = newKafkaVersion(1, 0, 0, 0)
+ V1_1_0_0 = newKafkaVersion(1, 1, 0, 0)
+ V1_1_1_0 = newKafkaVersion(1, 1, 1, 0)
+ V2_0_0_0 = newKafkaVersion(2, 0, 0, 0)
+ V2_0_1_0 = newKafkaVersion(2, 0, 1, 0)
+ V2_1_0_0 = newKafkaVersion(2, 1, 0, 0)
+ V2_2_0_0 = newKafkaVersion(2, 2, 0, 0)
+ V2_3_0_0 = newKafkaVersion(2, 3, 0, 0)
+
+ SupportedVersions = []KafkaVersion{
+ V0_8_2_0,
+ V0_8_2_1,
+ V0_8_2_2,
+ V0_9_0_0,
+ V0_9_0_1,
+ V0_10_0_0,
+ V0_10_0_1,
+ V0_10_1_0,
+ V0_10_1_1,
+ V0_10_2_0,
+ V0_10_2_1,
+ V0_11_0_0,
+ V0_11_0_1,
+ V0_11_0_2,
+ V1_0_0_0,
+ V1_1_0_0,
+ V1_1_1_0,
+ V2_0_0_0,
+ V2_0_1_0,
+ V2_1_0_0,
+ V2_2_0_0,
+ V2_3_0_0,
+ }
+ MinVersion = V0_8_2_0
+ MaxVersion = V2_3_0_0
+)
+
+// ParseKafkaVersion parses and returns a Kafka version from a string, or an error if the string is malformed
+func ParseKafkaVersion(s string) (KafkaVersion, error) {
+ if len(s) < 5 {
+ return MinVersion, fmt.Errorf("invalid version `%s`", s)
+ }
+ var major, minor, veryMinor, patch uint
+ var err error
+ if s[0] == '0' {
+ err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
+ } else {
+ err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
+ }
+ if err != nil {
+ return MinVersion, err
+ }
+ return newKafkaVersion(major, minor, veryMinor, patch), nil
+}
+
+func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error {
+ if !regexp.MustCompile(pattern).MatchString(s) {
+ return fmt.Errorf("invalid version `%s`", s)
+ }
+ _, err := fmt.Sscanf(s, format, v[0], v[1], v[2])
+ return err
+}
+
+func (v KafkaVersion) String() string {
+ if v.version[0] == 0 {
+ return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3])
+ }
+
+ return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2])
+}
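
A short sketch showing how ParseKafkaVersion and IsAtLeast are typically used to gate protocol features on the broker version; the version string and the feature check are placeholders.

    package example

    import (
        "log"

        "github.com/Shopify/sarama"
    )

    // configureVersion parses a broker version string (e.g. from configuration)
    // and gates newer protocol features on it via IsAtLeast.
    func configureVersion(s string) (*sarama.Config, error) {
        v, err := sarama.ParseKafkaVersion(s) // accepts "2.3.0" or "0.11.0.2"
        if err != nil {
            return nil, err
        }
        cfg := sarama.NewConfig()
        cfg.Version = v
        if v.IsAtLeast(sarama.V0_11_0_0) {
            log.Println("record batches and headers are available")
        }
        return cfg, nil
    }
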
diff --git a/vendor/github.com/Shopify/sarama/zstd.go b/vendor/github.com/Shopify/sarama/zstd.go
new file mode 100644
index 0000000..58880e2
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/zstd.go
@@ -0,0 +1,27 @@
+package sarama
+
+import (
+ "github.com/klauspost/compress/zstd"
+ "sync"
+)
+
+var (
+ zstdDec *zstd.Decoder
+ zstdEnc *zstd.Encoder
+
+ zstdEncOnce, zstdDecOnce sync.Once
+)
+
+func zstdDecompress(dst, src []byte) ([]byte, error) {
+ zstdDecOnce.Do(func() {
+ zstdDec, _ = zstd.NewReader(nil)
+ })
+ return zstdDec.DecodeAll(src, dst)
+}
+
+func zstdCompress(dst, src []byte) ([]byte, error) {
+ zstdEncOnce.Do(func() {
+ zstdEnc, _ = zstd.NewWriter(nil, zstd.WithZeroFrames(true))
+ })
+ return zstdEnc.EncodeAll(src, dst), nil
+}
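
The lazily initialized encoder and decoder above are used when zstd-compressed record batches are produced or consumed. A producer opts into zstd through configuration; a sketch follows, assuming the CompressionZSTD constant and Producer.Compression field exported elsewhere in this package, with the broker version requirement hedged to Kafka 2.1.0 or newer.

    package example

    import "github.com/Shopify/sarama"

    // newZstdProducerConfig enables zstd compression for produced record batches;
    // brokers must be at least Kafka 2.1.0 to accept it.
    func newZstdProducerConfig() *sarama.Config {
        cfg := sarama.NewConfig()
        cfg.Version = sarama.V2_1_0_0
        cfg.Producer.Compression = sarama.CompressionZSTD
        return cfg
    }
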