SEBA-749 Initial check-in of nem-ondemand-proxy
Add Makefile
Add Kubernetes yaml and scripts

Change-Id: Ic76d2a68bb11a95d5d57a04f1fab373ec36c0958
diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore
new file mode 100644
index 0000000..6e362e4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/.gitignore
@@ -0,0 +1,27 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+*.test
+
+# Folders
+_obj
+_test
+.vagrant
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+coverage.txt
+profile.out
diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml
new file mode 100644
index 0000000..cace313
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/.travis.yml
@@ -0,0 +1,37 @@
+dist: xenial
+language: go
+go:
+- 1.12.x
+- 1.13.x
+
+env:
+  global:
+  - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095
+  - TOXIPROXY_ADDR=http://localhost:8474
+  - KAFKA_INSTALL_ROOT=/home/travis/kafka
+  - KAFKA_HOSTNAME=localhost
+  - DEBUG=true
+  matrix:
+  - KAFKA_VERSION=2.2.1 KAFKA_SCALA_VERSION=2.12
+  - KAFKA_VERSION=2.3.0 KAFKA_SCALA_VERSION=2.12
+
+before_install:
+- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
+- vagrant/install_cluster.sh
+- vagrant/boot_cluster.sh
+- vagrant/create_topics.sh
+- vagrant/run_java_producer.sh
+
+install: make install_dependencies
+
+script:
+- make test
+- make vet
+- make errcheck
+- if [[ "$TRAVIS_GO_VERSION" == 1.13* ]]; then make fmt; fi
+
+after_success:
+- go tool cover -func coverage.txt
+- bash <(curl -s https://codecov.io/bash)
+
+after_script: vagrant/halt_cluster.sh
diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md
new file mode 100644
index 0000000..844f481
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md
@@ -0,0 +1,867 @@
+# Changelog
+
+#### Version 1.25.0 (2020-01-13)
+
+New Features:
+- Support TLS protocol in kafka-producer-performance
+  ([1538](https://github.com/Shopify/sarama/pull/1538)).
+- Add support for kafka 2.4.0
+  ([1552](https://github.com/Shopify/sarama/pull/1552)).
+
+Improvements:
+- Allow the Consumer to disable auto-commit offsets
+  ([1164](https://github.com/Shopify/sarama/pull/1164)).
+- Produce records with consistent timestamps
+  ([1455](https://github.com/Shopify/sarama/pull/1455)).
+
+Bug Fixes:
+- Fix incorrect SetTopicMetadata name mentions
+  ([1534](https://github.com/Shopify/sarama/pull/1534)).
+- Fix client.tryRefreshMetadata Println
+  ([1535](https://github.com/Shopify/sarama/pull/1535)).
+- Fix panic on calling updateMetadata on closed client
+  ([1531](https://github.com/Shopify/sarama/pull/1531)).
+- Fix possible faulty metrics in TestFuncProducing
+  ([1545](https://github.com/Shopify/sarama/pull/1545)).
+
+#### Version 1.24.1 (2019-10-31)
+
+New Features:
+- Add DescribeLogDirs Request/Response pair
+  ([1520](https://github.com/Shopify/sarama/pull/1520)).
+
+Bug Fixes:
+- Fix ClusterAdmin returning invalid controller ID on DescribeCluster
+  ([1518](https://github.com/Shopify/sarama/pull/1518)).
+- Fix issue with consumergroup not rebalancing when new partition is added
+  ([1525](https://github.com/Shopify/sarama/pull/1525)).
+- Ensure consistent use of read/write deadlines
+  ([1529](https://github.com/Shopify/sarama/pull/1529)).
+
+#### Version 1.24.0 (2019-10-09)
+
+New Features:
+- Add sticky partition assignor
+  ([1416](https://github.com/Shopify/sarama/pull/1416)).
+- Switch from cgo zstd package to pure Go implementation
+  ([1477](https://github.com/Shopify/sarama/pull/1477)).
+
+Improvements:
+- Allow creating ClusterAdmin from client
+  ([1415](https://github.com/Shopify/sarama/pull/1415)).
+- Set KafkaVersion in ListAcls method
+  ([1452](https://github.com/Shopify/sarama/pull/1452)).
+- Set request version in CreateACL ClusterAdmin method
+  ([1458](https://github.com/Shopify/sarama/pull/1458)).
+- Set request version in DeleteACL ClusterAdmin method
+  ([1461](https://github.com/Shopify/sarama/pull/1461)).
+- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest
+  ([1464](https://github.com/Shopify/sarama/pull/1464)).
+- Remove direct usage of gofork
+  ([1465](https://github.com/Shopify/sarama/pull/1465)).
+- Add support for Go 1.13
+  ([1478](https://github.com/Shopify/sarama/pull/1478)).
+- Improve behavior of NewMockListAclsResponse
+  ([1481](https://github.com/Shopify/sarama/pull/1481)).
+
+Bug Fixes:
+- Fix race condition in consumergroup example
+  ([1434](https://github.com/Shopify/sarama/pull/1434)).
+- Fix brokerProducer goroutine leak
+  ([1442](https://github.com/Shopify/sarama/pull/1442)).
+- Use released version of lz4 library
+  ([1469](https://github.com/Shopify/sarama/pull/1469)).
+- Set correct version in MockDeleteTopicsResponse
+  ([1484](https://github.com/Shopify/sarama/pull/1484)).
+- Fix CLI help message typo
+  ([1494](https://github.com/Shopify/sarama/pull/1494)).
+
+Known Issues:
+- Please **don't** use Zstd, as it doesn't work right now.
+  See https://github.com/Shopify/sarama/issues/1252
+
+#### Version 1.23.1 (2019-07-22)
+
+Bug Fixes:
+- Fix fetch delete bug record
+  ([1425](https://github.com/Shopify/sarama/pull/1425)).
+- Handle SASL/OAUTHBEARER token rejection
+  ([1428](https://github.com/Shopify/sarama/pull/1428)).
+
+#### Version 1.23.0 (2019-07-02)
+
+New Features:
+- Add support for Kafka 2.3.0
+  ([1418](https://github.com/Shopify/sarama/pull/1418)).
+- Add support for ListConsumerGroupOffsets v2
+  ([1374](https://github.com/Shopify/sarama/pull/1374)).
+- Add support for DeleteConsumerGroup
+  ([1417](https://github.com/Shopify/sarama/pull/1417)).
+- Add support for SASLVersion configuration
+  ([1410](https://github.com/Shopify/sarama/pull/1410)).
+- Add kerberos support
+  ([1366](https://github.com/Shopify/sarama/pull/1366)).
+
+Improvements:
+- Improve sasl_scram_client example
+  ([1406](https://github.com/Shopify/sarama/pull/1406)).
+- Fix shutdown and race-condition in consumer-group example
+  ([1404](https://github.com/Shopify/sarama/pull/1404)).
+- Add support for error codes 77—81
+  ([1397](https://github.com/Shopify/sarama/pull/1397)).
+- Pool internal objects allocated per message
+  ([1385](https://github.com/Shopify/sarama/pull/1385)).
+- Reduce packet decoder allocations
+  ([1373](https://github.com/Shopify/sarama/pull/1373)).
+- Support timeout when fetching metadata
+  ([1359](https://github.com/Shopify/sarama/pull/1359)).
+
+Bug Fixes:
+- Fix fetch size integer overflow
+  ([1376](https://github.com/Shopify/sarama/pull/1376)).
+- Handle and log throttled FetchResponses
+  ([1383](https://github.com/Shopify/sarama/pull/1383)).
+- Refactor misspelled word Resouce to Resource
+  ([1368](https://github.com/Shopify/sarama/pull/1368)).
+
+#### Version 1.22.1 (2019-04-29)
+
+Improvements:
+- Use zstd 1.3.8
+  ([1350](https://github.com/Shopify/sarama/pull/1350)).
+- Add support for SaslHandshakeRequest v1
+  ([1354](https://github.com/Shopify/sarama/pull/1354)).
+
+Bug Fixes:
+- Fix V5 MetadataRequest nullable topics array
+  ([1353](https://github.com/Shopify/sarama/pull/1353)).
+- Use a different SCRAM client for each broker connection
+  ([1349](https://github.com/Shopify/sarama/pull/1349)).
+- Fix AllowAutoTopicCreation for MetadataRequest greater than v3
+  ([1344](https://github.com/Shopify/sarama/pull/1344)).
+
+#### Version 1.22.0 (2019-04-09)
+
+New Features:
+- Add Offline Replicas Operation to Client
+  ([1318](https://github.com/Shopify/sarama/pull/1318)).
+- Allow using proxy when connecting to broker
+  ([1326](https://github.com/Shopify/sarama/pull/1326)).
+- Implement ReadCommitted
+  ([1307](https://github.com/Shopify/sarama/pull/1307)).
+- Add support for Kafka 2.2.0
+  ([1331](https://github.com/Shopify/sarama/pull/1331)).
+- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanisms
+  ([1295](https://github.com/Shopify/sarama/pull/1295)).
+
+Improvements:
+- Unregister all broker metrics on broker stop
+  ([1232](https://github.com/Shopify/sarama/pull/1232)).
+- Add SCRAM authentication example
+  ([1303](https://github.com/Shopify/sarama/pull/1303)).
+- Add consumergroup examples
+  ([1304](https://github.com/Shopify/sarama/pull/1304)).
+- Expose consumer batch size metric
+  ([1296](https://github.com/Shopify/sarama/pull/1296)).
+- Add TLS options to console producer and consumer
+  ([1300](https://github.com/Shopify/sarama/pull/1300)).
+- Reduce client close bookkeeping
+  ([1297](https://github.com/Shopify/sarama/pull/1297)).
+- Satisfy error interface in create responses
+  ([1154](https://github.com/Shopify/sarama/pull/1154)).
+- Please lint gods
+  ([1346](https://github.com/Shopify/sarama/pull/1346)).
+
+Bug Fixes:
+- Fix multi consumer group instance crash
+  ([1338](https://github.com/Shopify/sarama/pull/1338)).
+- Update lz4 to latest version
+  ([1347](https://github.com/Shopify/sarama/pull/1347)).
+- Retry ErrNotCoordinatorForConsumer in new consumergroup session
+  ([1231](https://github.com/Shopify/sarama/pull/1231)).
+- Fix cleanup error handler
+  ([1332](https://github.com/Shopify/sarama/pull/1332)).
+- Fix race condition in PartitionConsumer
+  ([1156](https://github.com/Shopify/sarama/pull/1156)).
+
+#### Version 1.21.0 (2019-02-24)
+
+New Features:
+- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest
+  ([1236](https://github.com/Shopify/sarama/pull/1236)).
+- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests
+  ([1178](https://github.com/Shopify/sarama/pull/1178)).
+- Implement SASL/OAUTHBEARER
+  ([1240](https://github.com/Shopify/sarama/pull/1240)).
+
+Improvements:
+- Add Go mod support
+  ([1282](https://github.com/Shopify/sarama/pull/1282)).
+- Add error codes 73—76
+  ([1239](https://github.com/Shopify/sarama/pull/1239)).
+- Add retry backoff function
+  ([1160](https://github.com/Shopify/sarama/pull/1160)).
+- Maintain metadata in the producer even when retries are disabled
+  ([1189](https://github.com/Shopify/sarama/pull/1189)).
+- Include ReplicaAssignment in ListTopics
+  ([1274](https://github.com/Shopify/sarama/pull/1274)).
+- Add producer performance tool
+  ([1222](https://github.com/Shopify/sarama/pull/1222)).
+- Add support LogAppend timestamps
+  ([1258](https://github.com/Shopify/sarama/pull/1258)).
+
+Bug Fixes:
+- Fix potential deadlock when a heartbeat request fails
+  ([1286](https://github.com/Shopify/sarama/pull/1286)).
+- Fix consuming compacted topic
+  ([1227](https://github.com/Shopify/sarama/pull/1227)).
+- Set correct Kafka version for DescribeConfigsRequest v1
+  ([1277](https://github.com/Shopify/sarama/pull/1277)).
+- Update kafka test version
+  ([1273](https://github.com/Shopify/sarama/pull/1273)).
+
+#### Version 1.20.1 (2019-01-10)
+
+New Features:
+- Add optional replica id in offset request
+  ([1100](https://github.com/Shopify/sarama/pull/1100)).
+
+Improvements:
+- Implement DescribeConfigs Request + Response v1 & v2
+  ([1230](https://github.com/Shopify/sarama/pull/1230)).
+- Reuse compression objects
+  ([1185](https://github.com/Shopify/sarama/pull/1185)).
+- Switch from png to svg for GoDoc link in README
+  ([1243](https://github.com/Shopify/sarama/pull/1243)).
+- Fix typo in deprecation notice for FetchResponseBlock.Records
+  ([1242](https://github.com/Shopify/sarama/pull/1242)).
+- Fix typos in consumer metadata response file
+  ([1244](https://github.com/Shopify/sarama/pull/1244)).
+
+Bug Fixes:
+- Revert to individual msg retries for non-idempotent
+  ([1203](https://github.com/Shopify/sarama/pull/1203)).
+- Respect MaxMessageBytes limit for uncompressed messages
+  ([1141](https://github.com/Shopify/sarama/pull/1141)).
+
+#### Version 1.20.0 (2018-12-10)
+
+New Features:
+ - Add support for zstd compression
+   ([#1170](https://github.com/Shopify/sarama/pull/1170)).
+ - Add support for Idempotent Producer
+   ([#1152](https://github.com/Shopify/sarama/pull/1152)).
+ - Add support for Kafka 2.1.0
+   ([#1229](https://github.com/Shopify/sarama/pull/1229)).
+ - Add support for OffsetCommit request/response pairs versions v1 to v5
+   ([#1201](https://github.com/Shopify/sarama/pull/1201)).
+ - Add support for OffsetFetch request/response pair up to version v5
+   ([#1198](https://github.com/Shopify/sarama/pull/1198)).
+
+Improvements:
+ - Export broker's Rack setting
+   ([#1173](https://github.com/Shopify/sarama/pull/1173)).
+ - Always use latest patch version of Go on CI
+   ([#1202](https://github.com/Shopify/sarama/pull/1202)).
+ - Add error codes 61 to 72
+   ([#1195](https://github.com/Shopify/sarama/pull/1195)).
+
+Bug Fixes:
+ - Fix build without cgo
+   ([#1182](https://github.com/Shopify/sarama/pull/1182)).
+ - Fix go vet suggestion in consumer group file
+   ([#1209](https://github.com/Shopify/sarama/pull/1209)).
+ - Fix typos in code and comments
+   ([#1228](https://github.com/Shopify/sarama/pull/1228)).
+
+#### Version 1.19.0 (2018-09-27)
+
+New Features:
+ - Implement a higher-level consumer group
+   ([#1099](https://github.com/Shopify/sarama/pull/1099)).
+
+Improvements:
+ - Add support for Go 1.11
+   ([#1176](https://github.com/Shopify/sarama/pull/1176)).
+
+Bug Fixes:
+ - Fix encoding of `MetadataResponse` with version 2 and higher
+   ([#1174](https://github.com/Shopify/sarama/pull/1174)).
+ - Fix race condition in mock async producer
+   ([#1174](https://github.com/Shopify/sarama/pull/1174)).
+
+#### Version 1.18.0 (2018-09-07)
+
+New Features:
+ - Make `Partitioner.RequiresConsistency` vary per-message
+   ([#1112](https://github.com/Shopify/sarama/pull/1112)).
+ - Add customizable partitioner
+   ([#1118](https://github.com/Shopify/sarama/pull/1118)).
+ - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`,
+   `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL`
+   ([#1055](https://github.com/Shopify/sarama/pull/1055)).
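+
+   For instance, creating a topic through the new `ClusterAdmin` might look
+   like this sketch (the broker address, topic name, and Kafka version are
+   placeholders, not values this release prescribes):
+
+   ```go
+   package main
+
+   import (
+   	"log"
+
+   	"github.com/Shopify/sarama"
+   )
+
+   func main() {
+   	config := sarama.NewConfig()
+   	config.Version = sarama.V1_0_0_0 // ClusterAdmin needs a modern broker version
+
+   	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, config)
+   	if err != nil {
+   		log.Fatal(err)
+   	}
+   	defer admin.Close()
+
+   	// Create a single-partition, single-replica topic; the final argument
+   	// requests an actual create rather than a validate-only dry run.
+   	err = admin.CreateTopic("example", &sarama.TopicDetail{
+   		NumPartitions:     1,
+   		ReplicationFactor: 1,
+   	}, false)
+   	if err != nil {
+   		log.Fatal(err)
+   	}
+   }
+   ```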
+
+Improvements:
+ - Add support for Kafka 2.0.0
+   ([#1149](https://github.com/Shopify/sarama/pull/1149)).
+ - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts
+   ([#1123](https://github.com/Shopify/sarama/pull/1123)).
+ - Simpler offset management
+   ([#1127](https://github.com/Shopify/sarama/pull/1127)).
+
+Bug Fixes:
+ - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka
+   ([#1110](https://github.com/Shopify/sarama/pull/1110)).
+ - Fix consumer block when response did not contain all the
+   expected topic/partition blocks
+   ([#1086](https://github.com/Shopify/sarama/pull/1086)).
+ - Fix consumer block when response contains only control messages
+   ([#1115](https://github.com/Shopify/sarama/pull/1115)).
+ - Add timeout config for ClusterAdmin requests
+   ([#1142](https://github.com/Shopify/sarama/pull/1142)).
+ - Add version check when producing message with headers
+   ([#1117](https://github.com/Shopify/sarama/pull/1117)).
+ - Fix `MetadataRequest` for empty list of topics
+   ([#1132](https://github.com/Shopify/sarama/pull/1132)).
+ - Fix producer topic metadata on-demand fetch when topic error happens in metadata response
+   ([#1125](https://github.com/Shopify/sarama/pull/1125)).
+
+#### Version 1.17.0 (2018-05-30)
+
+New Features:
+ - Add support for gzip compression levels
+   ([#1044](https://github.com/Shopify/sarama/pull/1044)).
+ - Add support for Metadata request/response pairs versions v1 to v5
+   ([#1047](https://github.com/Shopify/sarama/pull/1047),
+    [#1069](https://github.com/Shopify/sarama/pull/1069)).
+ - Add versioning to JoinGroup request/response pairs
+   ([#1098](https://github.com/Shopify/sarama/pull/1098))
+ - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs
+   ([#1065](https://github.com/Shopify/sarama/pull/1065),
+    [#1096](https://github.com/Shopify/sarama/pull/1096),
+    [#1027](https://github.com/Shopify/sarama/pull/1027)).
+ - Add `Controller()` method to Client interface
+   ([#1063](https://github.com/Shopify/sarama/pull/1063)).
+
+Improvements:
+ - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp
+   ([#1010](https://github.com/Shopify/sarama/pull/1010)).
+ - Expose missing protocol parts: `msgSet` and `recordBatch`
+   ([#1049](https://github.com/Shopify/sarama/pull/1049)).
+ - Add support for v1 DeleteTopics Request
+   ([#1052](https://github.com/Shopify/sarama/pull/1052)).
+ - Add support for Go 1.10
+   ([#1064](https://github.com/Shopify/sarama/pull/1064)).
+ - Claim support for Kafka 1.1.0
+   ([#1073](https://github.com/Shopify/sarama/pull/1073)).
+
+Bug Fixes:
+ - Fix FindCoordinatorResponse.encode to allow nil Coordinator
+   ([#1050](https://github.com/Shopify/sarama/pull/1050),
+    [#1051](https://github.com/Shopify/sarama/pull/1051)).
+ - Clear all metadata when we have the latest topic info
+   ([#1033](https://github.com/Shopify/sarama/pull/1033)).
+ - Make `PartitionConsumer.Close` idempotent
+   ([#1092](https://github.com/Shopify/sarama/pull/1092)).
+
+#### Version 1.16.0 (2018-02-12)
+
+New Features:
+ - Add support for the Create/Delete Topics request/response pairs
+   ([#1007](https://github.com/Shopify/sarama/pull/1007),
+    [#1008](https://github.com/Shopify/sarama/pull/1008)).
+ - Add support for the Describe/Create/Delete ACL request/response pairs
+   ([#1009](https://github.com/Shopify/sarama/pull/1009)).
+ - Add support for the five transaction-related request/response pairs
+   ([#1016](https://github.com/Shopify/sarama/pull/1016)).
+
+Improvements:
+ - Permit setting version on mock producer responses
+   ([#999](https://github.com/Shopify/sarama/pull/999)).
+ - Add `NewMockBrokerListener` helper for testing TLS connections
+   ([#1019](https://github.com/Shopify/sarama/pull/1019)).
+ - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB
+   which results in much higher throughput in most cases
+   ([#1024](https://github.com/Shopify/sarama/pull/1024)).
+ - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to
+   reduce CPU and memory usage when processing many partitions
+   ([#1028](https://github.com/Shopify/sarama/pull/1028)).
+ - Assign relative offsets to messages in the producer to save the brokers a
+   recompression pass
+   ([#1002](https://github.com/Shopify/sarama/pull/1002),
+    [#1015](https://github.com/Shopify/sarama/pull/1015)).
+
+Bug Fixes:
+ - Fix producing uncompressed batches with the new protocol format
+   ([#1032](https://github.com/Shopify/sarama/issues/1032)).
+ - Fix consuming compacted topics with the new protocol format
+   ([#1005](https://github.com/Shopify/sarama/issues/1005)).
+ - Fix consuming topics with a mix of protocol formats
+   ([#1021](https://github.com/Shopify/sarama/issues/1021)).
+ - Fix consuming when the broker includes multiple batches in a single response
+   ([#1022](https://github.com/Shopify/sarama/issues/1022)).
+ - Fix detection of `PartialTrailingMessage` when the partial message was
+   truncated before the magic value indicating its version
+   ([#1030](https://github.com/Shopify/sarama/pull/1030)).
+ - Fix expectation-checking in the mock of `SyncProducer.SendMessages`
+   ([#1035](https://github.com/Shopify/sarama/pull/1035)).
+
+#### Version 1.15.0 (2017-12-08)
+
+New Features:
+ - Claim official support for Kafka 1.0, though it did already work
+   ([#984](https://github.com/Shopify/sarama/pull/984)).
+ - Helper methods for Kafka version numbers to/from strings
+   ([#989](https://github.com/Shopify/sarama/pull/989)).
+ - Implement CreatePartitions request/response
+   ([#985](https://github.com/Shopify/sarama/pull/985)).
+
+Improvements:
+ - Add error codes 45-60
+   ([#986](https://github.com/Shopify/sarama/issues/986)).
+
+Bug Fixes:
+ - Fix slow consuming for certain Kafka 0.11/1.0 configurations
+   ([#982](https://github.com/Shopify/sarama/pull/982)).
+ - Correctly determine when a FetchResponse contains the new message format
+   ([#990](https://github.com/Shopify/sarama/pull/990)).
+ - Fix producing with multiple headers
+   ([#996](https://github.com/Shopify/sarama/pull/996)).
+ - Fix handling of truncated record batches
+   ([#998](https://github.com/Shopify/sarama/pull/998)).
+ - Fix leaking metrics when closing brokers
+   ([#991](https://github.com/Shopify/sarama/pull/991)).
+
+#### Version 1.14.0 (2017-11-13)
+
+New Features:
+ - Add support for the new Kafka 0.11 record-batch format, including the wire
+   protocol and the necessary behavioural changes in the producer and consumer.
+   Transactions and idempotency are not yet supported, but producing and
+   consuming should work with all the existing bells and whistles (batching,
+   compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta
+   of Arista Networks for this work. Part of
+   ([#901](https://github.com/Shopify/sarama/issues/901)).
+
+Bug Fixes:
+ - Fix encoding of ProduceResponse versions in test
+   ([#970](https://github.com/Shopify/sarama/pull/970)).
+ - Return partial replicas list when we have it
+   ([#975](https://github.com/Shopify/sarama/pull/975)).
+
+#### Version 1.13.0 (2017-10-04)
+
+New Features:
+ - Support for FetchRequest version 3
+   ([#905](https://github.com/Shopify/sarama/pull/905)).
+ - Permit setting version on mock FetchResponses
+   ([#939](https://github.com/Shopify/sarama/pull/939)).
+ - Add a configuration option to support storing only minimal metadata for
+   extremely large clusters
+   ([#937](https://github.com/Shopify/sarama/pull/937)).
+ - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
+   ([#932](https://github.com/Shopify/sarama/pull/932)).
+
+Improvements:
+ - Provide the block-level timestamp when consuming compressed messages
+   ([#885](https://github.com/Shopify/sarama/issues/885)).
+ - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
+   by the broker, which can be meaningful
+   ([#930](https://github.com/Shopify/sarama/pull/930)).
+ - Use a `Ticker` to reduce consumer timer overhead at the cost of higher
+   variance in the actual timeout
+   ([#933](https://github.com/Shopify/sarama/pull/933)).
+
+Bug Fixes:
+ - Gracefully handle messages with negative timestamps
+   ([#907](https://github.com/Shopify/sarama/pull/907)).
+ - Raise a proper error when encountering an unknown message version
+   ([#940](https://github.com/Shopify/sarama/pull/940)).
+
+#### Version 1.12.0 (2017-05-08)
+
+New Features:
+ - Added support for the `ApiVersions` request and response pair, and Kafka
+   version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note
+   that you still need to specify the Kafka version in the Sarama configuration
+   for the time being.
+ - Added a `Brokers` method to the Client which returns the complete set of
+   active brokers ([#813](https://github.com/Shopify/sarama/pull/813)).
+ - Added an `InSyncReplicas` method to the Client which returns the set of all
+   in-sync broker IDs for the given partition, now that the Kafka versions for
+   which this was misleading are no longer in our supported set
+   ([#872](https://github.com/Shopify/sarama/pull/872)).
+ - Added a `NewCustomHashPartitioner` method which allows constructing a hash
+   partitioner with a custom hash method in case the default (FNV-1a) is not
+   suitable
+   ([#837](https://github.com/Shopify/sarama/pull/837),
+    [#841](https://github.com/Shopify/sarama/pull/841)).
+
+Improvements:
+ - Recognize more Kafka error codes
+   ([#859](https://github.com/Shopify/sarama/pull/859)).
+
+Bug Fixes:
+ - Fix an issue where decoding a malformed FetchRequest would not return the
+   correct error ([#818](https://github.com/Shopify/sarama/pull/818)).
+ - Respect ordering of group protocols in JoinGroupRequests. This fix is
+   transparent if you're using the `AddGroupProtocol` or
+   `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
+   the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols`
+   ([#812](https://github.com/Shopify/sarama/issues/812)).
+ - Fix an alignment-related issue with atomics on 32-bit architectures
+   ([#859](https://github.com/Shopify/sarama/pull/859)).
+
+#### Version 1.11.0 (2016-12-20)
+
+_Important:_ As of Sarama 1.11 it is necessary to set the config value of
+`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
+versions would silently override this value when instantiating a SyncProducer
+which led to unexpected values and data races.
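+
+As a minimal sketch of the required setting (the broker address here is a
+placeholder):
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/Shopify/sarama"
+)
+
+func main() {
+	config := sarama.NewConfig()
+	// As of Sarama 1.11, this must be set before constructing a SyncProducer.
+	config.Producer.Return.Successes = true
+
+	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer producer.Close()
+}
+```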
+
+New Features:
+ - Metrics! Thanks to Sébastien Launay for all his work on this feature
+   ([#701](https://github.com/Shopify/sarama/pull/701),
+    [#746](https://github.com/Shopify/sarama/pull/746),
+    [#766](https://github.com/Shopify/sarama/pull/766)).
+ - Add support for LZ4 compression
+   ([#786](https://github.com/Shopify/sarama/pull/786)).
+ - Add support for ListOffsetRequest v1 and Kafka 0.10.1
+   ([#775](https://github.com/Shopify/sarama/pull/775)).
+ - Added a `HighWaterMarks` method to the Consumer which aggregates the
+   `HighWaterMarkOffset` values of its child topic/partitions
+   ([#769](https://github.com/Shopify/sarama/pull/769)).
+
+Bug Fixes:
+ - Fixed producing when using timestamps, compression and Kafka 0.10
+   ([#759](https://github.com/Shopify/sarama/pull/759)).
+ - Added missing decoder methods to DescribeGroups response
+   ([#756](https://github.com/Shopify/sarama/pull/756)).
+ - Fix producer shutdown when `Return.Errors` is disabled
+   ([#787](https://github.com/Shopify/sarama/pull/787)).
+ - Don't mutate configuration in SyncProducer
+   ([#790](https://github.com/Shopify/sarama/pull/790)).
+ - Fix crash on SASL initialization failure
+   ([#795](https://github.com/Shopify/sarama/pull/795)).
+
+#### Version 1.10.1 (2016-08-30)
+
+Bug Fixes:
+ - Fix the documentation for `HashPartitioner` which was incorrect
+   ([#717](https://github.com/Shopify/sarama/pull/717)).
+ - Permit client creation even when it is limited by ACLs
+   ([#722](https://github.com/Shopify/sarama/pull/722)).
+ - Several fixes to the consumer timer optimization code, regressions introduced
+   in v1.10.0. Go's timers are finicky
+   ([#730](https://github.com/Shopify/sarama/pull/730),
+    [#733](https://github.com/Shopify/sarama/pull/733),
+    [#734](https://github.com/Shopify/sarama/pull/734)).
+ - Handle consuming compressed relative offsets with Kafka 0.10
+   ([#735](https://github.com/Shopify/sarama/pull/735)).
+
+#### Version 1.10.0 (2016-08-02)
+
+_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
+Kafka you are running against (via the `config.Version` value) in order to use
+features that may not be compatible with old Kafka versions. If you don't
+specify this value it will default to 0.8.2 (the minimum supported), and trying
+to use more recent features (like the offset manager) will fail with an error.
+
+_Also:_ The offset-manager's behaviour has been changed to match the upstream
+java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and
+[#713](https://github.com/Shopify/sarama/pull/713)). If you use the
+offset-manager, please ensure that you are committing one *greater* than the
+last consumed message offset or else you may end up consuming duplicate
+messages.
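+
+A combined sketch of both notes, assuming placeholder broker, group, and topic
+names:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/Shopify/sarama"
+)
+
+func main() {
+	config := sarama.NewConfig()
+	// Tell Sarama which Kafka version it is talking to; left unset, it
+	// assumes 0.8.2 and newer features will fail with an error.
+	config.Version = sarama.V0_10_0_0
+
+	client, err := sarama.NewClient([]string{"localhost:9092"}, config)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer client.Close()
+
+	om, err := sarama.NewOffsetManagerFromClient("my-group", client)
+	if err != nil {
+		log.Fatal(err)
+	}
+	pom, err := om.ManagePartition("my-topic", 0)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer pom.Close()
+
+	// Commit one *greater* than the last consumed offset, per the note above.
+	var lastConsumed int64 = 41 // placeholder value
+	pom.MarkOffset(lastConsumed+1, "")
+}
+```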
+
+New Features:
+ - Support for Kafka 0.10
+   ([#672](https://github.com/Shopify/sarama/pull/672),
+    [#678](https://github.com/Shopify/sarama/pull/678),
+    [#681](https://github.com/Shopify/sarama/pull/681), and others).
+ - Support for configuring the target Kafka version
+   ([#676](https://github.com/Shopify/sarama/pull/676)).
+ - Batch producing support in the SyncProducer
+   ([#677](https://github.com/Shopify/sarama/pull/677)).
+ - Extend producer mock to allow setting expectations on message contents
+   ([#667](https://github.com/Shopify/sarama/pull/667)).
+
+Improvements:
+ - Support `nil` compressed messages for deleting in compacted topics
+   ([#634](https://github.com/Shopify/sarama/pull/634)).
+ - Pre-allocate decoding errors, greatly reducing heap usage and GC time against
+   misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)).
+ - Re-use consumer expiry timers, removing one allocation per consumed message
+   ([#707](https://github.com/Shopify/sarama/pull/707)).
+
+Bug Fixes:
+ - Actually default the client ID to "sarama" like we say we do
+   ([#664](https://github.com/Shopify/sarama/pull/664)).
+ - Fix a rare issue where `Client.Leader` could return the wrong error
+   ([#685](https://github.com/Shopify/sarama/pull/685)).
+ - Fix a possible tight loop in the consumer
+   ([#693](https://github.com/Shopify/sarama/pull/693)).
+ - Match upstream's offset-tracking behaviour
+   ([#705](https://github.com/Shopify/sarama/pull/705)).
+ - Report UnknownTopicOrPartition errors from the offset manager
+   ([#706](https://github.com/Shopify/sarama/pull/706)).
+ - Fix possible negative partition value from the HashPartitioner
+   ([#709](https://github.com/Shopify/sarama/pull/709)).
+
+#### Version 1.9.0 (2016-05-16)
+
+New Features:
+ - Add support for custom offset manager retention durations
+   ([#602](https://github.com/Shopify/sarama/pull/602)).
+ - Publish low-level mocks to enable testing of third-party producer/consumer
+   implementations ([#570](https://github.com/Shopify/sarama/pull/570)).
+ - Declare support for Golang 1.6
+   ([#611](https://github.com/Shopify/sarama/pull/611)).
+ - Support for SASL plain-text auth
+   ([#648](https://github.com/Shopify/sarama/pull/648)).
+
+Improvements:
+ - Simplified broker locking scheme slightly
+   ([#604](https://github.com/Shopify/sarama/pull/604)).
+ - Documentation cleanup
+   ([#605](https://github.com/Shopify/sarama/pull/605),
+    [#621](https://github.com/Shopify/sarama/pull/621),
+    [#654](https://github.com/Shopify/sarama/pull/654)).
+
+Bug Fixes:
+ - Fix race condition shutting down the OffsetManager
+   ([#658](https://github.com/Shopify/sarama/pull/658)).
+
+#### Version 1.8.0 (2016-02-01)
+
+New Features:
+ - Full support for Kafka 0.9:
+   - All protocol messages and fields
+   ([#586](https://github.com/Shopify/sarama/pull/586),
+   [#588](https://github.com/Shopify/sarama/pull/588),
+   [#590](https://github.com/Shopify/sarama/pull/590)).
+   - Verified that TLS support works
+   ([#581](https://github.com/Shopify/sarama/pull/581)).
+   - Fixed the OffsetManager compatibility
+   ([#585](https://github.com/Shopify/sarama/pull/585)).
+
+Improvements:
+ - Optimize for fewer system calls when reading from the network
+   ([#584](https://github.com/Shopify/sarama/pull/584)).
+ - Automatically retry `InvalidMessage` errors to match upstream behaviour
+   ([#589](https://github.com/Shopify/sarama/pull/589)).
+
+#### Version 1.7.0 (2015-12-11)
+
+New Features:
+ - Preliminary support for Kafka 0.9
+   ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several
+   caveats:
+   - Protocol-layer support is mostly in place
+     ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9
+     renamed some messages and fields, which we did not mirror in order to
+     preserve API compatibility.
+   - The producer and consumer work against 0.9, but the offset manager does
+     not ([#573](https://github.com/Shopify/sarama/pull/573)).
+   - TLS support may or may not work
+     ([#581](https://github.com/Shopify/sarama/pull/581)).
+
+Improvements:
+ - Don't wait for request timeouts on dead brokers, greatly speeding recovery
+   when the TCP connection is left hanging
+   ([#548](https://github.com/Shopify/sarama/pull/548)).
+ - Refactored part of the producer. The new version provides a much more elegant
+   solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also
+   slightly more efficient, and much more precise in calculating batch sizes
+   when compression is used
+   ([#549](https://github.com/Shopify/sarama/pull/549),
+   [#550](https://github.com/Shopify/sarama/pull/550),
+   [#551](https://github.com/Shopify/sarama/pull/551)).
+
+Bug Fixes:
+ - Fix race condition in consumer test mock
+   ([#553](https://github.com/Shopify/sarama/pull/553)).
+
+#### Version 1.6.1 (2015-09-25)
+
+Bug Fixes:
+ - Fix panic that could occur if a user-supplied message value failed to encode
+   ([#449](https://github.com/Shopify/sarama/pull/449)).
+
+#### Version 1.6.0 (2015-09-04)
+
+New Features:
+ - Implementation of a consumer offset manager using the APIs introduced in
+   Kafka 0.8.2. The API is designed mainly for integration into a future
+   high-level consumer, not for direct use, although it is *possible* to use it
+   directly.
+   ([#461](https://github.com/Shopify/sarama/pull/461)).
+
+Improvements:
+ - CRC32 calculation is much faster on machines with SSE4.2 instructions,
+   removing a major hotspot from most profiles
+   ([#255](https://github.com/Shopify/sarama/pull/255)).
+
+Bug Fixes:
+ - Make protocol decoding more robust against some malformed packets generated
+   by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523),
+   [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways
+   ([#528](https://github.com/Shopify/sarama/pull/528)).
+ - Fix a potential race condition panic in the consumer on shutdown
+   ([#529](https://github.com/Shopify/sarama/pull/529)).
+
+#### Version 1.5.0 (2015-08-17)
+
+New Features:
+ - TLS-encrypted network connections are now supported. This feature is subject
+   to change when Kafka releases built-in TLS support, but for now this is
+   enough to work with TLS-terminating proxies
+   ([#154](https://github.com/Shopify/sarama/pull/154)).
+
+Improvements:
+ - The consumer will not block if a single partition is not drained by the user;
+   all other partitions will continue to consume normally
+   ([#485](https://github.com/Shopify/sarama/pull/485)).
+ - Formatting of error strings has been much improved
+   ([#495](https://github.com/Shopify/sarama/pull/495)).
+ - Internal refactoring of the producer for code cleanliness and to enable
+   future work ([#300](https://github.com/Shopify/sarama/pull/300)).
+
+Bug Fixes:
+ - Fix a potential deadlock in the consumer on shutdown
+   ([#475](https://github.com/Shopify/sarama/pull/475)).
+
+#### Version 1.4.3 (2015-07-21)
+
+Bug Fixes:
+ - Don't include the partitioner in the producer's "fetch partitions"
+   circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
+ - Don't retry messages until the broker is closed when abandoning a broker in
+   the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
+ - Update the import path for snappy-go, it has moved again and the API has
+   changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
+
+#### Version 1.4.2 (2015-05-27)
+
+Bug Fixes:
+ - Update the import path for snappy-go, it has moved from google code to github
+   ([#456](https://github.com/Shopify/sarama/pull/456)).
+
+#### Version 1.4.1 (2015-05-25)
+
+Improvements:
+ - Optimizations when decoding snappy messages, thanks to John Potocny
+   ([#446](https://github.com/Shopify/sarama/pull/446)).
+
+Bug Fixes:
+ - Fix hypothetical race conditions on producer shutdown
+   ([#450](https://github.com/Shopify/sarama/pull/450),
+   [#451](https://github.com/Shopify/sarama/pull/451)).
+
+#### Version 1.4.0 (2015-05-01)
+
+New Features:
+ - The consumer now implements `Topics()` and `Partitions()` methods to enable
+   users to dynamically choose what topics/partitions to consume without
+   instantiating a full client
+   ([#431](https://github.com/Shopify/sarama/pull/431)).
+ - The partition-consumer now exposes the high water mark offset value returned
+   by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)).
+ - Added a `kafka-console-consumer` tool capable of handling multiple
+   partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
+   ([#439](https://github.com/Shopify/sarama/pull/439),
+   [#442](https://github.com/Shopify/sarama/pull/442)).
+
+Improvements:
+ - The producer's logging during retry scenarios is more consistent, more
+   useful, and slightly less verbose
+   ([#429](https://github.com/Shopify/sarama/pull/429)).
+ - The client now shuffles its initial list of seed brokers in order to prevent
+   thundering herd on the first broker in the list
+   ([#441](https://github.com/Shopify/sarama/pull/441)).
+
+Bug Fixes:
+ - The producer now correctly manages its state if retries occur when it is
+   shutting down, fixing several instances of confusing behaviour and at least
+   one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
+ - The consumer now handles messages for different partitions asynchronously,
+   making it much more resilient to specific user code ordering
+   ([#325](https://github.com/Shopify/sarama/pull/325)).
+
+#### Version 1.3.0 (2015-04-16)
+
+New Features:
+ - The client now tracks consumer group coordinators using
+   ConsumerMetadataRequests similar to how it tracks partition leadership using
+   regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
+   This adds two methods to the client API:
+   - `Coordinator(consumerGroup string) (*Broker, error)`
+   - `RefreshCoordinator(consumerGroup string) error`
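+
+   A sketch of the new lookup (the broker address and group name are
+   placeholders):
+
+   ```go
+   package main
+
+   import (
+   	"log"
+
+   	"github.com/Shopify/sarama"
+   )
+
+   func main() {
+   	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
+   	if err != nil {
+   		log.Fatal(err)
+   	}
+   	defer client.Close()
+
+   	// Ask the cluster which broker coordinates this consumer group.
+   	broker, err := client.Coordinator("my-group")
+   	if err != nil {
+   		log.Fatal(err)
+   	}
+   	log.Printf("coordinator for my-group is broker %d at %s", broker.ID(), broker.Addr())
+   }
+   ```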
+
+Improvements:
+ - ConsumerMetadataResponses now automatically create a Broker object out of the
+   ID/address/port combination for the Coordinator; accessing the fields
+   individually has been deprecated
+   ([#413](https://github.com/Shopify/sarama/pull/413)).
+ - Much improved handling of `OffsetOutOfRange` errors in the consumer.
+   Consumers will fail to start if the provided offset is out of range
+   ([#418](https://github.com/Shopify/sarama/pull/418))
+   and they will automatically shut down if the offset falls out of range
+   ([#424](https://github.com/Shopify/sarama/pull/424)).
+ - Small performance improvement in encoding and decoding protocol messages
+   ([#427](https://github.com/Shopify/sarama/pull/427)).
+
+Bug Fixes:
+ - Fix a rare race condition in the client's background metadata refresher if
+   it happens to be activated while the client is being closed
+   ([#422](https://github.com/Shopify/sarama/pull/422)).
+
+#### Version 1.2.0 (2015-04-07)
+
+Improvements:
+ - The producer's behaviour when `Flush.Frequency` is set is now more intuitive
+   ([#389](https://github.com/Shopify/sarama/pull/389)).
+ - The producer is now somewhat more memory-efficient during and after retrying
+   messages due to an improved queue implementation
+   ([#396](https://github.com/Shopify/sarama/pull/396)).
+ - The consumer produces much more useful logging output when leadership
+   changes ([#385](https://github.com/Shopify/sarama/pull/385)).
+ - The client's `GetOffset` method will now automatically refresh metadata and
+   retry once in the event of stale information or similar
+   ([#394](https://github.com/Shopify/sarama/pull/394)).
+ - Broker connections now have support for using TCP keepalives
+   ([#407](https://github.com/Shopify/sarama/issues/407)).
+
+Bug Fixes:
+ - The OffsetCommitRequest message now correctly implements all three possible
+   API versions ([#390](https://github.com/Shopify/sarama/pull/390),
+   [#400](https://github.com/Shopify/sarama/pull/400)).
+
+#### Version 1.1.0 (2015-03-20)
+
+Improvements:
+ - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
+   broken topics don't choke throughput
+   ([#373](https://github.com/Shopify/sarama/pull/373)).
+
+Bug Fixes:
+ - Fix the producer's internal reference counting in certain unusual scenarios
+   ([#367](https://github.com/Shopify/sarama/pull/367)).
+ - Fix the consumer's internal reference counting in certain unusual scenarios
+   ([#369](https://github.com/Shopify/sarama/pull/369)).
+ - Fix a condition where the producer's internal control messages could have
+   gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
+ - Fix an issue where invalid partition lists would be cached when asking for
+   metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
+
+
+#### Version 1.0.0 (2015-03-17)
+
+Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
+
+- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
+- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
+- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
+- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
+- All the configuration values have been unified in the `Config` struct.
+- Much improved test suite.
diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/Shopify/sarama/LICENSE
new file mode 100644
index 0000000..d2bf435
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2013 Shopify
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile
new file mode 100644
index 0000000..9c8329e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/Makefile
@@ -0,0 +1,56 @@
+export GO111MODULE=on
+
+default: fmt vet errcheck test lint
+
+# Taken from https://github.com/codecov/example-go#caveat-multiple-files
+.PHONY: test
+test:
+	echo "mode: atomic" > coverage.txt
+	for d in `go list ./...`; do \
+		go test -p 1 -v -timeout 6m -race -coverprofile=profile.out -covermode=atomic $$d || exit 1; \
+		if [ -f profile.out ]; then \
+			tail +2 profile.out >> coverage.txt; \
+			rm profile.out; \
+		fi \
+	done
+
+GOLINT := $(shell command -v golint)
+
+.PHONY: lint
+lint:
+ifndef GOLINT
+	go get golang.org/x/lint/golint
+endif
+	go list ./... | xargs golint
+
+.PHONY: vet
+vet:
+	go vet ./...
+
+ERRCHECK := $(shell command -v errcheck)
+# See https://github.com/kisielk/errcheck/pull/141 for details on ignorepkg
+.PHONY: errcheck
+errcheck:
+ifndef ERRCHECK
+	go get github.com/kisielk/errcheck
+endif
+	errcheck -ignorepkg fmt github.com/Shopify/sarama/...
+
+.PHONY: fmt
+fmt:
+	@if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi
+
+.PHONY: install_dependencies
+install_dependencies: get
+
+.PHONY: get
+get:
+	go get -v ./...
+
+.PHONY: clean
+clean:
+	go clean ./...
+
+.PHONY: tidy
+tidy:
+	go mod tidy -v
diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md
new file mode 100644
index 0000000..18ad7bf
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/README.md
@@ -0,0 +1,36 @@
+# sarama
+
+[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.svg)](https://godoc.org/github.com/Shopify/sarama)
+[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
+[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama)
+
+Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
+
+## Getting started
+
+- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama).
+- Mocks for testing are available in the [mocks](./mocks) subpackage.
+- The [examples](./examples) directory contains more elaborate example applications.
+- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
+
+You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
+
+## Compatibility and API stability
+
+Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
+the two latest stable releases of Kafka and Go, and we provide a two month
+grace period for older releases. This means we currently officially support
+Go 1.12 through 1.13, and Kafka 2.1 through 2.4, although older releases are
+still likely to work.
+
+Sarama follows semantic versioning and provides API stability via the gopkg.in service.
+You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
+A changelog is available [here](CHANGELOG.md).
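+
+As a quick first taste, a minimal producer could look like the sketch below
+(the broker address and topic name are placeholders):
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/Shopify/sarama"
+)
+
+func main() {
+	config := sarama.NewConfig()
+	config.Producer.Return.Successes = true // required by the SyncProducer
+
+	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer producer.Close()
+
+	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
+		Topic: "example",
+		Value: sarama.StringEncoder("hello, kafka"),
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("delivered to partition %d at offset %d", partition, offset)
+}
+```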
+
+## Contributing
+
+- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
+- Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details.
+- The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information.
+- For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
+- If you have any questions, just ask!
diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile
new file mode 100644
index 0000000..f4b848a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/Vagrantfile
@@ -0,0 +1,20 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION = "2"
+
+# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
+MEMORY = 3072
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+  config.vm.box = "ubuntu/trusty64"
+
+  config.vm.provision :shell, path: "vagrant/provision.sh"
+
+  config.vm.network "private_network", ip: "192.168.100.67"
+
+  config.vm.provider "virtualbox" do |v|
+    v.memory = MEMORY
+  end
+end
diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/Shopify/sarama/acl_bindings.go
new file mode 100644
index 0000000..50b689d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_bindings.go
@@ -0,0 +1,138 @@
+package sarama
+
+//Resource holds information about acl resource type
+type Resource struct {
+	ResourceType        AclResourceType
+	ResourceName        string
+	ResourcePatternType AclResourcePatternType
+}
+
+func (r *Resource) encode(pe packetEncoder, version int16) error {
+	pe.putInt8(int8(r.ResourceType))
+
+	if err := pe.putString(r.ResourceName); err != nil {
+		return err
+	}
+
+	if version == 1 {
+		if r.ResourcePatternType == AclPatternUnknown {
+			Logger.Print("Cannot encode an unknown resource pattern type, using Literal instead")
+			r.ResourcePatternType = AclPatternLiteral
+		}
+		pe.putInt8(int8(r.ResourcePatternType))
+	}
+
+	return nil
+}
+
+func (r *Resource) decode(pd packetDecoder, version int16) (err error) {
+	resourceType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	r.ResourceType = AclResourceType(resourceType)
+
+	if r.ResourceName, err = pd.getString(); err != nil {
+		return err
+	}
+	if version == 1 {
+		pattern, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.ResourcePatternType = AclResourcePatternType(pattern)
+	}
+
+	return nil
+}
+
+//Acl holds information about acl type
+type Acl struct {
+	Principal      string
+	Host           string
+	Operation      AclOperation
+	PermissionType AclPermissionType
+}
+
+func (a *Acl) encode(pe packetEncoder) error {
+	if err := pe.putString(a.Principal); err != nil {
+		return err
+	}
+
+	if err := pe.putString(a.Host); err != nil {
+		return err
+	}
+
+	pe.putInt8(int8(a.Operation))
+	pe.putInt8(int8(a.PermissionType))
+
+	return nil
+}
+
+func (a *Acl) decode(pd packetDecoder, version int16) (err error) {
+	if a.Principal, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if a.Host, err = pd.getString(); err != nil {
+		return err
+	}
+
+	operation, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Operation = AclOperation(operation)
+
+	permissionType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.PermissionType = AclPermissionType(permissionType)
+
+	return nil
+}
+
+//ResourceAcls is an acl resource type
+type ResourceAcls struct {
+	Resource
+	Acls []*Acl
+}
+
+func (r *ResourceAcls) encode(pe packetEncoder, version int16) error {
+	if err := r.Resource.encode(pe, version); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Acls)); err != nil {
+		return err
+	}
+	for _, acl := range r.Acls {
+		if err := acl.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *ResourceAcls) decode(pd packetDecoder, version int16) error {
+	if err := r.Resource.decode(pd, version); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Acls = make([]*Acl, n)
+	for i := 0; i < n; i++ {
+		r.Acls[i] = new(Acl)
+		if err := r.Acls[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go
new file mode 100644
index 0000000..da1cdef
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_create_request.go
@@ -0,0 +1,85 @@
+package sarama
+
+//CreateAclsRequest is an acl creation request
+type CreateAclsRequest struct {
+	Version      int16
+	AclCreations []*AclCreation
+}
+
+func (c *CreateAclsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(c.AclCreations)); err != nil {
+		return err
+	}
+
+	for _, aclCreation := range c.AclCreations {
+		if err := aclCreation.encode(pe, c.Version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	c.Version = version
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.AclCreations = make([]*AclCreation, n)
+
+	for i := 0; i < n; i++ {
+		c.AclCreations[i] = new(AclCreation)
+		if err := c.AclCreations[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsRequest) key() int16 {
+	return 30
+}
+
+func (c *CreateAclsRequest) version() int16 {
+	return c.Version
+}
+
+func (c *CreateAclsRequest) requiredVersion() KafkaVersion {
+	switch c.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+//AclCreation is a wrapper around Resource and Acl type
+type AclCreation struct {
+	Resource
+	Acl
+}
+
+func (a *AclCreation) encode(pe packetEncoder, version int16) error {
+	if err := a.Resource.encode(pe, version); err != nil {
+		return err
+	}
+	if err := a.Acl.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) {
+	if err := a.Resource.decode(pd, version); err != nil {
+		return err
+	}
+	if err := a.Acl.decode(pd, version); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go
new file mode 100644
index 0000000..f5a5e9a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_create_response.go
@@ -0,0 +1,90 @@
+package sarama
+
+import "time"
+
+//CreateAclsResponse is an acl creation response type
+type CreateAclsResponse struct {
+	ThrottleTime         time.Duration
+	AclCreationResponses []*AclCreationResponse
+}
+
+func (c *CreateAclsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil {
+		return err
+	}
+
+	for _, aclCreationResponse := range c.AclCreationResponses {
+		if err := aclCreationResponse.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.AclCreationResponses = make([]*AclCreationResponse, n)
+	for i := 0; i < n; i++ {
+		c.AclCreationResponses[i] = new(AclCreationResponse)
+		if err := c.AclCreationResponses[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsResponse) key() int16 {
+	return 30
+}
+
+func (c *CreateAclsResponse) version() int16 {
+	return 0
+}
+
+func (c *CreateAclsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+//AclCreationResponse is an acl creation response type
+type AclCreationResponse struct {
+	Err    KError
+	ErrMsg *string
+}
+
+func (a *AclCreationResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(a.Err))
+
+	if err := pe.putNullableString(a.ErrMsg); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	a.Err = KError(kerr)
+
+	if a.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go
new file mode 100644
index 0000000..15908ea
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_delete_request.go
@@ -0,0 +1,58 @@
+package sarama
+
+//DeleteAclsRequest is a delete acl request
+type DeleteAclsRequest struct {
+	Version int
+	Filters []*AclFilter
+}
+
+func (d *DeleteAclsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(d.Filters)); err != nil {
+		return err
+	}
+
+	for _, filter := range d.Filters {
+		filter.Version = d.Version
+		if err := filter.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	d.Version = int(version)
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	d.Filters = make([]*AclFilter, n)
+	for i := 0; i < n; i++ {
+		d.Filters[i] = new(AclFilter)
+		d.Filters[i].Version = int(version)
+		if err := d.Filters[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsRequest) key() int16 {
+	return 31
+}
+
+func (d *DeleteAclsRequest) version() int16 {
+	return int16(d.Version)
+}
+
+func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go
new file mode 100644
index 0000000..6529565
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_delete_response.go
@@ -0,0 +1,159 @@
+package sarama
+
+import "time"
+
+//DeleteAclsResponse is a delete acl response
+type DeleteAclsResponse struct {
+	Version         int16
+	ThrottleTime    time.Duration
+	FilterResponses []*FilterResponse
+}
+
+func (d *DeleteAclsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(d.FilterResponses)); err != nil {
+		return err
+	}
+
+	for _, filterResponse := range d.FilterResponses {
+		if err := filterResponse.encode(pe, d.Version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	d.FilterResponses = make([]*FilterResponse, n)
+
+	for i := 0; i < n; i++ {
+		d.FilterResponses[i] = new(FilterResponse)
+		if err := d.FilterResponses[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsResponse) key() int16 {
+	return 31
+}
+
+func (d *DeleteAclsResponse) version() int16 {
+	return int16(d.Version)
+}
+
+func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+//FilterResponse is a filter response type
+type FilterResponse struct {
+	Err          KError
+	ErrMsg       *string
+	MatchingAcls []*MatchingAcl
+}
+
+func (f *FilterResponse) encode(pe packetEncoder, version int16) error {
+	pe.putInt16(int16(f.Err))
+	if err := pe.putNullableString(f.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil {
+		return err
+	}
+	for _, matchingAcl := range f.MatchingAcls {
+		if err := matchingAcl.encode(pe, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	f.Err = KError(kerr)
+
+	if f.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	f.MatchingAcls = make([]*MatchingAcl, n)
+	for i := 0; i < n; i++ {
+		f.MatchingAcls[i] = new(MatchingAcl)
+		if err := f.MatchingAcls[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+//MatchingAcl is a matching acl type
+type MatchingAcl struct {
+	Err    KError
+	ErrMsg *string
+	Resource
+	Acl
+}
+
+func (m *MatchingAcl) encode(pe packetEncoder, version int16) error {
+	pe.putInt16(int16(m.Err))
+	if err := pe.putNullableString(m.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := m.Resource.encode(pe, version); err != nil {
+		return err
+	}
+
+	if err := m.Acl.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	m.Err = KError(kerr)
+
+	if m.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	if err := m.Resource.decode(pd, version); err != nil {
+		return err
+	}
+
+	if err := m.Acl.decode(pd, version); err != nil {
+		return err
+	}
+
+	return nil
+}
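Since MatchingAcl embeds both Resource and Acl, the matched bindings can be read directly off each entry. A sketch (assuming `sarama` and `log` are imported; Broker.DeleteAcls and the field names promoted from Resource and Acl come from broker.go and acl_bindings.go, outside this diff):

// Send a delete and log which ACL bindings were actually removed.
func logDeletedAcls(b *sarama.Broker, req *sarama.DeleteAclsRequest) error {
	rsp, err := b.DeleteAcls(req)
	if err != nil {
		return err
	}
	for _, fr := range rsp.FilterResponses {
		if fr.Err != sarama.ErrNoError {
			return fr.Err
		}
		for _, m := range fr.MatchingAcls {
			// Resource and Acl fields are promoted through embedding.
			log.Printf("deleted %v on %s for %s", m.Operation, m.ResourceName, m.Principal)
		}
	}
	return nil
}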
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go
new file mode 100644
index 0000000..5222d46
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_describe_request.go
@@ -0,0 +1,35 @@
+package sarama
+
+//DescribeAclsRequest is a describe acl request type
+type DescribeAclsRequest struct {
+	Version int
+	AclFilter
+}
+
+func (d *DescribeAclsRequest) encode(pe packetEncoder) error {
+	d.AclFilter.Version = d.Version
+	return d.AclFilter.encode(pe)
+}
+
+func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	d.Version = int(version)
+	d.AclFilter.Version = int(version)
+	return d.AclFilter.decode(pd, version)
+}
+
+func (d *DescribeAclsRequest) key() int16 {
+	return 29
+}
+
+func (d *DescribeAclsRequest) version() int16 {
+	return int16(d.Version)
+}
+
+func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go
new file mode 100644
index 0000000..12126e5
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_describe_response.go
@@ -0,0 +1,87 @@
+package sarama
+
+import "time"
+
+//DescribeAclsResponse is a describe acl response type
+type DescribeAclsResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Err          KError
+	ErrMsg       *string
+	ResourceAcls []*ResourceAcls
+}
+
+func (d *DescribeAclsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
+	pe.putInt16(int16(d.Err))
+
+	if err := pe.putNullableString(d.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil {
+		return err
+	}
+
+	for _, resourceAcl := range d.ResourceAcls {
+		if err := resourceAcl.encode(pe, d.Version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	d.Err = KError(kerr)
+
+	errmsg, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	if errmsg != "" {
+		d.ErrMsg = &errmsg
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	d.ResourceAcls = make([]*ResourceAcls, n)
+
+	for i := 0; i < n; i++ {
+		d.ResourceAcls[i] = new(ResourceAcls)
+		if err := d.ResourceAcls[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DescribeAclsResponse) key() int16 {
+	return 29
+}
+
+func (d *DescribeAclsResponse) version() int16 {
+	return int16(d.Version)
+}
+
+func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
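A round-trip sketch (assuming `sarama` and `fmt` are imported; Broker.DescribeAcls lives in broker.go and the ResourceAcls/Acl field names come from acl_bindings.go, neither of which is part of this file):

// List the ACLs attached to a single topic.
func describeTopicAcls(b *sarama.Broker, topic string) error {
	rsp, err := b.DescribeAcls(&sarama.DescribeAclsRequest{
		Version: 1, // requires brokers >= V2_0_0_0
		AclFilter: sarama.AclFilter{
			ResourceType:              sarama.AclResourceTopic,
			ResourceName:              &topic,
			ResourcePatternTypeFilter: sarama.AclPatternAny,
			Operation:                 sarama.AclOperationAny,
			PermissionType:            sarama.AclPermissionAny,
		},
	})
	if err != nil {
		return err
	}
	if rsp.Err != sarama.ErrNoError {
		return rsp.Err
	}
	for _, ra := range rsp.ResourceAcls {
		for _, acl := range ra.Acls {
			fmt.Printf("%s: %v/%v for %s\n", ra.ResourceName, acl.Operation, acl.PermissionType, acl.Principal)
		}
	}
	return nil
}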
diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/Shopify/sarama/acl_filter.go
new file mode 100644
index 0000000..fad5558
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_filter.go
@@ -0,0 +1,78 @@
+package sarama
+
+//AclFilter is an acl filter type
+type AclFilter struct {
+	Version                   int
+	ResourceType              AclResourceType
+	ResourceName              *string
+	ResourcePatternTypeFilter AclResourcePatternType
+	Principal                 *string
+	Host                      *string
+	Operation                 AclOperation
+	PermissionType            AclPermissionType
+}
+
+func (a *AclFilter) encode(pe packetEncoder) error {
+	pe.putInt8(int8(a.ResourceType))
+	if err := pe.putNullableString(a.ResourceName); err != nil {
+		return err
+	}
+
+	if a.Version == 1 {
+		pe.putInt8(int8(a.ResourcePatternTypeFilter))
+	}
+
+	if err := pe.putNullableString(a.Principal); err != nil {
+		return err
+	}
+	if err := pe.putNullableString(a.Host); err != nil {
+		return err
+	}
+	pe.putInt8(int8(a.Operation))
+	pe.putInt8(int8(a.PermissionType))
+
+	return nil
+}
+
+func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) {
+	resourceType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.ResourceType = AclResourceType(resourceType)
+
+	if a.ResourceName, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	if a.Version == 1 {
+		pattern, err := pd.getInt8()
+
+		if err != nil {
+			return err
+		}
+
+		a.ResourcePatternTypeFilter = AclResourcePatternType(pattern)
+	}
+
+	if a.Principal, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	if a.Host, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	operation, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Operation = AclOperation(operation)
+
+	permissionType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.PermissionType = AclPermissionType(permissionType)
+
+	return nil
+}
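Note that ResourcePatternTypeFilter is only written to the wire when Version is 1, so pattern-based matching silently degrades to the v0 behaviour on older request versions. A sketch (assuming `sarama` is imported):

// A filter that matches prefixed topic ACLs; it must travel in a v1
// request (brokers >= V2_0_0_0) for the pattern type to be encoded at all.
func prefixedTopicFilter(prefix string) *sarama.AclFilter {
	return &sarama.AclFilter{
		Version:                   1,
		ResourceType:              sarama.AclResourceTopic,
		ResourceName:              &prefix,
		ResourcePatternTypeFilter: sarama.AclPatternPrefixed,
		Operation:                 sarama.AclOperationAny,
		PermissionType:            sarama.AclPermissionAny,
	}
}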
diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go
new file mode 100644
index 0000000..c10ad7b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_types.go
@@ -0,0 +1,55 @@
+package sarama
+
+type (
+	AclOperation int
+
+	AclPermissionType int
+
+	AclResourceType int
+
+	AclResourcePatternType int
+)
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
+const (
+	AclOperationUnknown AclOperation = iota
+	AclOperationAny
+	AclOperationAll
+	AclOperationRead
+	AclOperationWrite
+	AclOperationCreate
+	AclOperationDelete
+	AclOperationAlter
+	AclOperationDescribe
+	AclOperationClusterAction
+	AclOperationDescribeConfigs
+	AclOperationAlterConfigs
+	AclOperationIdempotentWrite
+)
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
+const (
+	AclPermissionUnknown AclPermissionType = iota
+	AclPermissionAny
+	AclPermissionDeny
+	AclPermissionAllow
+)
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
+const (
+	AclResourceUnknown AclResourceType = iota
+	AclResourceAny
+	AclResourceTopic
+	AclResourceGroup
+	AclResourceCluster
+	AclResourceTransactionalID
+)
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java
+const (
+	AclPatternUnknown AclResourcePatternType = iota
+	AclPatternAny
+	AclPatternMatch
+	AclPatternLiteral
+	AclPatternPrefixed
+)
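For orientation, a typical binding built from these constants — allowing User:alice to read the "payments" topic from any host. Resource and Acl are defined in acl_bindings.go, which is not part of this file, so the field names below are assumed from that source (`sarama` imported; names are placeholders):

var (
	paymentsTopic = sarama.Resource{
		ResourceType:        sarama.AclResourceTopic,
		ResourceName:        "payments",
		ResourcePatternType: sarama.AclPatternLiteral,
	}
	aliceReads = sarama.Acl{
		Principal:      "User:alice",
		Host:           "*",
		Operation:      sarama.AclOperationRead,
		PermissionType: sarama.AclPermissionAllow,
	}
)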
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
new file mode 100644
index 0000000..fc227ab
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
@@ -0,0 +1,53 @@
+package sarama
+
+//AddOffsetsToTxnRequest is a request to add offsets to a transaction
+type AddOffsetsToTxnRequest struct {
+	TransactionalID string
+	ProducerID      int64
+	ProducerEpoch   int16
+	GroupID         string
+}
+
+func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+
+	pe.putInt64(a.ProducerID)
+
+	pe.putInt16(a.ProducerEpoch)
+
+	if err := pe.putString(a.GroupID); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+	if a.GroupID, err = pd.getString(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (a *AddOffsetsToTxnRequest) key() int16 {
+	return 25
+}
+
+func (a *AddOffsetsToTxnRequest) version() int16 {
+	return 0
+}
+
+func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
new file mode 100644
index 0000000..c88c1f8
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
@@ -0,0 +1,45 @@
+package sarama
+
+import (
+	"time"
+)
+
+//AddOffsetsToTxnResponse is a response type for adding offsets to txns
+type AddOffsetsToTxnResponse struct {
+	ThrottleTime time.Duration
+	Err          KError
+}
+
+func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+	pe.putInt16(int16(a.Err))
+	return nil
+}
+
+func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	a.Err = KError(kerr)
+
+	return nil
+}
+
+func (a *AddOffsetsToTxnResponse) key() int16 {
+	return 25
+}
+
+func (a *AddOffsetsToTxnResponse) version() int16 {
+	return 0
+}
+
+func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
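A round-trip sketch (assuming `sarama` is imported; Broker.AddOffsetsToTxn lives in broker.go, outside this diff). A transactional producer uses this request/response pair to fold a consumer group's offset commits into an open transaction:

func addOffsetsToTxn(b *sarama.Broker, txnID, group string, pid int64, epoch int16) error {
	rsp, err := b.AddOffsetsToTxn(&sarama.AddOffsetsToTxnRequest{
		TransactionalID: txnID,
		ProducerID:      pid,
		ProducerEpoch:   epoch,
		GroupID:         group,
	})
	if err != nil {
		return err
	}
	if rsp.Err != sarama.ErrNoError {
		return rsp.Err
	}
	return nil
}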
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
new file mode 100644
index 0000000..8d4b42e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
@@ -0,0 +1,77 @@
+package sarama
+
+//AddPartitionsToTxnRequest is an add partitions to txn request
+type AddPartitionsToTxnRequest struct {
+	TransactionalID string
+	ProducerID      int64
+	ProducerEpoch   int16
+	TopicPartitions map[string][]int32
+}
+
+func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+	pe.putInt64(a.ProducerID)
+	pe.putInt16(a.ProducerEpoch)
+
+	if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil {
+		return err
+	}
+	for topic, partitions := range a.TopicPartitions {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.TopicPartitions = make(map[string][]int32)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		partitions, err := pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+
+		a.TopicPartitions[topic] = partitions
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnRequest) key() int16 {
+	return 24
+}
+
+func (a *AddPartitionsToTxnRequest) version() int16 {
+	return 0
+}
+
+func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
new file mode 100644
index 0000000..eb4f23e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
@@ -0,0 +1,110 @@
+package sarama
+
+import (
+	"time"
+)
+
+//AddPartitionsToTxnResponse is a response type for adding partitions to txns
+type AddPartitionsToTxnResponse struct {
+	ThrottleTime time.Duration
+	Errors       map[string][]*PartitionError
+}
+
+func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+	if err := pe.putArrayLength(len(a.Errors)); err != nil {
+		return err
+	}
+
+	for topic, e := range a.Errors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(e)); err != nil {
+			return err
+		}
+		for _, partitionError := range e {
+			if err := partitionError.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Errors = make(map[string][]*PartitionError)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		m, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		a.Errors[topic] = make([]*PartitionError, m)
+
+		for j := 0; j < m; j++ {
+			a.Errors[topic][j] = new(PartitionError)
+			if err := a.Errors[topic][j].decode(pd, version); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnResponse) key() int16 {
+	return 24
+}
+
+func (a *AddPartitionsToTxnResponse) version() int16 {
+	return 0
+}
+
+func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+//PartitionError is a partition error type
+type PartitionError struct {
+	Partition int32
+	Err       KError
+}
+
+func (p *PartitionError) encode(pe packetEncoder) error {
+	pe.putInt32(p.Partition)
+	pe.putInt16(int16(p.Err))
+	return nil
+}
+
+func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) {
+	if p.Partition, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	p.Err = KError(kerr)
+
+	return nil
+}
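Errors is keyed by topic with one PartitionError per partition, so both levels need checking. A sketch (assuming `sarama` and `fmt` are imported; Broker.AddPartitionsToTxn lives in broker.go, outside this diff; the topic and partition numbers are placeholders):

func addPartitionsToTxn(b *sarama.Broker, txnID string, pid int64, epoch int16) error {
	rsp, err := b.AddPartitionsToTxn(&sarama.AddPartitionsToTxnRequest{
		TransactionalID: txnID,
		ProducerID:      pid,
		ProducerEpoch:   epoch,
		TopicPartitions: map[string][]int32{"payments": {0, 1, 2}},
	})
	if err != nil {
		return err
	}
	for topic, partitionErrs := range rsp.Errors {
		for _, pe := range partitionErrs {
			if pe.Err != sarama.ErrNoError {
				return fmt.Errorf("%s/%d: %v", topic, pe.Partition, pe.Err)
			}
		}
	}
	return nil
}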
diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/Shopify/sarama/admin.go
new file mode 100644
index 0000000..6c9b1e9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/admin.go
@@ -0,0 +1,690 @@
+package sarama
+
+import (
+	"errors"
+	"math/rand"
+	"sync"
+)
+
+// ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics,
+// brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0.
+// Methods with stricter requirements will specify the minimum broker version required.
+// You MUST call Close() on a client to avoid leaks
+type ClusterAdmin interface {
+	// Creates a new topic. This operation is supported by brokers with version 0.10.1.0 or higher.
+	// It may take several seconds after CreateTopic returns success for all the brokers
+	// to become aware that the topic has been created. During this time, listTopics
+	// may not return information about the new topic. The validateOnly option is supported from version 0.10.2.0.
+	CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error
+
+	// List the topics available in the cluster with the default options.
+	ListTopics() (map[string]TopicDetail, error)
+
+	// Describe some topics in the cluster.
+	DescribeTopics(topics []string) (metadata []*TopicMetadata, err error)
+
+	// Delete a topic. It may take several seconds after DeleteTopic returns success
+	// for all the brokers to become aware that the topic is gone.
+	// During this time, listTopics may continue to return information about the deleted topic.
+	// If delete.topic.enable is false on the brokers, deleteTopic will mark
+	// the topic for deletion, but not actually delete it.
+	// This operation is supported by brokers with version 0.10.1.0 or higher.
+	DeleteTopic(topic string) error
+
+	// Increase the number of partitions of the topics according to the corresponding values.
+	// If partitions are increased for a topic that has a key, the partition logic or ordering of
+	// the messages will be affected. It may take several seconds after this method returns
+	// success for all the brokers to become aware that the partitions have been created.
+	// During this time, ClusterAdmin#describeTopics may not return information about the
+	// new partitions. This operation is supported by brokers with version 1.0.0 or higher.
+	CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error
+
+	// Delete records whose offset is smaller than the given offset of the corresponding partition.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	DeleteRecords(topic string, partitionOffsets map[int32]int64) error
+
+	// Get the configuration for the specified resources.
+	// The returned configuration includes default values, and the Default flag
+	// set to true can be used to distinguish them from user-supplied values.
+	// Config entries where ReadOnly is true cannot be updated.
+	// The value of config entries where Sensitive is true is always nil so
+	// sensitive information is not disclosed.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	DescribeConfig(resource ConfigResource) ([]ConfigEntry, error)
+
+	// Update the configuration for the specified resources with the default options.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	// The resources with their configs (topic is the only resource type with configs
+	// that can be updated currently) are provided in the request. Updates are not transactional,
+	// so they may succeed for some resources while failing for others. The configs for
+	// a particular resource are updated atomically.
+	AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error
+
+	// Creates access control lists (ACLs) which are bound to specific resources.
+	// This operation is not transactional so it may succeed for some ACLs while fail for others.
+	// If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
+	// no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher.
+	CreateACL(resource Resource, acl Acl) error
+
+	// Lists access control lists (ACLs) according to the supplied filter.
+	// It may take some time for changes made by createAcls or deleteAcls to be reflected in the output of ListAcls.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	ListAcls(filter AclFilter) ([]ResourceAcls, error)
+
+	// Deletes access control lists (ACLs) according to the supplied filters.
+	// This operation is not transactional so it may succeed for some ACLs while fail for others.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error)
+
+	// List the consumer groups available in the cluster.
+	ListConsumerGroups() (map[string]string, error)
+
+	// Describe the given consumer groups.
+	DescribeConsumerGroups(groups []string) ([]*GroupDescription, error)
+
+	// List the consumer group offsets available in the cluster.
+	ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error)
+
+	// Delete a consumer group.
+	DeleteConsumerGroup(group string) error
+
+	// Get information about the nodes in the cluster
+	DescribeCluster() (brokers []*Broker, controllerID int32, err error)
+
+	// Close shuts down the admin and closes underlying client.
+	Close() error
+}
+
+type clusterAdmin struct {
+	client Client
+	conf   *Config
+}
+
+// NewClusterAdmin creates a new ClusterAdmin using the given broker addresses and configuration.
+func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) {
+	client, err := NewClient(addrs, conf)
+	if err != nil {
+		return nil, err
+	}
+	return NewClusterAdminFromClient(client)
+}
+
+// NewClusterAdminFromClient creates a new ClusterAdmin using the given client.
+// Note that underlying client will also be closed on admin's Close() call.
+func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) {
+	//make sure we can retrieve the controller
+	_, err := client.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	ca := &clusterAdmin{
+		client: client,
+		conf:   client.Config(),
+	}
+	return ca, nil
+}
+
+func (ca *clusterAdmin) Close() error {
+	return ca.client.Close()
+}
+
+func (ca *clusterAdmin) Controller() (*Broker, error) {
+	return ca.client.Controller()
+}
+
+func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
+
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+
+	if detail == nil {
+		return errors.New("you must specify topic details")
+	}
+
+	topicDetails := make(map[string]*TopicDetail)
+	topicDetails[topic] = detail
+
+	request := &CreateTopicsRequest{
+		TopicDetails: topicDetails,
+		ValidateOnly: validateOnly,
+		Timeout:      ca.conf.Admin.Timeout,
+	}
+
+	if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+		request.Version = 1
+	}
+	if ca.conf.Version.IsAtLeast(V1_0_0_0) {
+		request.Version = 2
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return err
+	}
+
+	rsp, err := b.CreateTopics(request)
+	if err != nil {
+		return err
+	}
+
+	topicErr, ok := rsp.TopicErrors[topic]
+	if !ok {
+		return ErrIncompleteResponse
+	}
+
+	if topicErr.Err != ErrNoError {
+		return topicErr
+	}
+
+	return nil
+}
+
+func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) {
+	controller, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	request := &MetadataRequest{
+		Topics:                 topics,
+		AllowAutoTopicCreation: false,
+	}
+
+	if ca.conf.Version.IsAtLeast(V1_0_0_0) {
+		request.Version = 5
+	} else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+		request.Version = 4
+	}
+
+	response, err := controller.GetMetadata(request)
+	if err != nil {
+		return nil, err
+	}
+	return response.Topics, nil
+}
+
+func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) {
+	controller, err := ca.Controller()
+	if err != nil {
+		return nil, int32(0), err
+	}
+
+	request := &MetadataRequest{
+		Topics: []string{},
+	}
+
+	if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+		request.Version = 1
+	}
+
+	response, err := controller.GetMetadata(request)
+	if err != nil {
+		return nil, int32(0), err
+	}
+
+	return response.Brokers, response.ControllerID, nil
+}
+
+func (ca *clusterAdmin) findAnyBroker() (*Broker, error) {
+	brokers := ca.client.Brokers()
+	if len(brokers) > 0 {
+		index := rand.Intn(len(brokers))
+		return brokers[index], nil
+	}
+	return nil, errors.New("no available broker")
+}
+
+func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) {
+	// In order to build TopicDetails we need to first get the list of all
+	// topics using a MetadataRequest and then get their configs using a
+	// DescribeConfigsRequest request. To avoid sending many requests to the
+	// broker, we use a single DescribeConfigsRequest.
+
+	// Send the all-topic MetadataRequest
+	b, err := ca.findAnyBroker()
+	if err != nil {
+		return nil, err
+	}
+	_ = b.Open(ca.client.Config())
+
+	metadataReq := &MetadataRequest{}
+	metadataResp, err := b.GetMetadata(metadataReq)
+	if err != nil {
+		return nil, err
+	}
+
+	topicsDetailsMap := make(map[string]TopicDetail)
+
+	var describeConfigsResources []*ConfigResource
+
+	for _, topic := range metadataResp.Topics {
+		topicDetails := TopicDetail{
+			NumPartitions: int32(len(topic.Partitions)),
+		}
+		if len(topic.Partitions) > 0 {
+			topicDetails.ReplicaAssignment = map[int32][]int32{}
+			for _, partition := range topic.Partitions {
+				topicDetails.ReplicaAssignment[partition.ID] = partition.Replicas
+			}
+			topicDetails.ReplicationFactor = int16(len(topic.Partitions[0].Replicas))
+		}
+		topicsDetailsMap[topic.Name] = topicDetails
+
+		// we populate the resources we want to describe from the MetadataResponse
+		topicResource := ConfigResource{
+			Type: TopicResource,
+			Name: topic.Name,
+		}
+		describeConfigsResources = append(describeConfigsResources, &topicResource)
+	}
+
+	// Send the DescribeConfigsRequest
+	describeConfigsReq := &DescribeConfigsRequest{
+		Resources: describeConfigsResources,
+	}
+	describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, resource := range describeConfigsResp.Resources {
+		topicDetails := topicsDetailsMap[resource.Name]
+		topicDetails.ConfigEntries = make(map[string]*string)
+
+		for _, entry := range resource.Configs {
+			// only include non-default non-sensitive config
+			// (don't actually think topic config will ever be sensitive)
+			if entry.Default || entry.Sensitive {
+				continue
+			}
+			topicDetails.ConfigEntries[entry.Name] = &entry.Value
+		}
+
+		topicsDetailsMap[resource.Name] = topicDetails
+	}
+
+	return topicsDetailsMap, nil
+}
+
+func (ca *clusterAdmin) DeleteTopic(topic string) error {
+
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+
+	request := &DeleteTopicsRequest{
+		Topics:  []string{topic},
+		Timeout: ca.conf.Admin.Timeout,
+	}
+
+	if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+		request.Version = 1
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return err
+	}
+
+	rsp, err := b.DeleteTopics(request)
+	if err != nil {
+		return err
+	}
+
+	topicErr, ok := rsp.TopicErrorCodes[topic]
+	if !ok {
+		return ErrIncompleteResponse
+	}
+
+	if topicErr != ErrNoError {
+		return topicErr
+	}
+	return nil
+}
+
+func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error {
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+
+	topicPartitions := make(map[string]*TopicPartition)
+	topicPartitions[topic] = &TopicPartition{Count: count, Assignment: assignment}
+
+	request := &CreatePartitionsRequest{
+		TopicPartitions: topicPartitions,
+		Timeout:         ca.conf.Admin.Timeout,
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return err
+	}
+
+	rsp, err := b.CreatePartitions(request)
+	if err != nil {
+		return err
+	}
+
+	topicErr, ok := rsp.TopicPartitionErrors[topic]
+	if !ok {
+		return ErrIncompleteResponse
+	}
+
+	if topicErr.Err != ErrNoError {
+		return topicErr
+	}
+
+	return nil
+}
+
+func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error {
+
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+	partitionPerBroker := make(map[*Broker][]int32)
+	for partition := range partitionOffsets {
+		broker, err := ca.client.Leader(topic, partition)
+		if err != nil {
+			return err
+		}
+		if _, ok := partitionPerBroker[broker]; ok {
+			partitionPerBroker[broker] = append(partitionPerBroker[broker], partition)
+		} else {
+			partitionPerBroker[broker] = []int32{partition}
+		}
+	}
+	errs := make([]error, 0)
+	for broker, partitions := range partitionPerBroker {
+		topics := make(map[string]*DeleteRecordsRequestTopic)
+		recordsToDelete := make(map[int32]int64)
+		for _, p := range partitions {
+			recordsToDelete[p] = partitionOffsets[p]
+		}
+		topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: recordsToDelete}
+		request := &DeleteRecordsRequest{
+			Topics:  topics,
+			Timeout: ca.conf.Admin.Timeout,
+		}
+
+		rsp, err := broker.DeleteRecords(request)
+		if err != nil {
+			errs = append(errs, err)
+		} else {
+			deleteRecordsResponseTopic, ok := rsp.Topics[topic]
+			if !ok {
+				errs = append(errs, ErrIncompleteResponse)
+			} else {
+				for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions {
+					if deleteRecordsResponsePartition.Err != ErrNoError {
+						errs = append(errs, errors.New(deleteRecordsResponsePartition.Err.Error()))
+					}
+				}
+			}
+		}
+	}
+	if len(errs) > 0 {
+		return ErrDeleteRecords{MultiError{&errs}}
+	}
+	//TODO: since we are dealing with a couple of partitions it would be good to return a slice of errors
+	//for each partition instead of one error
+	return nil
+}
+
+func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
+
+	var entries []ConfigEntry
+	var resources []*ConfigResource
+	resources = append(resources, &resource)
+
+	request := &DescribeConfigsRequest{
+		Resources: resources,
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.DescribeConfigs(request)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, rspResource := range rsp.Resources {
+		if rspResource.Name == resource.Name {
+			if rspResource.ErrorMsg != "" {
+				return nil, errors.New(rspResource.ErrorMsg)
+			}
+			for _, cfgEntry := range rspResource.Configs {
+				entries = append(entries, *cfgEntry)
+			}
+		}
+	}
+	return entries, nil
+}
+
+func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error {
+
+	var resources []*AlterConfigsResource
+	resources = append(resources, &AlterConfigsResource{
+		Type:          resourceType,
+		Name:          name,
+		ConfigEntries: entries,
+	})
+
+	request := &AlterConfigsRequest{
+		Resources:    resources,
+		ValidateOnly: validateOnly,
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return err
+	}
+
+	rsp, err := b.AlterConfigs(request)
+	if err != nil {
+		return err
+	}
+
+	for _, rspResource := range rsp.Resources {
+		if rspResource.Name == name {
+			if rspResource.ErrorMsg != "" {
+				return errors.New(rspResource.ErrorMsg)
+			}
+		}
+	}
+	return nil
+}
+
+func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error {
+	var acls []*AclCreation
+	acls = append(acls, &AclCreation{resource, acl})
+	request := &CreateAclsRequest{AclCreations: acls}
+
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return err
+	}
+
+	_, err = b.CreateAcls(request)
+	return err
+}
+
+func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) {
+
+	request := &DescribeAclsRequest{AclFilter: filter}
+
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.DescribeAcls(request)
+	if err != nil {
+		return nil, err
+	}
+
+	var lAcls []ResourceAcls
+	for _, rAcl := range rsp.ResourceAcls {
+		lAcls = append(lAcls, *rAcl)
+	}
+	return lAcls, nil
+}
+
+func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) {
+	var filters []*AclFilter
+	filters = append(filters, &filter)
+	request := &DeleteAclsRequest{Filters: filters}
+
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.DeleteAcls(request)
+	if err != nil {
+		return nil, err
+	}
+
+	var mAcls []MatchingAcl
+	for _, fr := range rsp.FilterResponses {
+		for _, mACL := range fr.MatchingAcls {
+			mAcls = append(mAcls, *mACL)
+		}
+
+	}
+	return mAcls, nil
+}
+
+func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) {
+	groupsPerBroker := make(map[*Broker][]string)
+
+	for _, group := range groups {
+		controller, err := ca.client.Coordinator(group)
+		if err != nil {
+			return nil, err
+		}
+		groupsPerBroker[controller] = append(groupsPerBroker[controller], group)
+
+	}
+
+	for broker, brokerGroups := range groupsPerBroker {
+		response, err := broker.DescribeGroups(&DescribeGroupsRequest{
+			Groups: brokerGroups,
+		})
+		if err != nil {
+			return nil, err
+		}
+
+		result = append(result, response.Groups...)
+	}
+	return result, nil
+}
+
+func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err error) {
+	allGroups = make(map[string]string)
+
+	// Query brokers in parallel, since we have to query *all* brokers
+	brokers := ca.client.Brokers()
+	groupMaps := make(chan map[string]string, len(brokers))
+	errors := make(chan error, len(brokers))
+	wg := sync.WaitGroup{}
+
+	for _, b := range brokers {
+		wg.Add(1)
+		go func(b *Broker, conf *Config) {
+			defer wg.Done()
+			_ = b.Open(conf) // Ensure that broker is opened
+
+			response, err := b.ListGroups(&ListGroupsRequest{})
+			if err != nil {
+				errors <- err
+				return
+			}
+
+			groups := make(map[string]string)
+			for group, typ := range response.Groups {
+				groups[group] = typ
+			}
+
+			groupMaps <- groups
+
+		}(b, ca.conf)
+	}
+
+	wg.Wait()
+	close(groupMaps)
+	close(errors)
+
+	for groupMap := range groupMaps {
+		for group, protocolType := range groupMap {
+			allGroups[group] = protocolType
+		}
+	}
+
+	// Intentionally return only the first error for simplicity
+	err = <-errors
+	return
+}
+
+func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) {
+	coordinator, err := ca.client.Coordinator(group)
+	if err != nil {
+		return nil, err
+	}
+
+	request := &OffsetFetchRequest{
+		ConsumerGroup: group,
+		partitions:    topicPartitions,
+	}
+
+	if ca.conf.Version.IsAtLeast(V0_10_2_0) {
+		request.Version = 2
+	} else if ca.conf.Version.IsAtLeast(V0_8_2_2) {
+		request.Version = 1
+	}
+
+	return coordinator.FetchOffset(request)
+}
+
+func (ca *clusterAdmin) DeleteConsumerGroup(group string) error {
+	coordinator, err := ca.client.Coordinator(group)
+	if err != nil {
+		return err
+	}
+
+	request := &DeleteGroupsRequest{
+		Groups: []string{group},
+	}
+
+	resp, err := coordinator.DeleteGroups(request)
+	if err != nil {
+		return err
+	}
+
+	groupErr, ok := resp.GroupErrorCodes[group]
+	if !ok {
+		return ErrIncompleteResponse
+	}
+
+	if groupErr != ErrNoError {
+		return groupErr
+	}
+
+	return nil
+}
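An end-to-end sketch of the ClusterAdmin surface above (broker address, topic and principal are placeholders; assumes `sarama` and `log` are imported; the Resource and Acl field names come from acl_bindings.go, outside this diff):

func adminExample() {
	conf := sarama.NewConfig()
	conf.Version = sarama.V2_0_0_0 // gates which request versions are used

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, conf)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close() // also closes the underlying client

	if err := admin.CreateTopic("payments", &sarama.TopicDetail{
		NumPartitions:     3,
		ReplicationFactor: 1,
	}, false); err != nil {
		log.Fatal(err)
	}

	if err := admin.CreateACL(sarama.Resource{
		ResourceType:        sarama.AclResourceTopic,
		ResourceName:        "payments",
		ResourcePatternType: sarama.AclPatternLiteral,
	}, sarama.Acl{
		Principal:      "User:alice",
		Host:           "*",
		Operation:      sarama.AclOperationRead,
		PermissionType: sarama.AclPermissionAllow,
	}); err != nil {
		log.Fatal(err)
	}
}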
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go
new file mode 100644
index 0000000..26c275b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_configs_request.go
@@ -0,0 +1,122 @@
+package sarama
+
+//AlterConfigsRequest is an alter config request type
+type AlterConfigsRequest struct {
+	Resources    []*AlterConfigsResource
+	ValidateOnly bool
+}
+
+//AlterConfigsResource is an alter config resource type
+type AlterConfigsResource struct {
+	Type          ConfigResourceType
+	Name          string
+	ConfigEntries map[string]*string
+}
+
+func (a *AlterConfigsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(a.Resources)); err != nil {
+		return err
+	}
+
+	for _, r := range a.Resources {
+		if err := r.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putBool(a.ValidateOnly)
+	return nil
+}
+
+func (a *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
+	resourceCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Resources = make([]*AlterConfigsResource, resourceCount)
+	for i := range a.Resources {
+		r := &AlterConfigsResource{}
+		err = r.decode(pd, version)
+		if err != nil {
+			return err
+		}
+		a.Resources[i] = r
+	}
+
+	validateOnly, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+
+	a.ValidateOnly = validateOnly
+
+	return nil
+}
+
+func (a *AlterConfigsResource) encode(pe packetEncoder) error {
+	pe.putInt8(int8(a.Type))
+
+	if err := pe.putString(a.Name); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
+		return err
+	}
+	for configKey, configValue := range a.ConfigEntries {
+		if err := pe.putString(configKey); err != nil {
+			return err
+		}
+		if err := pe.putNullableString(configValue); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	a.Name = name
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		a.ConfigEntries = make(map[string]*string, n)
+		for i := 0; i < n; i++ {
+			configKey, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			if a.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
+				return err
+			}
+		}
+	}
+	return err
+}
+
+func (a *AlterConfigsRequest) key() int16 {
+	return 33
+}
+
+func (a *AlterConfigsRequest) version() int16 {
+	return 0
+}
+
+func (a *AlterConfigsRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
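A dry-run sketch (assuming `sarama` and `errors` are imported and `b` is a connected *sarama.Broker). The ClusterAdmin.AlterConfig wrapper earlier in this diff builds the same request without ValidateOnly:

// Validate a retention change without applying it.
func validateRetentionChange(b *sarama.Broker, topic string) error {
	retention := "86400000" // one day, in milliseconds
	rsp, err := b.AlterConfigs(&sarama.AlterConfigsRequest{
		Resources: []*sarama.AlterConfigsResource{{
			Type:          sarama.TopicResource,
			Name:          topic,
			ConfigEntries: map[string]*string{"retention.ms": &retention},
		}},
		ValidateOnly: true,
	})
	if err != nil {
		return err
	}
	for _, r := range rsp.Resources {
		if r.ErrorMsg != "" {
			return errors.New(r.ErrorMsg)
		}
	}
	return nil
}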
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go
new file mode 100644
index 0000000..3893663
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_configs_response.go
@@ -0,0 +1,97 @@
+package sarama
+
+import "time"
+
+//AlterConfigsResponse is a response type for alter config
+type AlterConfigsResponse struct {
+	ThrottleTime time.Duration
+	Resources    []*AlterConfigsResourceResponse
+}
+
+//AlterConfigsResourceResponse is a response type for alter config resource
+type AlterConfigsResourceResponse struct {
+	ErrorCode int16
+	ErrorMsg  string
+	Type      ConfigResourceType
+	Name      string
+}
+
+func (a *AlterConfigsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(a.Resources)); err != nil {
+		return err
+	}
+
+	for i := range a.Resources {
+		pe.putInt16(a.Resources[i].ErrorCode)
+		err := pe.putString(a.Resources[i].ErrorMsg)
+		if err != nil {
+			return err
+		}
+		pe.putInt8(int8(a.Resources[i].Type))
+		err = pe.putString(a.Resources[i].Name)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	responseCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
+
+	for i := range a.Resources {
+		a.Resources[i] = new(AlterConfigsResourceResponse)
+
+		errCode, err := pd.getInt16()
+		if err != nil {
+			return err
+		}
+		a.Resources[i].ErrorCode = errCode
+
+		e, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		a.Resources[i].ErrorMsg = e
+
+		t, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		a.Resources[i].Type = ConfigResourceType(t)
+
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		a.Resources[i].Name = name
+	}
+
+	return nil
+}
+
+func (a *AlterConfigsResponse) key() int16 {
+	return 32
+}
+
+func (a *AlterConfigsResponse) version() int16 {
+	return 0
+}
+
+func (a *AlterConfigsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go
new file mode 100644
index 0000000..b33167c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/api_versions_request.go
@@ -0,0 +1,25 @@
+package sarama
+
+//ApiVersionsRequest ...
+type ApiVersionsRequest struct {
+}
+
+func (a *ApiVersionsRequest) encode(pe packetEncoder) error {
+	return nil
+}
+
+func (a *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
+	return nil
+}
+
+func (a *ApiVersionsRequest) key() int16 {
+	return 18
+}
+
+func (a *ApiVersionsRequest) version() int16 {
+	return 0
+}
+
+func (a *ApiVersionsRequest) requiredVersion() KafkaVersion {
+	return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go
new file mode 100644
index 0000000..bb1f0b3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/api_versions_response.go
@@ -0,0 +1,89 @@
+package sarama
+
+//ApiVersionsResponseBlock is an api version response block type
+type ApiVersionsResponseBlock struct {
+	ApiKey     int16
+	MinVersion int16
+	MaxVersion int16
+}
+
+func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
+	pe.putInt16(b.ApiKey)
+	pe.putInt16(b.MinVersion)
+	pe.putInt16(b.MaxVersion)
+	return nil
+}
+
+func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
+	var err error
+
+	if b.ApiKey, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	if b.MinVersion, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	if b.MaxVersion, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+//ApiVersionsResponse is an api version response type
+type ApiVersionsResponse struct {
+	Err         KError
+	ApiVersions []*ApiVersionsResponseBlock
+}
+
+func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
+		return err
+	}
+	for _, apiVersion := range r.ApiVersions {
+		if err := apiVersion.encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	numBlocks, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
+	for i := 0; i < numBlocks; i++ {
+		block := new(ApiVersionsResponseBlock)
+		if err := block.decode(pd); err != nil {
+			return err
+		}
+		r.ApiVersions[i] = block
+	}
+
+	return nil
+}
+
+func (r *ApiVersionsResponse) key() int16 {
+	return 18
+}
+
+func (r *ApiVersionsResponse) version() int16 {
+	return 0
+}
+
+func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
+	return V0_10_0_0
+}
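A lookup sketch (assuming `sarama` is imported; Broker.ApiVersions lives in broker.go, outside this diff):

// Report the version range a broker supports for one API key.
func supportedRange(b *sarama.Broker, apiKey int16) (min, max int16, ok bool) {
	rsp, err := b.ApiVersions(&sarama.ApiVersionsRequest{})
	if err != nil || rsp.Err != sarama.ErrNoError {
		return 0, 0, false
	}
	for _, block := range rsp.ApiVersions {
		if block.ApiKey == apiKey {
			return block.MinVersion, block.MaxVersion, true
		}
	}
	return 0, 0, false
}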
diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go
new file mode 100644
index 0000000..9b15cd1
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/async_producer.go
@@ -0,0 +1,1116 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/eapache/go-resiliency/breaker"
+	"github.com/eapache/queue"
+)
+
+// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
+// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
+// and parses responses for errors. You must read from the Errors() channel or the
+// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
+// leaks: it will not be garbage-collected automatically when it passes out of
+// scope.
+type AsyncProducer interface {
+
+	// AsyncClose triggers a shutdown of the producer. The shutdown has completed
+	// when both the Errors and Successes channels have been closed. When calling
+	// AsyncClose, you *must* continue to read from those channels in order to
+	// drain the results of any messages in flight.
+	AsyncClose()
+
+	// Close shuts down the producer and waits for any buffered messages to be
+	// flushed. You must call this function before a producer object passes out of
+	// scope, as it may otherwise leak memory. You must call this before calling
+	// Close on the underlying client.
+	Close() error
+
+	// Input is the input channel for the user to write messages to that they
+	// wish to send.
+	Input() chan<- *ProducerMessage
+
+	// Successes is the success output channel back to the user when Return.Successes is
+	// enabled. If Return.Successes is true, you MUST read from this channel or the
+	// Producer will deadlock. It is suggested that you send and read messages
+	// together in a single select statement.
+	Successes() <-chan *ProducerMessage
+
+	// Errors is the error output channel back to the user. You MUST read from this
+	// channel or the Producer will deadlock when the channel is full. Alternatively,
+	// you can set Producer.Return.Errors in your config to false, which prevents
+	// errors to be returned.
+	// errors from being returned.
+}
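A sketch of the contract the comments above describe — one select services Input, Successes and Errors so that neither output channel backs up, and both are drained after AsyncClose (topic and payload are placeholders; assumes `sarama` and `log` are imported and conf.Producer.Return.Successes was set to true when the producer was built):

func produceUntil(producer sarama.AsyncProducer, done <-chan struct{}) {
	for {
		select {
		case producer.Input() <- &sarama.ProducerMessage{
			Topic: "payments",
			Value: sarama.StringEncoder("ping"),
		}:
		case msg := <-producer.Successes():
			log.Printf("stored at %s/%d/%d", msg.Topic, msg.Partition, msg.Offset)
		case err := <-producer.Errors():
			log.Println("produce failed:", err)
		case <-done:
			producer.AsyncClose()
			// AsyncClose closes both output channels once in-flight
			// messages drain; keep reading them concurrently until then.
			go func() {
				for range producer.Successes() {
				}
			}()
			for err := range producer.Errors() {
				log.Println("produce failed:", err)
			}
			return
		}
	}
}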
+
+// transactionManager keeps the state necessary to ensure idempotent production
+type transactionManager struct {
+	producerID      int64
+	producerEpoch   int16
+	sequenceNumbers map[string]int32
+	mutex           sync.Mutex
+}
+
+const (
+	noProducerID    = -1
+	noProducerEpoch = -1
+)
+
+func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) int32 {
+	key := fmt.Sprintf("%s-%d", topic, partition)
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	sequence := t.sequenceNumbers[key]
+	t.sequenceNumbers[key] = sequence + 1
+	return sequence
+}
+
+func newTransactionManager(conf *Config, client Client) (*transactionManager, error) {
+	txnmgr := &transactionManager{
+		producerID:    noProducerID,
+		producerEpoch: noProducerEpoch,
+	}
+
+	if conf.Producer.Idempotent {
+		initProducerIDResponse, err := client.InitProducerID()
+		if err != nil {
+			return nil, err
+		}
+		txnmgr.producerID = initProducerIDResponse.ProducerID
+		txnmgr.producerEpoch = initProducerIDResponse.ProducerEpoch
+		txnmgr.sequenceNumbers = make(map[string]int32)
+		txnmgr.mutex = sync.Mutex{}
+
+		Logger.Printf("Obtained a ProducerId: %d and ProducerEpoch: %d\n", txnmgr.producerID, txnmgr.producerEpoch)
+	}
+
+	return txnmgr, nil
+}
+
+type asyncProducer struct {
+	client Client
+	conf   *Config
+
+	errors                    chan *ProducerError
+	input, successes, retries chan *ProducerMessage
+	inFlight                  sync.WaitGroup
+
+	brokers    map[*Broker]*brokerProducer
+	brokerRefs map[*brokerProducer]int
+	brokerLock sync.Mutex
+
+	txnmgr *transactionManager
+}
+
+// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
+func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
+	client, err := NewClient(addrs, conf)
+	if err != nil {
+		return nil, err
+	}
+	return newAsyncProducer(client)
+}
+
+// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
+	// For clients passed in by the client, ensure we don't
+	// call Close() on it.
+	cli := &nopCloserClient{client}
+	return newAsyncProducer(cli)
+}
+
+func newAsyncProducer(client Client) (AsyncProducer, error) {
+	// Check that we are not dealing with a closed Client before processing any other arguments
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	txnmgr, err := newTransactionManager(client.Config(), client)
+	if err != nil {
+		return nil, err
+	}
+
+	p := &asyncProducer{
+		client:     client,
+		conf:       client.Config(),
+		errors:     make(chan *ProducerError),
+		input:      make(chan *ProducerMessage),
+		successes:  make(chan *ProducerMessage),
+		retries:    make(chan *ProducerMessage),
+		brokers:    make(map[*Broker]*brokerProducer),
+		brokerRefs: make(map[*brokerProducer]int),
+		txnmgr:     txnmgr,
+	}
+
+	// launch our singleton dispatchers
+	go withRecover(p.dispatcher)
+	go withRecover(p.retryHandler)
+
+	return p, nil
+}
+
+type flagSet int8
+
+const (
+	syn      flagSet = 1 << iota // first message from partitionProducer to brokerProducer
+	fin                          // final message from partitionProducer to brokerProducer and back
+	shutdown                     // start the shutdown process
+)
+
+// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
+type ProducerMessage struct {
+	Topic string // The Kafka topic for this message.
+	// The partitioning key for this message. Pre-existing Encoders include
+	// StringEncoder and ByteEncoder.
+	Key Encoder
+	// The actual message to store in Kafka. Pre-existing Encoders include
+	// StringEncoder and ByteEncoder.
+	Value Encoder
+
+	// The headers are key-value pairs that are transparently passed
+	// by Kafka between producers and consumers.
+	Headers []RecordHeader
+
+	// This field is used to hold arbitrary data you wish to include so it
+	// will be available when receiving on the Successes and Errors channels.
+	// Sarama completely ignores this field; it is only to be used for
+	// pass-through data.
+	Metadata interface{}
+
+	// Below this point are filled in by the producer as the message is processed
+
+	// Offset is the offset of the message stored on the broker. This is only
+	// guaranteed to be defined if the message was successfully delivered and
+	// RequiredAcks is not NoResponse.
+	Offset int64
+	// Partition is the partition that the message was sent to. This is only
+	// guaranteed to be defined if the message was successfully delivered.
+	Partition int32
+	// Timestamp can vary in behaviour depending on broker configuration, being
+	// in either one of the CreateTime or LogAppendTime modes (default CreateTime),
+	// and requiring version at least 0.10.0.
+	//
+	// When configured to CreateTime, the timestamp is specified by the producer
+	// either by explicitly setting this field, or when the message is added
+	// to a produce set.
+	//
+	// When configured to LogAppendTime, the timestamp is assigned to the message
+	// by the broker. This is only guaranteed to be defined if the message was
+	// successfully delivered and RequiredAcks is not NoResponse.
+	Timestamp time.Time
+
+	retries        int
+	flags          flagSet
+	expectation    chan *ProducerError
+	sequenceNumber int32
+}
+
+const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
+
+func (m *ProducerMessage) byteSize(version int) int {
+	var size int
+	if version >= 2 {
+		size = maximumRecordOverhead
+		for _, h := range m.Headers {
+			size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
+		}
+	} else {
+		size = producerMessageOverhead
+	}
+	if m.Key != nil {
+		size += m.Key.Length()
+	}
+	if m.Value != nil {
+		size += m.Value.Length()
+	}
+	return size
+}
+
+func (m *ProducerMessage) clear() {
+	m.flags = 0
+	m.retries = 0
+}
+
+// ProducerError is the type of error generated when the producer fails to deliver a message.
+// It contains the original ProducerMessage as well as the actual error value.
+type ProducerError struct {
+	Msg *ProducerMessage
+	Err error
+}
+
+func (pe ProducerError) Error() string {
+	return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
+}
+
+// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
+// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
+// when closing a producer.
+type ProducerErrors []*ProducerError
+
+func (pe ProducerErrors) Error() string {
+	return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
+}
+
+func (p *asyncProducer) Errors() <-chan *ProducerError {
+	return p.errors
+}
+
+func (p *asyncProducer) Successes() <-chan *ProducerMessage {
+	return p.successes
+}
+
+func (p *asyncProducer) Input() chan<- *ProducerMessage {
+	return p.input
+}
+
+func (p *asyncProducer) Close() error {
+	p.AsyncClose()
+
+	if p.conf.Producer.Return.Successes {
+		go withRecover(func() {
+			for range p.successes {
+			}
+		})
+	}
+
+	var errors ProducerErrors
+	if p.conf.Producer.Return.Errors {
+		for event := range p.errors {
+			errors = append(errors, event)
+		}
+	} else {
+		<-p.errors
+	}
+
+	if len(errors) > 0 {
+		return errors
+	}
+	return nil
+}
+
+func (p *asyncProducer) AsyncClose() {
+	go withRecover(p.shutdown)
+}
+
+// singleton
+// dispatches messages by topic
+func (p *asyncProducer) dispatcher() {
+	handlers := make(map[string]chan<- *ProducerMessage)
+	shuttingDown := false
+
+	for msg := range p.input {
+		if msg == nil {
+			Logger.Println("Something tried to send a nil message, it was ignored.")
+			continue
+		}
+
+		if msg.flags&shutdown != 0 {
+			shuttingDown = true
+			p.inFlight.Done()
+			continue
+		} else if msg.retries == 0 {
+			if shuttingDown {
+				// we can't just call returnError here because that decrements the wait group,
+				// which hasn't been incremented yet for this message, and shouldn't be
+				pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
+				if p.conf.Producer.Return.Errors {
+					p.errors <- pErr
+				} else {
+					Logger.Println(pErr)
+				}
+				continue
+			}
+			p.inFlight.Add(1)
+		}
+
+		version := 1
+		if p.conf.Version.IsAtLeast(V0_11_0_0) {
+			version = 2
+		} else if msg.Headers != nil {
+			p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11"))
+			continue
+		}
+		if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes {
+			p.returnError(msg, ErrMessageSizeTooLarge)
+			continue
+		}
+
+		handler := handlers[msg.Topic]
+		if handler == nil {
+			handler = p.newTopicProducer(msg.Topic)
+			handlers[msg.Topic] = handler
+		}
+
+		handler <- msg
+	}
+
+	for _, handler := range handlers {
+		close(handler)
+	}
+}
+
+// one per topic
+// partitions messages, then dispatches them by partition
+type topicProducer struct {
+	parent *asyncProducer
+	topic  string
+	input  <-chan *ProducerMessage
+
+	breaker     *breaker.Breaker
+	handlers    map[int32]chan<- *ProducerMessage
+	partitioner Partitioner
+}
+
+func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
+	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+	tp := &topicProducer{
+		parent:      p,
+		topic:       topic,
+		input:       input,
+		breaker:     breaker.New(3, 1, 10*time.Second),
+		handlers:    make(map[int32]chan<- *ProducerMessage),
+		partitioner: p.conf.Producer.Partitioner(topic),
+	}
+	go withRecover(tp.dispatch)
+	return input
+}
+
+func (tp *topicProducer) dispatch() {
+	for msg := range tp.input {
+		if msg.retries == 0 {
+			if err := tp.partitionMessage(msg); err != nil {
+				tp.parent.returnError(msg, err)
+				continue
+			}
+		}
+		// All messages being retried (sent or not) have already had their retry count updated
+		if tp.parent.conf.Producer.Idempotent && msg.retries == 0 {
+			msg.sequenceNumber = tp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition)
+		}
+
+		handler := tp.handlers[msg.Partition]
+		if handler == nil {
+			handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
+			tp.handlers[msg.Partition] = handler
+		}
+
+		handler <- msg
+	}
+
+	for _, handler := range tp.handlers {
+		close(handler)
+	}
+}
+
+func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
+	var partitions []int32
+
+	err := tp.breaker.Run(func() (err error) {
+		var requiresConsistency = false
+		if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok {
+			requiresConsistency = ep.MessageRequiresConsistency(msg)
+		} else {
+			requiresConsistency = tp.partitioner.RequiresConsistency()
+		}
+
+		if requiresConsistency {
+			partitions, err = tp.parent.client.Partitions(msg.Topic)
+		} else {
+			partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
+		}
+		return
+	})
+
+	if err != nil {
+		return err
+	}
+
+	numPartitions := int32(len(partitions))
+
+	if numPartitions == 0 {
+		return ErrLeaderNotAvailable
+	}
+
+	choice, err := tp.partitioner.Partition(msg, numPartitions)
+
+	if err != nil {
+		return err
+	} else if choice < 0 || choice >= numPartitions {
+		return ErrInvalidPartition
+	}
+
+	msg.Partition = partitions[choice]
+
+	return nil
+}
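+
+// constantPartitioner is an editor's sketch, not part of upstream sarama,
+// of the Partitioner contract consumed by partitionMessage above: returning
+// true from RequiresConsistency() asks for the full partition list (including
+// currently unwritable partitions) rather than only the writable ones.
+type constantPartitioner struct{}
+
+func (constantPartitioner) Partition(msg *ProducerMessage, numPartitions int32) (int32, error) {
+	return 0, nil // always choose partition 0; a real partitioner would hash msg.Key
+}
+
+func (constantPartitioner) RequiresConsistency() bool { return true }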
+
+// one per partition per topic
+// dispatches messages to the appropriate broker
+// also responsible for maintaining message order during retries
+type partitionProducer struct {
+	parent    *asyncProducer
+	topic     string
+	partition int32
+	input     <-chan *ProducerMessage
+
+	leader         *Broker
+	breaker        *breaker.Breaker
+	brokerProducer *brokerProducer
+
+	// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through;
+	// all other messages get buffered in retryState[msg.retries].buf to preserve ordering.
+	// retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
+	// therefore whether our buffer is complete and safe to flush).
+	highWatermark int
+	retryState    []partitionRetryState
+}
+
+type partitionRetryState struct {
+	buf          []*ProducerMessage
+	expectChaser bool
+}
+
+func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
+	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+	pp := &partitionProducer{
+		parent:    p,
+		topic:     topic,
+		partition: partition,
+		input:     input,
+
+		breaker:    breaker.New(3, 1, 10*time.Second),
+		retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
+	}
+	go withRecover(pp.dispatch)
+	return input
+}
+
+func (pp *partitionProducer) backoff(retries int) {
+	var backoff time.Duration
+	if pp.parent.conf.Producer.Retry.BackoffFunc != nil {
+		maxRetries := pp.parent.conf.Producer.Retry.Max
+		backoff = pp.parent.conf.Producer.Retry.BackoffFunc(retries, maxRetries)
+	} else {
+		backoff = pp.parent.conf.Producer.Retry.Backoff
+	}
+	if backoff > 0 {
+		time.Sleep(backoff)
+	}
+}
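+
+// exponentialBackoff is an editor's sketch, not part of upstream sarama, of a
+// Producer.Retry.BackoffFunc that the branch above would invoke: it doubles a
+// 100ms base on each retry and caps the wait at 1.6s. The constants are
+// arbitrary illustrations.
+func exponentialBackoff(retries, maxRetries int) time.Duration {
+	d := 100 * time.Millisecond << uint(retries)
+	if d > 1600*time.Millisecond {
+		d = 1600 * time.Millisecond
+	}
+	return d
+}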
+
+func (pp *partitionProducer) dispatch() {
+	// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
+	// on the first message
+	pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
+	if pp.leader != nil {
+		pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
+		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+		pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+	}
+
+	defer func() {
+		if pp.brokerProducer != nil {
+			pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+		}
+	}()
+
+	for msg := range pp.input {
+		if pp.brokerProducer != nil && pp.brokerProducer.abandoned != nil {
+			select {
+			case <-pp.brokerProducer.abandoned:
+				// a message on the abandoned channel means that our current broker selection is out of date
+				Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+				pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+				pp.brokerProducer = nil
+				time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
+			default:
+				// producer connection is still open.
+			}
+		}
+
+		if msg.retries > pp.highWatermark {
+			// a new, higher, retry level; handle it and then back off
+			pp.newHighWatermark(msg.retries)
+			pp.backoff(msg.retries)
+		} else if pp.highWatermark > 0 {
+			// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
+			if msg.retries < pp.highWatermark {
+				// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
+				if msg.flags&fin == fin {
+					pp.retryState[msg.retries].expectChaser = false
+					pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+				} else {
+					pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
+				}
+				continue
+			} else if msg.flags&fin == fin {
+				// this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
+				// meaning this retry level is done and we can go down (at least) one level and flush that
+				pp.retryState[pp.highWatermark].expectChaser = false
+				pp.flushRetryBuffers()
+				pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+				continue
+			}
+		}
+
+		// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
+		// without breaking any of our ordering guarantees
+
+		if pp.brokerProducer == nil {
+			if err := pp.updateLeader(); err != nil {
+				pp.parent.returnError(msg, err)
+				pp.backoff(msg.retries)
+				continue
+			}
+			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+		}
+
+		pp.brokerProducer.input <- msg
+	}
+}
+
+func (pp *partitionProducer) newHighWatermark(hwm int) {
+	Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
+	pp.highWatermark = hwm
+
+	// send off a fin so that we know when everything "in between" has made it
+	// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
+	pp.retryState[pp.highWatermark].expectChaser = true
+	pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
+	pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
+
+	// a new HWM means that our current broker selection is out of date
+	Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+	pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+	pp.brokerProducer = nil
+}
+
+func (pp *partitionProducer) flushRetryBuffers() {
+	Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+	for {
+		pp.highWatermark--
+
+		if pp.brokerProducer == nil {
+			if err := pp.updateLeader(); err != nil {
+				pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
+				goto flushDone
+			}
+			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+		}
+
+		for _, msg := range pp.retryState[pp.highWatermark].buf {
+			pp.brokerProducer.input <- msg
+		}
+
+	flushDone:
+		pp.retryState[pp.highWatermark].buf = nil
+		if pp.retryState[pp.highWatermark].expectChaser {
+			Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+			break
+		} else if pp.highWatermark == 0 {
+			Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
+			break
+		}
+	}
+}
+
+func (pp *partitionProducer) updateLeader() error {
+	return pp.breaker.Run(func() (err error) {
+		if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
+			return err
+		}
+
+		if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
+			return err
+		}
+
+		pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
+		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+		pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+
+		return nil
+	})
+}
+
+// one per broker; also constructs an associated flusher
+func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer {
+	var (
+		input     = make(chan *ProducerMessage)
+		bridge    = make(chan *produceSet)
+		responses = make(chan *brokerProducerResponse)
+	)
+
+	bp := &brokerProducer{
+		parent:         p,
+		broker:         broker,
+		input:          input,
+		output:         bridge,
+		responses:      responses,
+		stopchan:       make(chan struct{}),
+		buffer:         newProduceSet(p),
+		currentRetries: make(map[string]map[int32]error),
+	}
+	go withRecover(bp.run)
+
+	// minimal bridge to make the network response `select`able
+	go withRecover(func() {
+		for set := range bridge {
+			request := set.buildRequest()
+
+			response, err := broker.Produce(request)
+
+			responses <- &brokerProducerResponse{
+				set: set,
+				err: err,
+				res: response,
+			}
+		}
+		close(responses)
+	})
+
+	if p.conf.Producer.Retry.Max <= 0 {
+		bp.abandoned = make(chan struct{})
+	}
+
+	return bp
+}
+
+type brokerProducerResponse struct {
+	set *produceSet
+	err error
+	res *ProduceResponse
+}
+
+// groups messages together into appropriately-sized batches for sending to the broker
+// handles state related to retries etc
+type brokerProducer struct {
+	parent *asyncProducer
+	broker *Broker
+
+	input     chan *ProducerMessage
+	output    chan<- *produceSet
+	responses <-chan *brokerProducerResponse
+	abandoned chan struct{}
+	stopchan  chan struct{}
+
+	buffer     *produceSet
+	timer      <-chan time.Time
+	timerFired bool
+
+	closing        error
+	currentRetries map[string]map[int32]error
+}
+
+func (bp *brokerProducer) run() {
+	var output chan<- *produceSet
+	Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
+
+	for {
+		select {
+		case msg, ok := <-bp.input:
+			if !ok {
+				Logger.Printf("producer/broker/%d input chan closed\n", bp.broker.ID())
+				bp.shutdown()
+				return
+			}
+
+			if msg == nil {
+				continue
+			}
+
+			if msg.flags&syn == syn {
+				Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
+					bp.broker.ID(), msg.Topic, msg.Partition)
+				if bp.currentRetries[msg.Topic] == nil {
+					bp.currentRetries[msg.Topic] = make(map[int32]error)
+				}
+				bp.currentRetries[msg.Topic][msg.Partition] = nil
+				bp.parent.inFlight.Done()
+				continue
+			}
+
+			if reason := bp.needsRetry(msg); reason != nil {
+				bp.parent.retryMessage(msg, reason)
+
+				if bp.closing == nil && msg.flags&fin == fin {
+					// we were retrying this partition but we can start processing again
+					delete(bp.currentRetries[msg.Topic], msg.Partition)
+					Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
+						bp.broker.ID(), msg.Topic, msg.Partition)
+				}
+
+				continue
+			}
+
+			if bp.buffer.wouldOverflow(msg) {
+				if err := bp.waitForSpace(msg); err != nil {
+					bp.parent.retryMessage(msg, err)
+					continue
+				}
+			}
+
+			if err := bp.buffer.add(msg); err != nil {
+				bp.parent.returnError(msg, err)
+				continue
+			}
+
+			if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
+				bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
+			}
+		case <-bp.timer:
+			bp.timerFired = true
+		case output <- bp.buffer:
+			bp.rollOver()
+		case response, ok := <-bp.responses:
+			if ok {
+				bp.handleResponse(response)
+			}
+		case <-bp.stopchan:
+			Logger.Printf(
+				"producer/broker/%d run loop asked to stop\n", bp.broker.ID())
+			return
+		}
+
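+		// sending on a nil channel blocks forever, so leaving output nil keeps
+		// the `case output <- bp.buffer` branch disabled until the buffer is
+		// actually worth flushing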
+		if bp.timerFired || bp.buffer.readyToFlush() {
+			output = bp.output
+		} else {
+			output = nil
+		}
+	}
+}
+
+func (bp *brokerProducer) shutdown() {
+	for !bp.buffer.empty() {
+		select {
+		case response := <-bp.responses:
+			bp.handleResponse(response)
+		case bp.output <- bp.buffer:
+			bp.rollOver()
+		}
+	}
+	close(bp.output)
+	for response := range bp.responses {
+		bp.handleResponse(response)
+	}
+	close(bp.stopchan)
+	Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
+}
+
+func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
+	if bp.closing != nil {
+		return bp.closing
+	}
+
+	return bp.currentRetries[msg.Topic][msg.Partition]
+}
+
+func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
+	Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
+
+	for {
+		select {
+		case response := <-bp.responses:
+			bp.handleResponse(response)
+			// handling a response can change our state, so re-check some things
+			if reason := bp.needsRetry(msg); reason != nil {
+				return reason
+			} else if !bp.buffer.wouldOverflow(msg) {
+				return nil
+			}
+		case bp.output <- bp.buffer:
+			bp.rollOver()
+			return nil
+		}
+	}
+}
+
+func (bp *brokerProducer) rollOver() {
+	bp.timer = nil
+	bp.timerFired = false
+	bp.buffer = newProduceSet(bp.parent)
+}
+
+func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
+	if response.err != nil {
+		bp.handleError(response.set, response.err)
+	} else {
+		bp.handleSuccess(response.set, response.res)
+	}
+
+	if bp.buffer.empty() {
+		bp.rollOver() // this can happen if the response invalidated our buffer
+	}
+}
+
+func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
+	// we iterate through the blocks in the request set, not the response, so that we notice
+	// if the response is missing a block completely
+	var retryTopics []string
+	sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+		if response == nil {
+			// this only happens when RequiredAcks is NoResponse, so we have to assume success
+			bp.parent.returnSuccesses(pSet.msgs)
+			return
+		}
+
+		block := response.GetBlock(topic, partition)
+		if block == nil {
+			bp.parent.returnErrors(pSet.msgs, ErrIncompleteResponse)
+			return
+		}
+
+		switch block.Err {
+		// Success
+		case ErrNoError:
+			if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
+				for _, msg := range pSet.msgs {
+					msg.Timestamp = block.Timestamp
+				}
+			}
+			for i, msg := range pSet.msgs {
+				msg.Offset = block.Offset + int64(i)
+			}
+			bp.parent.returnSuccesses(pSet.msgs)
+		// Duplicate
+		case ErrDuplicateSequenceNumber:
+			bp.parent.returnSuccesses(pSet.msgs)
+		// Retriable errors
+		case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
+			ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
+			if bp.parent.conf.Producer.Retry.Max <= 0 {
+				bp.parent.abandonBrokerConnection(bp.broker)
+				bp.parent.returnErrors(pSet.msgs, block.Err)
+			} else {
+				retryTopics = append(retryTopics, topic)
+			}
+		// Other non-retriable errors
+		default:
+			if bp.parent.conf.Producer.Retry.Max <= 0 {
+				bp.parent.abandonBrokerConnection(bp.broker)
+			}
+			bp.parent.returnErrors(pSet.msgs, block.Err)
+		}
+	})
+
+	if len(retryTopics) > 0 {
+		if bp.parent.conf.Producer.Idempotent {
+			err := bp.parent.client.RefreshMetadata(retryTopics...)
+			if err != nil {
+				Logger.Printf("Failed refreshing metadata because of %v\n", err)
+			}
+		}
+
+		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			block := response.GetBlock(topic, partition)
+			if block == nil {
+				// handled in the previous "eachPartition" loop
+				return
+			}
+
+			switch block.Err {
+			case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
+				ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
+				Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
+					bp.broker.ID(), topic, partition, block.Err)
+				if bp.currentRetries[topic] == nil {
+					bp.currentRetries[topic] = make(map[int32]error)
+				}
+				bp.currentRetries[topic][partition] = block.Err
+				if bp.parent.conf.Producer.Idempotent {
+					go bp.parent.retryBatch(topic, partition, pSet, block.Err)
+				} else {
+					bp.parent.retryMessages(pSet.msgs, block.Err)
+				}
+				// dropping the following messages has the side effect of incrementing their retry count
+				bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
+			}
+		})
+	}
+}
+
+func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitionSet, kerr KError) {
+	Logger.Printf("Retrying batch for %v-%d because of %s\n", topic, partition, kerr)
+	produceSet := newProduceSet(p)
+	produceSet.msgs[topic] = make(map[int32]*partitionSet)
+	produceSet.msgs[topic][partition] = pSet
+	produceSet.bufferBytes += pSet.bufferBytes
+	produceSet.bufferCount += len(pSet.msgs)
+	for _, msg := range pSet.msgs {
+		if msg.retries >= p.conf.Producer.Retry.Max {
+			p.returnError(msg, kerr)
+			return
+		}
+		msg.retries++
+	}
+
+	// it's expected that a metadata refresh has been requested prior to calling retryBatch
+	leader, err := p.client.Leader(topic, partition)
+	if err != nil {
+		Logger.Printf("Failed retrying batch for %v-%d because of %v while looking up the new leader\n", topic, partition, err)
+		for _, msg := range pSet.msgs {
+			p.returnError(msg, kerr)
+		}
+		return
+	}
+	bp := p.getBrokerProducer(leader)
+	bp.output <- produceSet
+}
+
+func (bp *brokerProducer) handleError(sent *produceSet, err error) {
+	switch err.(type) {
+	case PacketEncodingError:
+		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			bp.parent.returnErrors(pSet.msgs, err)
+		})
+	default:
+		Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
+		bp.parent.abandonBrokerConnection(bp.broker)
+		_ = bp.broker.Close()
+		bp.closing = err
+		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			bp.parent.retryMessages(pSet.msgs, err)
+		})
+		bp.buffer.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			bp.parent.retryMessages(pSet.msgs, err)
+		})
+		bp.rollOver()
+	}
+}
+
+// singleton
+// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
+// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
+func (p *asyncProducer) retryHandler() {
+	var msg *ProducerMessage
+	buf := queue.New()
+
+	for {
+		if buf.Length() == 0 {
+			msg = <-p.retries
+		} else {
+			select {
+			case msg = <-p.retries:
+			case p.input <- buf.Peek().(*ProducerMessage):
+				buf.Remove()
+				continue
+			}
+		}
+
+		if msg == nil {
+			return
+		}
+
+		buf.Add(msg)
+	}
+}
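+
+// infiniteBridge is an editor's sketch, not part of upstream sarama, of the
+// same unbounded-buffer pattern retryHandler uses: receives from in are never
+// blocked by a slow out, because overflow is parked in an in-memory queue.
+func infiniteBridge(in <-chan *ProducerMessage, out chan<- *ProducerMessage) {
+	buf := queue.New()
+	for {
+		if buf.Length() == 0 {
+			msg, ok := <-in
+			if !ok {
+				return
+			}
+			buf.Add(msg)
+			continue
+		}
+		select {
+		case msg, ok := <-in:
+			if !ok {
+				return
+			}
+			buf.Add(msg)
+		case out <- buf.Peek().(*ProducerMessage):
+			buf.Remove()
+		}
+	}
+}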
+
+// utility functions
+
+func (p *asyncProducer) shutdown() {
+	Logger.Println("Producer shutting down.")
+	p.inFlight.Add(1)
+	p.input <- &ProducerMessage{flags: shutdown}
+
+	p.inFlight.Wait()
+
+	err := p.client.Close()
+	if err != nil {
+		Logger.Println("producer/shutdown failed to close the embedded client:", err)
+	}
+
+	close(p.input)
+	close(p.retries)
+	close(p.errors)
+	close(p.successes)
+}
+
+func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
+	msg.clear()
+	pErr := &ProducerError{Msg: msg, Err: err}
+	if p.conf.Producer.Return.Errors {
+		p.errors <- pErr
+	} else {
+		Logger.Println(pErr)
+	}
+	p.inFlight.Done()
+}
+
+func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
+	for _, msg := range batch {
+		p.returnError(msg, err)
+	}
+}
+
+func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
+	for _, msg := range batch {
+		if p.conf.Producer.Return.Successes {
+			msg.clear()
+			p.successes <- msg
+		}
+		p.inFlight.Done()
+	}
+}
+
+func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
+	if msg.retries >= p.conf.Producer.Retry.Max {
+		p.returnError(msg, err)
+	} else {
+		msg.retries++
+		p.retries <- msg
+	}
+}
+
+func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
+	for _, msg := range batch {
+		p.retryMessage(msg, err)
+	}
+}
+
+func (p *asyncProducer) getBrokerProducer(broker *Broker) *brokerProducer {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	bp := p.brokers[broker]
+
+	if bp == nil {
+		bp = p.newBrokerProducer(broker)
+		p.brokers[broker] = bp
+		p.brokerRefs[bp] = 0
+	}
+
+	p.brokerRefs[bp]++
+
+	return bp
+}
+
+func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp *brokerProducer) {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	p.brokerRefs[bp]--
+	if p.brokerRefs[bp] == 0 {
+		close(bp.input)
+		delete(p.brokerRefs, bp)
+
+		if p.brokers[broker] == bp {
+			delete(p.brokers, broker)
+		}
+	}
+}
+
+func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	bc, ok := p.brokers[broker]
+	if ok && bc.abandoned != nil {
+		close(bc.abandoned)
+	}
+
+	delete(p.brokers, broker)
+}
diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/Shopify/sarama/balance_strategy.go
new file mode 100644
index 0000000..67c4d96
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/balance_strategy.go
@@ -0,0 +1,1053 @@
+package sarama
+
+import (
+	"container/heap"
+	"math"
+	"sort"
+	"strings"
+)
+
+const (
+	// RangeBalanceStrategyName identifies strategies that use the range partition assignment strategy
+	RangeBalanceStrategyName = "range"
+
+	// RoundRobinBalanceStrategyName identifies strategies that use the round-robin partition assignment strategy
+	RoundRobinBalanceStrategyName = "roundrobin"
+
+	// StickyBalanceStrategyName identifies strategies that use the sticky-partition assignment strategy
+	StickyBalanceStrategyName = "sticky"
+
+	defaultGeneration = -1
+)
+
+// BalanceStrategyPlan is the results of any BalanceStrategy.Plan attempt.
+// It contains an allocation of topic/partitions by memberID in the form of
+// a `memberID -> topic -> partitions` map.
+type BalanceStrategyPlan map[string]map[string][]int32
+
+// Add assigns a topic with a number of partitions to a member.
+func (p BalanceStrategyPlan) Add(memberID, topic string, partitions ...int32) {
+	if len(partitions) == 0 {
+		return
+	}
+	if _, ok := p[memberID]; !ok {
+		p[memberID] = make(map[string][]int32, 1)
+	}
+	p[memberID][topic] = append(p[memberID][topic], partitions...)
+}
+
+// --------------------------------------------------------------------
+
+// BalanceStrategy is used to balance topics and partitions
+// across members of a consumer group
+type BalanceStrategy interface {
+	// Name uniquely identifies the strategy.
+	Name() string
+
+	// Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions`
+	// and returns a distribution plan.
+	Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error)
+}
+
+// --------------------------------------------------------------------
+
+// BalanceStrategyRange is the default and assigns partitions as ranges to consumer group members.
+// Example with one topic T with six partitions (0..5) and two members (M1, M2):
+//   M1: {T: [0, 1, 2]}
+//   M2: {T: [3, 4, 5]}
+var BalanceStrategyRange = &balanceStrategy{
+	name: RangeBalanceStrategyName,
+	coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
+		step := float64(len(partitions)) / float64(len(memberIDs))
+
+		for i, memberID := range memberIDs {
+			pos := float64(i)
+			min := int(math.Floor(pos*step + 0.5))
+			max := int(math.Floor((pos+1)*step + 0.5))
+			plan.Add(memberID, topic, partitions[min:max]...)
+		}
+	},
+}
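+
+// exampleRangePlan is an editor's sketch, not part of upstream sarama, of
+// driving a BalanceStrategy directly; the member IDs and topic name are
+// hypothetical. With six partitions and two members, BalanceStrategyRange
+// yields m1: {T: [0 1 2]} and m2: {T: [3 4 5]} as documented above.
+func exampleRangePlan() (BalanceStrategyPlan, error) {
+	members := map[string]ConsumerGroupMemberMetadata{
+		"m1": {Topics: []string{"T"}},
+		"m2": {Topics: []string{"T"}},
+	}
+	topics := map[string][]int32{"T": {0, 1, 2, 3, 4, 5}}
+	return BalanceStrategyRange.Plan(members, topics)
+}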
+
+// BalanceStrategyRoundRobin assigns partitions to members in alternating order.
+// Example with topic T with six partitions (0..5) and two members (M1, M2):
+//   M1: {T: [0, 2, 4]}
+//   M2: {T: [1, 3, 5]}
+var BalanceStrategyRoundRobin = &balanceStrategy{
+	name: RoundRobinBalanceStrategyName,
+	coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
+		for i, part := range partitions {
+			memberID := memberIDs[i%len(memberIDs)]
+			plan.Add(memberID, topic, part)
+		}
+	},
+}
+
+// BalanceStrategySticky assigns partitions to members with an attempt to preserve earlier assignments
+// while maintaining a balanced partition distribution.
+// Example with topic T with six partitions (0..5) and two members (M1, M2):
+//   M1: {T: [0, 2, 4]}
+//   M2: {T: [1, 3, 5]}
+//
+// On reassignment with an additional consumer, you might get an assignment plan like:
+//   M1: {T: [0, 2]}
+//   M2: {T: [1, 3]}
+//   M3: {T: [4, 5]}
+//
+var BalanceStrategySticky = &stickyBalanceStrategy{}
+
+// --------------------------------------------------------------------
+
+type balanceStrategy struct {
+	name   string
+	coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32)
+}
+
+// Name implements BalanceStrategy.
+func (s *balanceStrategy) Name() string { return s.name }
+
+// Plan implements BalanceStrategy.
+func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+	// Build members by topic map
+	mbt := make(map[string][]string)
+	for memberID, meta := range members {
+		for _, topic := range meta.Topics {
+			mbt[topic] = append(mbt[topic], memberID)
+		}
+	}
+
+	// Sort members for each topic
+	for topic, memberIDs := range mbt {
+		sort.Sort(&balanceStrategySortable{
+			topic:     topic,
+			memberIDs: memberIDs,
+		})
+	}
+
+	// Assemble plan
+	plan := make(BalanceStrategyPlan, len(members))
+	for topic, memberIDs := range mbt {
+		s.coreFn(plan, memberIDs, topic, topics[topic])
+	}
+	return plan, nil
+}
+
+type balanceStrategySortable struct {
+	topic     string
+	memberIDs []string
+}
+
+func (p balanceStrategySortable) Len() int { return len(p.memberIDs) }
+func (p balanceStrategySortable) Swap(i, j int) {
+	p.memberIDs[i], p.memberIDs[j] = p.memberIDs[j], p.memberIDs[i]
+}
+func (p balanceStrategySortable) Less(i, j int) bool {
+	return balanceStrategyHashValue(p.topic, p.memberIDs[i]) < balanceStrategyHashValue(p.topic, p.memberIDs[j])
+}
+
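+// balanceStrategyHashValue computes a 32-bit FNV-1a style hash over the
+// concatenated strings, rune by rune (2166136261 is the FNV offset basis,
+// 16777619 the FNV prime).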
+func balanceStrategyHashValue(vv ...string) uint32 {
+	h := uint32(2166136261)
+	for _, s := range vv {
+		for _, c := range s {
+			h ^= uint32(c)
+			h *= 16777619
+		}
+	}
+	return h
+}
+
+type stickyBalanceStrategy struct {
+	movements partitionMovements
+}
+
+// Name implements BalanceStrategy.
+func (s *stickyBalanceStrategy) Name() string { return StickyBalanceStrategyName }
+
+// Plan implements BalanceStrategy.
+func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+	// track partition movements during generation of the partition assignment plan
+	s.movements = partitionMovements{
+		Movements:                 make(map[topicPartitionAssignment]consumerPair),
+		PartitionMovementsByTopic: make(map[string]map[consumerPair]map[topicPartitionAssignment]bool),
+	}
+
+	// prepopulate the current assignment state from userdata on the consumer group members
+	currentAssignment, prevAssignment, err := prepopulateCurrentAssignments(members)
+	if err != nil {
+		return nil, err
+	}
+
+	// determine if we're dealing with a completely fresh assignment, or if there's existing assignment state
+	// determine if we're dealing with a completely fresh assignment, or if there's existing assignment state
+	isFreshAssignment := len(currentAssignment) == 0
+
+	// create a mapping of all current topic partitions and the consumers that can be assigned to them
+	partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string)
+	for topic, partitions := range topics {
+		for _, partition := range partitions {
+			partition2AllPotentialConsumers[topicPartitionAssignment{Topic: topic, Partition: partition}] = []string{}
+		}
+	}
+
+	// create a mapping of all consumers to all potential topic partitions that can be assigned to them
+	// also, populate the mapping of partitions to potential consumers
+	consumer2AllPotentialPartitions := make(map[string][]topicPartitionAssignment, len(members))
+	for memberID, meta := range members {
+		consumer2AllPotentialPartitions[memberID] = make([]topicPartitionAssignment, 0)
+		for _, topicSubscription := range meta.Topics {
+			// only evaluate topic subscriptions that are present in the supplied topics map
+			if _, found := topics[topicSubscription]; found {
+				for _, partition := range topics[topicSubscription] {
+					topicPartition := topicPartitionAssignment{Topic: topicSubscription, Partition: partition}
+					consumer2AllPotentialPartitions[memberID] = append(consumer2AllPotentialPartitions[memberID], topicPartition)
+					partition2AllPotentialConsumers[topicPartition] = append(partition2AllPotentialConsumers[topicPartition], memberID)
+				}
+			}
+		}
+
+		// add this consumer to currentAssignment (with an empty topic partition assignment) if it does not already exist
+		if _, exists := currentAssignment[memberID]; !exists {
+			currentAssignment[memberID] = make([]topicPartitionAssignment, 0)
+		}
+	}
+
+	// create a mapping of each partition to its current consumer, where possible
+	currentPartitionConsumers := make(map[topicPartitionAssignment]string, len(currentAssignment))
+	unvisitedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
+	for partition := range partition2AllPotentialConsumers {
+		unvisitedPartitions[partition] = true
+	}
+	var unassignedPartitions []topicPartitionAssignment
+	for memberID, partitions := range currentAssignment {
+		var keepPartitions []topicPartitionAssignment
+		for _, partition := range partitions {
+			// If this partition no longer exists at all, likely due to the
+			// topic being deleted, we remove the partition from the member.
+			if _, exists := partition2AllPotentialConsumers[partition]; !exists {
+				continue
+			}
+			delete(unvisitedPartitions, partition)
+			currentPartitionConsumers[partition] = memberID
+
+			if !strsContains(members[memberID].Topics, partition.Topic) {
+				unassignedPartitions = append(unassignedPartitions, partition)
+				continue
+			}
+			keepPartitions = append(keepPartitions, partition)
+		}
+		currentAssignment[memberID] = keepPartitions
+	}
+	for unvisited := range unvisitedPartitions {
+		unassignedPartitions = append(unassignedPartitions, unvisited)
+	}
+
+	// sort the topic partitions in order of priority for reassignment
+	sortedPartitions := sortPartitions(currentAssignment, prevAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions)
+
+	// at this point we have preserved all valid topic partition to consumer assignments and removed
+	// all invalid topic partitions and invalid consumers. Now we need to assign unassignedPartitions
+	// to consumers so that the topic partition assignments are as balanced as possible.
+
+	// an ascending sorted set of consumers based on how many topic partitions are already assigned to them
+	sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
+	s.balance(currentAssignment, prevAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumers)
+
+	// Assemble plan
+	plan := make(BalanceStrategyPlan, len(currentAssignment))
+	for memberID, assignments := range currentAssignment {
+		if len(assignments) == 0 {
+			plan[memberID] = make(map[string][]int32, 0)
+		} else {
+			for _, assignment := range assignments {
+				plan.Add(memberID, assignment.Topic, assignment.Partition)
+			}
+		}
+	}
+	return plan, nil
+}
+
+func strsContains(s []string, value string) bool {
+	for _, entry := range s {
+		if entry == value {
+			return true
+		}
+	}
+	return false
+}
+
+// Balance assignments across consumers for maximum fairness and stickiness.
+func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) {
+	initializing := false
+	if len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 {
+		initializing = true
+	}
+
+	// assign all unassigned partitions
+	for _, partition := range unassignedPartitions {
+		// skip if there is no potential consumer for the partition
+		if len(partition2AllPotentialConsumers[partition]) == 0 {
+			continue
+		}
+		sortedCurrentSubscriptions = assignPartition(partition, sortedCurrentSubscriptions, currentAssignment, consumer2AllPotentialPartitions, currentPartitionConsumer)
+	}
+
+	// narrow down the reassignment scope to only those partitions that can actually be reassigned
+	for partition := range partition2AllPotentialConsumers {
+		if !canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
+			sortedPartitions = removeTopicPartitionFromMemberAssignments(sortedPartitions, partition)
+		}
+	}
+
+	// narrow down the reassignment scope to only those consumers that are subject to reassignment
+	fixedAssignments := make(map[string][]topicPartitionAssignment)
+	for memberID := range consumer2AllPotentialPartitions {
+		if !canConsumerParticipateInReassignment(memberID, currentAssignment, consumer2AllPotentialPartitions, partition2AllPotentialConsumers) {
+			fixedAssignments[memberID] = currentAssignment[memberID]
+			delete(currentAssignment, memberID)
+			sortedCurrentSubscriptions = sortMemberIDsByPartitionAssignments(currentAssignment)
+		}
+	}
+
+	// create a deep copy of the current assignment so we can revert to it if we do not get a more balanced assignment later
+	preBalanceAssignment := deepCopyAssignment(currentAssignment)
+	preBalancePartitionConsumers := make(map[topicPartitionAssignment]string, len(currentPartitionConsumer))
+	for k, v := range currentPartitionConsumer {
+		preBalancePartitionConsumers[k] = v
+	}
+
+	reassignmentPerformed := s.performReassignments(sortedPartitions, currentAssignment, prevAssignment, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer)
+
+	// if we are not preserving existing assignments and we have made changes to the current assignment
+	// make sure we are getting a more balanced assignment; otherwise, revert to previous assignment
+	if !initializing && reassignmentPerformed && getBalanceScore(currentAssignment) >= getBalanceScore(preBalanceAssignment) {
+		currentAssignment = deepCopyAssignment(preBalanceAssignment)
+		currentPartitionConsumer = make(map[topicPartitionAssignment]string, len(preBalancePartitionConsumers))
+		for k, v := range preBalancePartitionConsumers {
+			currentPartitionConsumer[k] = v
+		}
+	}
+
+	// add the fixed assignments (those that could not change) back
+	for consumer, assignments := range fixedAssignments {
+		currentAssignment[consumer] = assignments
+	}
+}
+
+// Calculate the balance score of the given assignment, as the sum of assigned partitions size difference of all consumer pairs.
+// A perfectly balanced assignment (with all consumers getting the same number of partitions) has a balance score of 0.
+// Lower balance score indicates a more balanced assignment.
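+// For example, assignment sizes {3, 1, 1} score |3-1| + |3-1| + |1-1| = 4.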
+func getBalanceScore(assignment map[string][]topicPartitionAssignment) int {
+	consumer2AssignmentSize := make(map[string]int, len(assignment))
+	for memberID, partitions := range assignment {
+		consumer2AssignmentSize[memberID] = len(partitions)
+	}
+
+	var score float64
+	for memberID, consumerAssignmentSize := range consumer2AssignmentSize {
+		delete(consumer2AssignmentSize, memberID)
+		for _, otherConsumerAssignmentSize := range consumer2AssignmentSize {
+			score += math.Abs(float64(consumerAssignmentSize - otherConsumerAssignmentSize))
+		}
+	}
+	return int(score)
+}
+
+// Determine whether the current assignment plan is balanced.
+func isBalanced(currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, allSubscriptions map[string][]topicPartitionAssignment) bool {
+	sortedCurrentSubscriptions = sortMemberIDsByPartitionAssignments(currentAssignment)
+	min := len(currentAssignment[sortedCurrentSubscriptions[0]])
+	max := len(currentAssignment[sortedCurrentSubscriptions[len(sortedCurrentSubscriptions)-1]])
+	if min >= max-1 {
+		// if minimum and maximum numbers of partitions assigned to consumers differ by at most one return true
+		return true
+	}
+
+	// create a mapping from partitions to the consumer assigned to them
+	allPartitions := make(map[topicPartitionAssignment]string)
+	for memberID, partitions := range currentAssignment {
+		for _, partition := range partitions {
+			if _, exists := allPartitions[partition]; exists {
+				Logger.Printf("Topic %s Partition %d is assigned to more than one consumer", partition.Topic, partition.Partition)
+			}
+			allPartitions[partition] = memberID
+		}
+	}
+
+	// for each consumer that does not have all the topic partitions it could get, make sure none of the topic
+	// partitions it could have but does not have can be moved to it (because that would break the balance)
+	for _, memberID := range sortedCurrentSubscriptions {
+		consumerPartitions := currentAssignment[memberID]
+		consumerPartitionCount := len(consumerPartitions)
+
+		// skip if this consumer already has all the topic partitions it can get
+		if consumerPartitionCount == len(allSubscriptions[memberID]) {
+			continue
+		}
+
+		// otherwise make sure it cannot get any more
+		potentialTopicPartitions := allSubscriptions[memberID]
+		for _, partition := range potentialTopicPartitions {
+			if !memberAssignmentsIncludeTopicPartition(currentAssignment[memberID], partition) {
+				otherConsumer := allPartitions[partition]
+				otherConsumerPartitionCount := len(currentAssignment[otherConsumer])
+				if consumerPartitionCount < otherConsumerPartitionCount {
+					return false
+				}
+			}
+		}
+	}
+	return true
+}
+
+// Reassign all topic partitions that need reassignment until balanced.
+func (s *stickyBalanceStrategy) performReassignments(reassignablePartitions []topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) bool {
+	reassignmentPerformed := false
+	modified := false
+
+	// repeat reassignment until no partition can be moved to improve the balance
+	for {
+		modified = false
+		// reassign all reassignable partitions (starting from the partition with the fewest potential consumers)
+		// until the full list is processed or a balance is achieved
+		for _, partition := range reassignablePartitions {
+			if isBalanced(currentAssignment, sortedCurrentSubscriptions, consumer2AllPotentialPartitions) {
+				break
+			}
+
+			// the partition must have at least two consumers
+			if len(partition2AllPotentialConsumers[partition]) <= 1 {
+				Logger.Printf("Expected more than one potential consumer for topic %s partition %d", partition.Topic, partition.Partition)
+			}
+
+			// the partition must have a consumer
+			consumer := currentPartitionConsumer[partition]
+			if consumer == "" {
+				Logger.Printf("Expected topic %s partition %d to be assigned to a consumer", partition.Topic, partition.Partition)
+			}
+
+			if _, exists := prevAssignment[partition]; exists {
+				if len(currentAssignment[consumer]) > (len(currentAssignment[prevAssignment[partition].MemberID]) + 1) {
+					sortedCurrentSubscriptions = s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, prevAssignment[partition].MemberID)
+					reassignmentPerformed = true
+					modified = true
+					continue
+				}
+			}
+
+			// check if a better-suited consumer exists for the partition; if so, reassign it
+			for _, otherConsumer := range partition2AllPotentialConsumers[partition] {
+				if len(currentAssignment[consumer]) > (len(currentAssignment[otherConsumer]) + 1) {
+					sortedCurrentSubscriptions = s.reassignPartitionToNewConsumer(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, consumer2AllPotentialPartitions)
+					reassignmentPerformed = true
+					modified = true
+					break
+				}
+			}
+		}
+		if !modified {
+			return reassignmentPerformed
+		}
+	}
+}
+
+// Identify a new consumer for a topic partition and reassign it.
+func (s *stickyBalanceStrategy) reassignPartitionToNewConsumer(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []string {
+	for _, anotherConsumer := range sortedCurrentSubscriptions {
+		if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[anotherConsumer], partition) {
+			return s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, anotherConsumer)
+		}
+	}
+	return sortedCurrentSubscriptions
+}
+
+// Reassign a specific partition to a new consumer
+func (s *stickyBalanceStrategy) reassignPartition(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, newConsumer string) []string {
+	consumer := currentPartitionConsumer[partition]
+	// find the correct partition movement considering the stickiness requirement
+	partitionToBeMoved := s.movements.getTheActualPartitionToBeMoved(partition, consumer, newConsumer)
+	return s.processPartitionMovement(partitionToBeMoved, newConsumer, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer)
+}
+
+// Track the movement of a topic partition after assignment
+func (s *stickyBalanceStrategy) processPartitionMovement(partition topicPartitionAssignment, newConsumer string, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
+	oldConsumer := currentPartitionConsumer[partition]
+	s.movements.movePartition(partition, oldConsumer, newConsumer)
+
+	currentAssignment[oldConsumer] = removeTopicPartitionFromMemberAssignments(currentAssignment[oldConsumer], partition)
+	currentAssignment[newConsumer] = append(currentAssignment[newConsumer], partition)
+	currentPartitionConsumer[partition] = newConsumer
+	return sortMemberIDsByPartitionAssignments(currentAssignment)
+}
+
+// Determine whether a specific consumer should be considered for topic partition assignment.
+func canConsumerParticipateInReassignment(memberID string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
+	currentPartitions := currentAssignment[memberID]
+	currentAssignmentSize := len(currentPartitions)
+	maxAssignmentSize := len(consumer2AllPotentialPartitions[memberID])
+	if currentAssignmentSize > maxAssignmentSize {
+		Logger.Printf("The consumer %s is assigned more partitions than the maximum possible", memberID)
+	}
+	if currentAssignmentSize < maxAssignmentSize {
+		// if a consumer is not assigned all its potential partitions it is subject to reassignment
+		return true
+	}
+	for _, partition := range currentPartitions {
+		if canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
+			return true
+		}
+	}
+	return false
+}
+
+// Only consider reassigning those topic partitions that have two or more potential consumers.
+func canTopicPartitionParticipateInReassignment(partition topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
+	return len(partition2AllPotentialConsumers[partition]) >= 2
+}
+
+// The assignment should improve the overall balance of the partition assignments to consumers.
+func assignPartition(partition topicPartitionAssignment, sortedCurrentSubscriptions []string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
+	for _, memberID := range sortedCurrentSubscriptions {
+		if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[memberID], partition) {
+			currentAssignment[memberID] = append(currentAssignment[memberID], partition)
+			currentPartitionConsumer[partition] = memberID
+			break
+		}
+	}
+	return sortMemberIDsByPartitionAssignments(currentAssignment)
+}
+
+// Deserialize topic partition assignment data to aid with creation of a sticky assignment.
+func deserializeTopicPartitionAssignment(userDataBytes []byte) (StickyAssignorUserData, error) {
+	userDataV1 := &StickyAssignorUserDataV1{}
+	if err := decode(userDataBytes, userDataV1); err != nil {
+		userDataV0 := &StickyAssignorUserDataV0{}
+		if err := decode(userDataBytes, userDataV0); err != nil {
+			return nil, err
+		}
+		return userDataV0, nil
+	}
+	return userDataV1, nil
+}
+
+// filterAssignedPartitions returns a map of consumer group members to their list of previously-assigned topic partitions, limited
+// to those topic partitions currently reported by the Kafka cluster.
+func filterAssignedPartitions(currentAssignment map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) map[string][]topicPartitionAssignment {
+	assignments := deepCopyAssignment(currentAssignment)
+	for memberID, partitions := range assignments {
+		// perform in-place filtering
+		i := 0
+		for _, partition := range partitions {
+			if _, exists := partition2AllPotentialConsumers[partition]; exists {
+				partitions[i] = partition
+				i++
+			}
+		}
+		assignments[memberID] = partitions[:i]
+	}
+	return assignments
+}
+
+func removeTopicPartitionFromMemberAssignments(assignments []topicPartitionAssignment, topic topicPartitionAssignment) []topicPartitionAssignment {
+	for i, assignment := range assignments {
+		if assignment == topic {
+			return append(assignments[:i], assignments[i+1:]...)
+		}
+	}
+	return assignments
+}
+
+func memberAssignmentsIncludeTopicPartition(assignments []topicPartitionAssignment, topic topicPartitionAssignment) bool {
+	for _, assignment := range assignments {
+		if assignment == topic {
+			return true
+		}
+	}
+	return false
+}
+
+func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, partitionsWithADifferentPreviousAssignment map[topicPartitionAssignment]consumerGenerationPair, isFreshAssignment bool, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []topicPartitionAssignment {
+	unassignedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
+	for partition := range partition2AllPotentialConsumers {
+		unassignedPartitions[partition] = true
+	}
+
+	sortedPartitions := make([]topicPartitionAssignment, 0)
+	if !isFreshAssignment && areSubscriptionsIdentical(partition2AllPotentialConsumers, consumer2AllPotentialPartitions) {
+		// if this is a reassignment and the subscriptions are identical (all consumers can consume from all topics)
+		// then we simply list partitions in a round-robin fashion (from consumers with
+		// most assigned partitions to those with least)
+		assignments := filterAssignedPartitions(currentAssignment, partition2AllPotentialConsumers)
+
+		// use priority-queue to evaluate consumer group members in descending-order based on
+		// the number of topic partition assignments (i.e. consumers with most assignments first)
+		pq := make(assignmentPriorityQueue, len(assignments))
+		i := 0
+		for consumerID, consumerAssignments := range assignments {
+			pq[i] = &consumerGroupMember{
+				id:          consumerID,
+				assignments: consumerAssignments,
+			}
+			i++
+		}
+		heap.Init(&pq)
+
+		// loop until no consumer-group members remain
+		for pq.Len() > 0 {
+			member := pq[0]
+
+			// partitions that were assigned to a different consumer last time
+			var prevPartitionIndex int
+			for i, partition := range member.assignments {
+				if _, exists := partitionsWithADifferentPreviousAssignment[partition]; exists {
+					prevPartitionIndex = i
+					break
+				}
+			}
+
+			if len(member.assignments) > 0 {
+				partition := member.assignments[prevPartitionIndex]
+				sortedPartitions = append(sortedPartitions, partition)
+				delete(unassignedPartitions, partition)
+				if prevPartitionIndex == 0 {
+					member.assignments = member.assignments[1:]
+				} else {
+					member.assignments = append(member.assignments[:prevPartitionIndex], member.assignments[prevPartitionIndex+1:]...)
+				}
+				heap.Fix(&pq, 0)
+			} else {
+				heap.Pop(&pq)
+			}
+		}
+
+		for partition := range unassignedPartitions {
+			sortedPartitions = append(sortedPartitions, partition)
+		}
+	} else {
+		// an ascending sorted set of topic partitions based on how many consumers can potentially use them
+		sortedPartitions = sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers)
+	}
+	return sortedPartitions
+}
+
+func sortMemberIDsByPartitionAssignments(assignments map[string][]topicPartitionAssignment) []string {
+	// sort the members by the number of partition assignments in ascending order
+	sortedMemberIDs := make([]string, 0, len(assignments))
+	for memberID := range assignments {
+		sortedMemberIDs = append(sortedMemberIDs, memberID)
+	}
+	sort.SliceStable(sortedMemberIDs, func(i, j int) bool {
+		ret := len(assignments[sortedMemberIDs[i]]) - len(assignments[sortedMemberIDs[j]])
+		if ret == 0 {
+			return sortedMemberIDs[i] < sortedMemberIDs[j]
+		}
+		return ret < 0
+	})
+	return sortedMemberIDs
+}
+
+func sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers map[topicPartitionAssignment][]string) []topicPartitionAssignment {
+	// sort the partitions by the number of potential consumers in ascending order
+	sortedPartionIDs := make([]topicPartitionAssignment, len(partition2AllPotentialConsumers))
+	i := 0
+	for partition := range partition2AllPotentialConsumers {
+		sortedPartionIDs[i] = partition
+		i++
+	}
+	sort.Slice(sortedPartionIDs, func(i, j int) bool {
+		if len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) == len(partition2AllPotentialConsumers[sortedPartionIDs[j]]) {
+			ret := strings.Compare(sortedPartionIDs[i].Topic, sortedPartionIDs[j].Topic)
+			if ret == 0 {
+				return sortedPartionIDs[i].Partition < sortedPartionIDs[j].Partition
+			}
+			return ret < 0
+		}
+		return len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) < len(partition2AllPotentialConsumers[sortedPartionIDs[j]])
+	})
+	return sortedPartionIDs
+}
+
+func deepCopyPartitions(src []topicPartitionAssignment) []topicPartitionAssignment {
+	dst := make([]topicPartitionAssignment, len(src))
+	copy(dst, src)
+	return dst
+}
+
+func deepCopyAssignment(assignment map[string][]topicPartitionAssignment) map[string][]topicPartitionAssignment {
+	copy := make(map[string][]topicPartitionAssignment, len(assignment))
+	for memberID, subscriptions := range assignment {
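+		// appending to a zero-length, zero-capacity reslice forces a fresh
+		// backing array, so the copied slice shares no memory with the source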
+		copy[memberID] = append(subscriptions[:0:0], subscriptions...)
+	}
+	return copy
+}
+
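+// areSubscriptionsIdentical reports whether every partition is consumable by the
+// same multiset of members and every member can consume the same multiset of
+// partitions; only then can partitions be handed out in simple round-robin order.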
+func areSubscriptionsIdentical(partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) bool {
+	curMembers := make(map[string]int)
+	for _, cur := range partition2AllPotentialConsumers {
+		if len(curMembers) == 0 {
+			for _, curMembersElem := range cur {
+				curMembers[curMembersElem]++
+			}
+			continue
+		}
+
+		if len(curMembers) != len(cur) {
+			return false
+		}
+
+		yMap := make(map[string]int)
+		for _, yElem := range cur {
+			yMap[yElem]++
+		}
+
+		for curMembersMapKey, curMembersMapVal := range curMembers {
+			if yMap[curMembersMapKey] != curMembersMapVal {
+				return false
+			}
+		}
+	}
+
+	curPartitions := make(map[topicPartitionAssignment]int)
+	for _, cur := range consumer2AllPotentialPartitions {
+		if len(curPartitions) == 0 {
+			for _, curPartitionElem := range cur {
+				curPartitions[curPartitionElem]++
+			}
+			continue
+		}
+
+		if len(curPartitions) != len(cur) {
+			return false
+		}
+
+		yMap := make(map[topicPartitionAssignment]int)
+		for _, yElem := range cur {
+			yMap[yElem]++
+		}
+
+		for curMembersMapKey, curMembersMapVal := range curPartitions {
+			if yMap[curMembersMapKey] != curMembersMapVal {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// We need to process subscriptions' user data with each consumer's reported generation in mind:
+// higher generations overwrite lower generations in case of a conflict.
+// Note that a conflict could exist only if user data is for different generations.
+func prepopulateCurrentAssignments(members map[string]ConsumerGroupMemberMetadata) (map[string][]topicPartitionAssignment, map[topicPartitionAssignment]consumerGenerationPair, error) {
+	currentAssignment := make(map[string][]topicPartitionAssignment)
+	prevAssignment := make(map[topicPartitionAssignment]consumerGenerationPair)
+
+	// for each partition we collect its consumers keyed by generation (sorted by generation when consumed below)
+	sortedPartitionConsumersByGeneration := make(map[topicPartitionAssignment]map[int]string)
+	for memberID, meta := range members {
+		consumerUserData, err := deserializeTopicPartitionAssignment(meta.UserData)
+		if err != nil {
+			return nil, nil, err
+		}
+		for _, partition := range consumerUserData.partitions() {
+			if consumers, exists := sortedPartitionConsumersByGeneration[partition]; exists {
+				if consumerUserData.hasGeneration() {
+					if _, generationExists := consumers[consumerUserData.generation()]; generationExists {
+						// same partition is assigned to two consumers during the same rebalance.
+						// log a warning and skip this record
+						Logger.Printf("Topic %s Partition %d is assigned to multiple consumers following sticky assignment generation %d", partition.Topic, partition.Partition, consumerUserData.generation())
+						continue
+					} else {
+						consumers[consumerUserData.generation()] = memberID
+					}
+				} else {
+					consumers[defaultGeneration] = memberID
+				}
+			} else {
+				generation := defaultGeneration
+				if consumerUserData.hasGeneration() {
+					generation = consumerUserData.generation()
+				}
+				sortedPartitionConsumersByGeneration[partition] = map[int]string{generation: memberID}
+			}
+		}
+	}
+
+	// prevAssignment holds the prior consumerGenerationPair (before the current one) of each partition;
+	// the current and previous consumers are the two highest generations recorded for each partition above
+	for partition, consumers := range sortedPartitionConsumersByGeneration {
+		// sort consumers by generation in decreasing order
+		var generations []int
+		for generation := range consumers {
+			generations = append(generations, generation)
+		}
+		sort.Sort(sort.Reverse(sort.IntSlice(generations)))
+
+		consumer := consumers[generations[0]]
+		if _, exists := currentAssignment[consumer]; !exists {
+			currentAssignment[consumer] = []topicPartitionAssignment{partition}
+		} else {
+			currentAssignment[consumer] = append(currentAssignment[consumer], partition)
+		}
+
+		// check for previous assignment, if any
+		if len(generations) > 1 {
+			prevAssignment[partition] = consumerGenerationPair{
+				MemberID:   consumers[generations[1]],
+				Generation: generations[1],
+			}
+		}
+	}
+	return currentAssignment, prevAssignment, nil
+}
+
+type consumerGenerationPair struct {
+	MemberID   string
+	Generation int
+}
+
+// consumerPair represents a pair of Kafka consumer ids involved in a partition reassignment.
+type consumerPair struct {
+	SrcMemberID string
+	DstMemberID string
+}
+
+// partitionMovements maintains some data structures to simplify lookup of partition movements among consumers.
+type partitionMovements struct {
+	PartitionMovementsByTopic map[string]map[consumerPair]map[topicPartitionAssignment]bool
+	Movements                 map[topicPartitionAssignment]consumerPair
+}
+
+func (p *partitionMovements) removeMovementRecordOfPartition(partition topicPartitionAssignment) consumerPair {
+	pair := p.Movements[partition]
+	delete(p.Movements, partition)
+
+	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+	delete(partitionMovementsForThisTopic[pair], partition)
+	if len(partitionMovementsForThisTopic[pair]) == 0 {
+		delete(partitionMovementsForThisTopic, pair)
+	}
+	if len(p.PartitionMovementsByTopic[partition.Topic]) == 0 {
+		delete(p.PartitionMovementsByTopic, partition.Topic)
+	}
+	return pair
+}
+
+func (p *partitionMovements) addPartitionMovementRecord(partition topicPartitionAssignment, pair consumerPair) {
+	p.Movements[partition] = pair
+	if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
+		p.PartitionMovementsByTopic[partition.Topic] = make(map[consumerPair]map[topicPartitionAssignment]bool)
+	}
+	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+	if _, exists := partitionMovementsForThisTopic[pair]; !exists {
+		partitionMovementsForThisTopic[pair] = make(map[topicPartitionAssignment]bool)
+	}
+	partitionMovementsForThisTopic[pair][partition] = true
+}
+
+func (p *partitionMovements) movePartition(partition topicPartitionAssignment, oldConsumer, newConsumer string) {
+	pair := consumerPair{
+		SrcMemberID: oldConsumer,
+		DstMemberID: newConsumer,
+	}
+	if _, exists := p.Movements[partition]; exists {
+		// this partition has previously moved
+		existingPair := p.removeMovementRecordOfPartition(partition)
+		if existingPair.DstMemberID != oldConsumer {
+			Logger.Printf("Existing pair DstMemberID %s was not equal to the oldConsumer ID %s", existingPair.DstMemberID, oldConsumer)
+		}
+		if existingPair.SrcMemberID != newConsumer {
+			// the partition is not moving back to its previous consumer
+			p.addPartitionMovementRecord(partition, consumerPair{
+				SrcMemberID: existingPair.SrcMemberID,
+				DstMemberID: newConsumer,
+			})
+		}
+	} else {
+		p.addPartitionMovementRecord(partition, pair)
+	}
+}
+
+func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicPartitionAssignment, oldConsumer, newConsumer string) topicPartitionAssignment {
+	if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
+		return partition
+	}
+	if _, exists := p.Movements[partition]; exists {
+		// this partition has previously moved
+		if oldConsumer != p.Movements[partition].DstMemberID {
+			Logger.Printf("Partition movement DstMemberID %s was not equal to the oldConsumer ID %s", p.Movements[partition].DstMemberID, oldConsumer)
+		}
+		oldConsumer = p.Movements[partition].SrcMemberID
+	}
+
+	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+	reversePair := consumerPair{
+		SrcMemberID: newConsumer,
+		DstMemberID: oldConsumer,
+	}
+	if _, exists := partitionMovementsForThisTopic[reversePair]; !exists {
+		return partition
+	}
+	var reversePairPartition topicPartitionAssignment
+	for otherPartition := range partitionMovementsForThisTopic[reversePair] {
+		reversePairPartition = otherPartition
+	}
+	return reversePairPartition
+}
+
+func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) {
+	if src == dst {
+		return currentPath, false
+	}
+	if len(pairs) == 0 {
+		return currentPath, false
+	}
+	for _, pair := range pairs {
+		if src == pair.SrcMemberID && dst == pair.DstMemberID {
+			currentPath = append(currentPath, src, dst)
+			return currentPath, true
+		}
+	}
+
+	for _, pair := range pairs {
+		if pair.SrcMemberID == src {
+			// create a deep copy of the pairs, excluding the current pair
+			reducedSet := make([]consumerPair, len(pairs)-1)
+			i := 0
+			for _, p := range pairs {
+				if p != pair {
+					reducedSet[i] = p
+					i++
+				}
+			}
+
+			currentPath = append(currentPath, pair.SrcMemberID)
+			return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath)
+		}
+	}
+	return currentPath, false
+}
+
+func (p *partitionMovements) in(cycle []string, cycles [][]string) bool {
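+	// a rotation of cycle appears as a contiguous sub-list of cycle[:len-1]
+	// concatenated with cycle itself (the "super cycle" built below), so two
+	// cycles that are rotations of each other are treated as equal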
+	superCycle := make([]string, len(cycle)-1)
+	for i := 0; i < len(cycle)-1; i++ {
+		superCycle[i] = cycle[i]
+	}
+	for _, c := range cycle {
+		superCycle = append(superCycle, c)
+	}
+	for _, foundCycle := range cycles {
+		if len(foundCycle) == len(cycle) && indexOfSubList(superCycle, foundCycle) != -1 {
+			return true
+		}
+	}
+	return false
+}
+
+func (p *partitionMovements) hasCycles(pairs []consumerPair) bool {
+	cycles := make([][]string, 0)
+	for _, pair := range pairs {
+		// create a deep copy of the pairs, excluding the current pair
+		reducedPairs := make([]consumerPair, len(pairs)-1)
+		i := 0
+		for _, p := range pairs {
+			if p != pair {
+				reducedPairs[i] = p
+				i++
+			}
+		}
+		if path, linked := p.isLinked(pair.DstMemberID, pair.SrcMemberID, reducedPairs, []string{pair.SrcMemberID}); linked {
+			if !p.in(path, cycles) {
+				cycles = append(cycles, path)
+				Logger.Printf("A cycle of length %d was found: %v", len(path)-1, path)
+			}
+		}
+	}
+
+	// for now we want to make sure there are no partition movements of the same topic between a pair of consumers.
+	// the odds of finding a cycle among more than two consumers seem to be so low (according to various randomized
+	// tests with the given sticky algorithm) that it is not worth the added complexity of handling those cases.
+	for _, cycle := range cycles {
+		if len(cycle) == 3 {
+			return true
+		}
+	}
+	return false
+}
+
+func (p *partitionMovements) isSticky() bool {
+	for topic, movements := range p.PartitionMovementsByTopic {
+		movementPairs := make([]consumerPair, len(movements))
+		i := 0
+		for pair := range movements {
+			movementPairs[i] = pair
+			i++
+		}
+		if p.hasCycles(movementPairs) {
+			Logger.Printf("Stickiness is violated for topic %s", topic)
+			Logger.Printf("Partition movements for this topic occurred among the following consumer pairs: %v", movements)
+			return false
+		}
+	}
+	return true
+}
+
+func indexOfSubList(source []string, target []string) int {
+	targetSize := len(target)
+	maxCandidate := len(source) - targetSize
+nextCand:
+	for candidate := 0; candidate <= maxCandidate; candidate++ {
+		j := candidate
+		for i := 0; i < targetSize; i++ {
+			if target[i] != source[j] {
+				// Element mismatch, try the next candidate
+				continue nextCand
+			}
+			j++
+		}
+		// All elements of candidate matched target
+		return candidate
+	}
+	return -1
+}
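+
+// For example, indexOfSubList([]string{"a", "b", "a", "b", "a"}, []string{"b", "a", "b"})
+// returns 1; this is how `in` recognizes one cycle as a rotation of another.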
+
+type consumerGroupMember struct {
+	id          string
+	assignments []topicPartitionAssignment
+}
+
+// assignmentPriorityQueue is a priority-queue of consumer group members that is sorted
+// in descending order (most assignments to least assignments).
+type assignmentPriorityQueue []*consumerGroupMember
+
+func (pq assignmentPriorityQueue) Len() int { return len(pq) }
+
+func (pq assignmentPriorityQueue) Less(i, j int) bool {
+	// order the assignment priority queue in descending order by assignment count, breaking ties by descending member ID
+	if len(pq[i].assignments) == len(pq[j].assignments) {
+		return strings.Compare(pq[i].id, pq[j].id) > 0
+	}
+	return len(pq[i].assignments) > len(pq[j].assignments)
+}
+
+func (pq assignmentPriorityQueue) Swap(i, j int) {
+	pq[i], pq[j] = pq[j], pq[i]
+}
+
+func (pq *assignmentPriorityQueue) Push(x interface{}) {
+	member := x.(*consumerGroupMember)
+	*pq = append(*pq, member)
+}
+
+func (pq *assignmentPriorityQueue) Pop() interface{} {
+	old := *pq
+	n := len(old)
+	member := old[n-1]
+	*pq = old[0 : n-1]
+	return member
+}
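+
+// assignmentPriorityQueue implements heap.Interface and is driven through the
+// standard container/heap package. A minimal sketch (m1/m2 and the assignment
+// slices are hypothetical placeholders):
+//
+//	pq := assignmentPriorityQueue{}
+//	heap.Push(&pq, &consumerGroupMember{id: "m1", assignments: m1Assignments})
+//	heap.Push(&pq, &consumerGroupMember{id: "m2", assignments: m2Assignments})
+//	// per Less above, Pop yields the member with the most assignments first
+//	busiest := heap.Pop(&pq).(*consumerGroupMember)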
diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go
new file mode 100644
index 0000000..8146749
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/broker.go
@@ -0,0 +1,1354 @@
+package sarama
+
+import (
+	"crypto/tls"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"net"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	metrics "github.com/rcrowley/go-metrics"
+)
+
+// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
+type Broker struct {
+	conf *Config
+	rack *string
+
+	id            int32
+	addr          string
+	correlationID int32
+	conn          net.Conn
+	connErr       error
+	lock          sync.Mutex
+	opened        int32
+	responses     chan responsePromise
+	done          chan bool
+
+	registeredMetrics []string
+
+	incomingByteRate       metrics.Meter
+	requestRate            metrics.Meter
+	requestSize            metrics.Histogram
+	requestLatency         metrics.Histogram
+	outgoingByteRate       metrics.Meter
+	responseRate           metrics.Meter
+	responseSize           metrics.Histogram
+	brokerIncomingByteRate metrics.Meter
+	brokerRequestRate      metrics.Meter
+	brokerRequestSize      metrics.Histogram
+	brokerRequestLatency   metrics.Histogram
+	brokerOutgoingByteRate metrics.Meter
+	brokerResponseRate     metrics.Meter
+	brokerResponseSize     metrics.Histogram
+
+	kerberosAuthenticator GSSAPIKerberosAuth
+}
+
+// SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker
+type SASLMechanism string
+
+const (
+	// SASLTypeOAuth represents the SASL/OAUTHBEARER mechanism (Kafka 2.0.0+)
+	SASLTypeOAuth = "OAUTHBEARER"
+	// SASLTypePlaintext represents the SASL/PLAIN mechanism
+	SASLTypePlaintext = "PLAIN"
+	// SASLTypeSCRAMSHA256 represents the SCRAM-SHA-256 mechanism.
+	SASLTypeSCRAMSHA256 = "SCRAM-SHA-256"
+	// SASLTypeSCRAMSHA512 represents the SCRAM-SHA-512 mechanism.
+	SASLTypeSCRAMSHA512 = "SCRAM-SHA-512"
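+	// SASLTypeGSSAPI represents the GSSAPI mechanism.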
+	SASLTypeGSSAPI      = "GSSAPI"
+	// SASLHandshakeV0 is v0 of the Kafka SASL handshake protocol. Client and
+	// server negotiate SASL auth using opaque packets.
+	SASLHandshakeV0 = int16(0)
+	// SASLHandshakeV1 is v1 of the Kafka SASL handshake protocol. Client and
+	// server negotiate SASL by wrapping tokens with Kafka protocol headers.
+	SASLHandshakeV1 = int16(1)
+	// SASLExtKeyAuth is the reserved extension key name sent as part of the
+	// SASL/OAUTHBEARER initial client response
+	SASLExtKeyAuth = "auth"
+)
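+
+// A minimal sketch of selecting a mechanism through the client configuration
+// (assuming matching credentials are provisioned on the broker):
+//
+//	conf := NewConfig()
+//	conf.Net.SASL.Enable = true
+//	conf.Net.SASL.Mechanism = SASLTypePlaintext
+//	conf.Net.SASL.User = "alice"
+//	conf.Net.SASL.Password = "alice-secret"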
+
+// AccessToken contains an access token used to authenticate a
+// SASL/OAUTHBEARER client along with associated metadata.
+type AccessToken struct {
+	// Token is the access token payload.
+	Token string
+	// Extensions is an optional map of arbitrary key-value pairs that can be
+	// sent with the SASL/OAUTHBEARER initial client response. These values are
+	// ignored by the SASL server if they are unexpected. This feature is only
+	// supported by Kafka >= 2.1.0.
+	Extensions map[string]string
+}
+
+// AccessTokenProvider is the interface that encapsulates how implementors
+// can generate access tokens for Kafka broker authentication.
+type AccessTokenProvider interface {
+	// Token returns an access token. The implementation should ensure token
+	// reuse so that multiple calls at connect time do not create multiple
+	// tokens. The implementation should also periodically refresh the token in
+	// order to guarantee that each call returns an unexpired token.  This
+	// method should not block indefinitely--a timeout error should be returned
+	// after a short period of inactivity so that the broker connection logic
+	// can log debugging information and retry.
+	Token() (*AccessToken, error)
+}
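+
+// A minimal static implementation sketch (fine for tests, but a real provider
+// should refresh and reuse tokens as described above):
+//
+//	type staticTokenProvider struct{ token string }
+//
+//	func (s *staticTokenProvider) Token() (*AccessToken, error) {
+//		return &AccessToken{Token: s.token}, nil
+//	}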
+
+// SCRAMClient is an interface to a SCRAM
+// client implementation.
+type SCRAMClient interface {
+	// Begin prepares the client for the SCRAM exchange
+	// with the server with a user name and a password
+	Begin(userName, password, authzID string) error
+	// Step steps client through the SCRAM exchange. It is
+	// called repeatedly until it errors or `Done` returns true.
+	Step(challenge string) (response string, err error)
+	// Done should return true when the SCRAM conversation
+	// is over.
+	Done() bool
+}
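+
+// Implementations are typically thin wrappers around an external SCRAM
+// library and are wired in through the configuration; a sketch, where
+// XDGSCRAMClient is a hypothetical type satisfying this interface:
+//
+//	conf.Net.SASL.Mechanism = SASLTypeSCRAMSHA512
+//	conf.Net.SASL.SCRAMClientGeneratorFunc = func() SCRAMClient {
+//		return &XDGSCRAMClient{}
+//	}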
+
+type responsePromise struct {
+	requestTime   time.Time
+	correlationID int32
+	packets       chan []byte
+	errors        chan error
+}
+
+// NewBroker creates and returns a Broker targeting the given host:port address.
+// This does not attempt to actually connect; you have to call Open() for that.
+func NewBroker(addr string) *Broker {
+	return &Broker{id: -1, addr: addr}
+}
+
+// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
+// waiting for the connection to complete. This means that any subsequent operations on the broker will
+// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
+// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
+// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
+func (b *Broker) Open(conf *Config) error {
+	if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
+		return ErrAlreadyConnected
+	}
+
+	if conf == nil {
+		conf = NewConfig()
+	}
+
+	err := conf.Validate()
+	if err != nil {
+		return err
+	}
+
+	b.lock.Lock()
+
+	go withRecover(func() {
+		defer b.lock.Unlock()
+
+		dialer := net.Dialer{
+			Timeout:   conf.Net.DialTimeout,
+			KeepAlive: conf.Net.KeepAlive,
+			LocalAddr: conf.Net.LocalAddr,
+		}
+
+		if conf.Net.TLS.Enable {
+			b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
+		} else if conf.Net.Proxy.Enable {
+			b.conn, b.connErr = conf.Net.Proxy.Dialer.Dial("tcp", b.addr)
+		} else {
+			b.conn, b.connErr = dialer.Dial("tcp", b.addr)
+		}
+		if b.connErr != nil {
+			Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
+			b.conn = nil
+			atomic.StoreInt32(&b.opened, 0)
+			return
+		}
+		b.conn = newBufConn(b.conn)
+
+		b.conf = conf
+
+		// Create or reuse the global metrics shared between brokers
+		b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
+		b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
+		b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
+		b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
+		b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
+		b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
+		b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
+		// Do not gather metrics for seeded broker (only used during bootstrap) because they share
+		// the same id (-1) and are already exposed through the global metrics above
+		if b.id >= 0 {
+			b.registerMetrics()
+		}
+
+		if conf.Net.SASL.Enable {
+
+			b.connErr = b.authenticateViaSASL()
+
+			if b.connErr != nil {
+				err = b.conn.Close()
+				if err == nil {
+					Logger.Printf("Closed connection to broker %s\n", b.addr)
+				} else {
+					Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
+				}
+				b.conn = nil
+				atomic.StoreInt32(&b.opened, 0)
+				return
+			}
+		}
+
+		b.done = make(chan bool)
+		b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
+
+		if b.id >= 0 {
+			Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
+		} else {
+			Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
+		}
+		go withRecover(b.responseReceiver)
+	})
+
+	return nil
+}
+
+// Connected returns true if the broker is connected and false otherwise. If the broker is not
+// connected but it had tried to connect, the error from that connection attempt is also returned.
+func (b *Broker) Connected() (bool, error) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	return b.conn != nil, b.connErr
+}
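+
+// A minimal sketch of the open/connect life cycle described above (assuming a
+// broker listening on localhost:9092):
+//
+//	b := NewBroker("localhost:9092")
+//	if err := b.Open(nil); err != nil {
+//		// ConfigurationError or ErrAlreadyConnected
+//	}
+//	if ok, err := b.Connected(); !ok {
+//		// the asynchronous dial failed; err holds the dial error
+//	}
+//	defer b.Close()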
+
+//Close closes the broker resources
+func (b *Broker) Close() error {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if b.conn == nil {
+		return ErrNotConnected
+	}
+
+	close(b.responses)
+	<-b.done
+
+	err := b.conn.Close()
+
+	b.conn = nil
+	b.connErr = nil
+	b.done = nil
+	b.responses = nil
+
+	b.unregisterMetrics()
+
+	if err == nil {
+		Logger.Printf("Closed connection to broker %s\n", b.addr)
+	} else {
+		Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
+	}
+
+	atomic.StoreInt32(&b.opened, 0)
+
+	return err
+}
+
+// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
+func (b *Broker) ID() int32 {
+	return b.id
+}
+
+// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
+func (b *Broker) Addr() string {
+	return b.addr
+}
+
+// Rack returns the broker's rack as retrieved from Kafka's metadata or the
+// empty string if it is not known.  The returned value corresponds to the
+// broker's broker.rack configuration setting.  Requires protocol version to be
+// at least v0.10.0.0.
+func (b *Broker) Rack() string {
+	if b.rack == nil {
+		return ""
+	}
+	return *b.rack
+}
+
+//GetMetadata sends a metadata request and returns a metadata response or error
+func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
+	response := new(MetadataResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//GetConsumerMetadata sends a consumer metadata request and returns a consumer metadata response or error
+func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
+	response := new(ConsumerMetadataResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//FindCoordinator sends a find coordinator request and returns a response or error
+func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
+	response := new(FindCoordinatorResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//GetAvailableOffsets returns an offset response or error
+func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
+	response := new(OffsetResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//Produce returns a produce response or error
+func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
+	var (
+		response *ProduceResponse
+		err      error
+	)
+
+	if request.RequiredAcks == NoResponse {
+		err = b.sendAndReceive(request, nil)
+	} else {
+		response = new(ProduceResponse)
+		err = b.sendAndReceive(request, response)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//Fetch returns a FetchResponse or error
+func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
+	response := new(FetchResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//CommitOffset returns an offset commit response or error
+func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
+	response := new(OffsetCommitResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//FetchOffset returns an offset fetch response or error
+func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
+	response := new(OffsetFetchResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//JoinGroup returns a join group response or error
+func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
+	response := new(JoinGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//SyncGroup returns a sync group response or error
+func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
+	response := new(SyncGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//LeaveGroup returns a leave group response or error
+func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
+	response := new(LeaveGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//Heartbeat returns a heartbeat response or error
+func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
+	response := new(HeartbeatResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//ListGroups returns a list groups response or error
+func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
+	response := new(ListGroupsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//DescribeGroups returns a describe groups response or error
+func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
+	response := new(DescribeGroupsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//ApiVersions returns an API versions response or error
+func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
+	response := new(ApiVersionsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//CreateTopics sends a create topics request and returns a create topics response or error
+func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
+	response := new(CreateTopicsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//DeleteTopics sends a delete topic request and returns delete topic response
+func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
+	response := new(DeleteTopicsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//CreatePartitions sends a create partitions request and returns a create
+//partitions response or error
+func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
+	response := new(CreatePartitionsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//DeleteRecords sends a request to delete records and returns a delete records
+//response or error
+func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) {
+	response := new(DeleteRecordsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//DescribeAcls sends a describe acl request and returns a response or error
+func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
+	response := new(DescribeAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//CreateAcls sends a create acl request and returns a response or error
+func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
+	response := new(CreateAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//DeleteAcls sends a delete acl request and returns a response or error
+func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
+	response := new(DeleteAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//InitProducerID sends an init producer request and returns a response or error
+func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
+	response := new(InitProducerIDResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//AddPartitionsToTxn sends a request to add partitions to a transaction and
+//returns a response or error
+func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
+	response := new(AddPartitionsToTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//AddOffsetsToTxn sends a request to add offsets to txn and returns a response
+//or error
+func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
+	response := new(AddOffsetsToTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//EndTxn sends a request to end txn and returns a response or error
+func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
+	response := new(EndTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//TxnOffsetCommit sends a request to commit transaction offsets and returns
+//a response or error
+func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
+	response := new(TxnOffsetCommitResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//DescribeConfigs sends a request to describe config and returns a response or
+//error
+func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
+	response := new(DescribeConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//AlterConfigs sends a request to alter config and returns a response or error
+func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
+	response := new(AlterConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//DeleteGroups sends a request to delete groups and returns a response or error
+func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) {
+	response := new(DeleteGroupsResponse)
+
+	if err := b.sendAndReceive(request, response); err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+//DescribeLogDirs sends a request to get the broker's log dir paths and sizes
+func (b *Broker) DescribeLogDirs(request *DescribeLogDirsRequest) (*DescribeLogDirsResponse, error) {
+	response := new(DescribeLogDirsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// readFull ensures the conn ReadDeadline has been set up before making a
+// call to io.ReadFull
+func (b *Broker) readFull(buf []byte) (n int, err error) {
+	if err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)); err != nil {
+		return 0, err
+	}
+
+	return io.ReadFull(b.conn, buf)
+}
+
+// write ensures the conn WriteDeadline has been set up before making a
+// call to conn.Write
+func (b *Broker) write(buf []byte) (n int, err error) {
+	if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil {
+		return 0, err
+	}
+
+	return b.conn.Write(buf)
+}
+
+func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if b.conn == nil {
+		if b.connErr != nil {
+			return nil, b.connErr
+		}
+		return nil, ErrNotConnected
+	}
+
+	if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
+		return nil, ErrUnsupportedVersion
+	}
+
+	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+	buf, err := encode(req, b.conf.MetricRegistry)
+	if err != nil {
+		return nil, err
+	}
+
+	requestTime := time.Now()
+	bytes, err := b.write(buf)
+	b.updateOutgoingCommunicationMetrics(bytes)
+	if err != nil {
+		return nil, err
+	}
+	b.correlationID++
+
+	if !promiseResponse {
+		// Record request latency without the response
+		b.updateRequestLatencyMetrics(time.Since(requestTime))
+		return nil, nil
+	}
+
+	promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)}
+	b.responses <- promise
+
+	return &promise, nil
+}
+
+func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
+	promise, err := b.send(req, res != nil)
+	if err != nil {
+		return err
+	}
+
+	if promise == nil {
+		return nil
+	}
+
+	select {
+	case buf := <-promise.packets:
+		return versionedDecode(buf, res, req.version())
+	case err = <-promise.errors:
+		return err
+	}
+}
+
+func (b *Broker) decode(pd packetDecoder, version int16) (err error) {
+	b.id, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	host, err := pd.getString()
+	if err != nil {
+		return err
+	}
+
+	port, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	if version >= 1 {
+		b.rack, err = pd.getNullableString()
+		if err != nil {
+			return err
+		}
+	}
+
+	b.addr = net.JoinHostPort(host, fmt.Sprint(port))
+	if _, _, err := net.SplitHostPort(b.addr); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (b *Broker) encode(pe packetEncoder, version int16) (err error) {
+	host, portstr, err := net.SplitHostPort(b.addr)
+	if err != nil {
+		return err
+	}
+
+	port, err := strconv.Atoi(portstr)
+	if err != nil {
+		return err
+	}
+
+	pe.putInt32(b.id)
+
+	err = pe.putString(host)
+	if err != nil {
+		return err
+	}
+
+	pe.putInt32(int32(port))
+
+	if version >= 1 {
+		err = pe.putNullableString(b.rack)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (b *Broker) responseReceiver() {
+	var dead error
+	header := make([]byte, 8)
+
+	for response := range b.responses {
+		if dead != nil {
+			response.errors <- dead
+			continue
+		}
+
+		bytesReadHeader, err := b.readFull(header)
+		requestLatency := time.Since(response.requestTime)
+		if err != nil {
+			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+			dead = err
+			response.errors <- err
+			continue
+		}
+
+		decodedHeader := responseHeader{}
+		err = decode(header, &decodedHeader)
+		if err != nil {
+			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+			dead = err
+			response.errors <- err
+			continue
+		}
+		if decodedHeader.correlationID != response.correlationID {
+			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+			// TODO if decoded ID < cur ID, discard until we catch up
+			// TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
+			dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
+			response.errors <- dead
+			continue
+		}
+
+		buf := make([]byte, decodedHeader.length-4)
+		bytesReadBody, err := b.readFull(buf)
+		b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
+		if err != nil {
+			dead = err
+			response.errors <- err
+			continue
+		}
+
+		response.packets <- buf
+	}
+	close(b.done)
+}
+
+func (b *Broker) authenticateViaSASL() error {
+	switch b.conf.Net.SASL.Mechanism {
+	case SASLTypeOAuth:
+		return b.sendAndReceiveSASLOAuth(b.conf.Net.SASL.TokenProvider)
+	case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
+		return b.sendAndReceiveSASLSCRAMv1()
+	case SASLTypeGSSAPI:
+		return b.sendAndReceiveKerberos()
+	default:
+		return b.sendAndReceiveSASLPlainAuth()
+	}
+}
+
+func (b *Broker) sendAndReceiveKerberos() error {
+	b.kerberosAuthenticator.Config = &b.conf.Net.SASL.GSSAPI
+	if b.kerberosAuthenticator.NewKerberosClientFunc == nil {
+		b.kerberosAuthenticator.NewKerberosClientFunc = NewKerberosClient
+	}
+	return b.kerberosAuthenticator.Authorize(b)
+}
+
+func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int16) error {
+	rb := &SaslHandshakeRequest{Mechanism: string(saslType), Version: version}
+
+	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+	buf, err := encode(req, b.conf.MetricRegistry)
+	if err != nil {
+		return err
+	}
+
+	requestTime := time.Now()
+	bytes, err := b.write(buf)
+	b.updateOutgoingCommunicationMetrics(bytes)
+	if err != nil {
+		Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
+		return err
+	}
+	b.correlationID++
+
+	header := make([]byte, 8) // response header
+	_, err = b.readFull(header)
+	if err != nil {
+		Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
+		return err
+	}
+
+	length := binary.BigEndian.Uint32(header[:4])
+	payload := make([]byte, length-4)
+	n, err := b.readFull(payload)
+	if err != nil {
+		Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
+		return err
+	}
+
+	b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
+	res := &SaslHandshakeResponse{}
+
+	err = versionedDecode(payload, res, 0)
+	if err != nil {
+		Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
+		return err
+	}
+
+	if res.Err != ErrNoError {
+		Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
+		return res.Err
+	}
+
+	Logger.Print("Successful SASL handshake. Available mechanisms: ", res.EnabledMechanisms)
+	return nil
+}
+
+// Kafka 0.10.x supported SASL PLAIN/Kerberos via KAFKA-3149 (KIP-43).
+// Kafka 1.x.x onward added a SaslAuthenticate request/response message that
+// wraps the SASL flow in the Kafka protocol, allowing meaningful errors to be
+// returned on authentication failure.
+//
+// In SASL Plain, Kafka expects the auth header to be in the following format
+// Message format (from https://tools.ietf.org/html/rfc4616):
+//
+//   message   = [authzid] UTF8NUL authcid UTF8NUL passwd
+//   authcid   = 1*SAFE ; MUST accept up to 255 octets
+//   authzid   = 1*SAFE ; MUST accept up to 255 octets
+//   passwd    = 1*SAFE ; MUST accept up to 255 octets
+//   UTF8NUL   = %x00 ; UTF-8 encoded NUL character
+//
+//   SAFE      = UTF1 / UTF2 / UTF3 / UTF4
+//                  ;; any UTF-8 encoded Unicode character except NUL
+//
+// With SASL v0 handshake and auth then:
+// When credentials are valid, Kafka returns a 4 byte array of null characters.
+// When credentials are invalid, Kafka closes the connection.
+//
+// With SASL v1 handshake and auth then:
+// When credentials are invalid, Kafka replies with a SaslAuthenticate response
+// containing an error code and message detailing the authentication failure.
+func (b *Broker) sendAndReceiveSASLPlainAuth() error {
+	// default to V0 to allow for backward compatibility when SASL is enabled
+	// but not the handshake
+	if b.conf.Net.SASL.Handshake {
+
+		handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version)
+		if handshakeErr != nil {
+			Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
+			return handshakeErr
+		}
+	}
+
+	if b.conf.Net.SASL.Version == SASLHandshakeV1 {
+		return b.sendAndReceiveV1SASLPlainAuth()
+	}
+	return b.sendAndReceiveV0SASLPlainAuth()
+}
+
+// sendAndReceiveV0SASLPlainAuth performs the v0 SASL PLAIN auth exchange, NOT wrapped in the Kafka protocol
+func (b *Broker) sendAndReceiveV0SASLPlainAuth() error {
+
+	length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
+	authBytes := make([]byte, length+4) //4 byte length header + auth data
+	binary.BigEndian.PutUint32(authBytes, uint32(length))
+	copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))
+
+	requestTime := time.Now()
+	bytesWritten, err := b.write(authBytes)
+	b.updateOutgoingCommunicationMetrics(bytesWritten)
+	if err != nil {
+		Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	header := make([]byte, 4)
+	n, err := b.readFull(header)
+	b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
+	// If the credentials are valid, we would get a 4 byte response filled with null characters.
+	// Otherwise, the broker closes the connection and we get an EOF
+	if err != nil {
+		Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
+	return nil
+}
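+
+// For example (illustrative): user "alice" with password "secret" produces the
+// 13-byte payload "\x00alice\x00secret", preceded by the 4-byte big-endian
+// length 13.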
+
+// sendAndReceiveV1SASLPlainAuth performs the v1 SASL PLAIN authentication wrapped in the Kafka protocol
+func (b *Broker) sendAndReceiveV1SASLPlainAuth() error {
+	correlationID := b.correlationID
+
+	requestTime := time.Now()
+
+	bytesWritten, err := b.sendSASLPlainAuthClientResponse(correlationID)
+
+	b.updateOutgoingCommunicationMetrics(bytesWritten)
+
+	if err != nil {
+		Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	b.correlationID++
+
+	bytesRead, err := b.receiveSASLServerResponse(&SaslAuthenticateResponse{}, correlationID)
+	b.updateIncomingCommunicationMetrics(bytesRead, time.Since(requestTime))
+
+	// With v1 sasl we get an error message set in the response we can return
+	if err != nil {
+		Logger.Printf("Error returned from broker during SASL flow %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// sendAndReceiveSASLOAuth performs the authentication flow as described by KIP-255
+// https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=75968876
+func (b *Broker) sendAndReceiveSASLOAuth(provider AccessTokenProvider) error {
+	if err := b.sendAndReceiveSASLHandshake(SASLTypeOAuth, SASLHandshakeV1); err != nil {
+		return err
+	}
+
+	token, err := provider.Token()
+	if err != nil {
+		return err
+	}
+
+	message, err := buildClientFirstMessage(token)
+	if err != nil {
+		return err
+	}
+
+	challenged, err := b.sendClientMessage(message)
+	if err != nil {
+		return err
+	}
+
+	if challenged {
+		// Abort the token exchange by sending the single control byte 0x01;
+		// the broker returns the failure code.
+		_, err = b.sendClientMessage([]byte("\x01"))
+	}
+
+	return err
+}
+
+// sendClientMessage sends a SASL/OAUTHBEARER client message and returns true
+// if the broker responds with a challenge, in which case the token is
+// rejected.
+func (b *Broker) sendClientMessage(message []byte) (bool, error) {
+
+	requestTime := time.Now()
+	correlationID := b.correlationID
+
+	bytesWritten, err := b.sendSASLOAuthBearerClientMessage(message, correlationID)
+	if err != nil {
+		return false, err
+	}
+
+	b.updateOutgoingCommunicationMetrics(bytesWritten)
+	b.correlationID++
+
+	res := &SaslAuthenticateResponse{}
+	bytesRead, err := b.receiveSASLServerResponse(res, correlationID)
+
+	requestLatency := time.Since(requestTime)
+	b.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
+
+	isChallenge := len(res.SaslAuthBytes) > 0
+
+	if isChallenge && err != nil {
+		Logger.Printf("Broker rejected authentication token: %s", res.SaslAuthBytes)
+	}
+
+	return isChallenge, err
+}
+
+func (b *Broker) sendAndReceiveSASLSCRAMv1() error {
+	if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV1); err != nil {
+		return err
+	}
+
+	scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc()
+	if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil {
+		return fmt.Errorf("failed to start SCRAM exchange with the server: %s", err.Error())
+	}
+
+	msg, err := scramClient.Step("")
+	if err != nil {
+		return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error())
+
+	}
+
+	for !scramClient.Done() {
+		requestTime := time.Now()
+		correlationID := b.correlationID
+		bytesWritten, err := b.sendSaslAuthenticateRequest(correlationID, []byte(msg))
+		if err != nil {
+			Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+			return err
+		}
+
+		b.updateOutgoingCommunicationMetrics(bytesWritten)
+		b.correlationID++
+		challenge, err := b.receiveSaslAuthenticateResponse(correlationID)
+		if err != nil {
+			Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+			return err
+		}
+
+		b.updateIncomingCommunicationMetrics(len(challenge), time.Since(requestTime))
+		msg, err = scramClient.Step(string(challenge))
+		if err != nil {
+			Logger.Println("SASL authentication failed", err)
+			return err
+		}
+	}
+
+	Logger.Println("SASL authentication succeeded")
+	return nil
+}
+
+func (b *Broker) sendSaslAuthenticateRequest(correlationID int32, msg []byte) (int, error) {
+	rb := &SaslAuthenticateRequest{msg}
+	req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
+	buf, err := encode(req, b.conf.MetricRegistry)
+	if err != nil {
+		return 0, err
+	}
+
+	return b.write(buf)
+}
+
+func (b *Broker) receiveSaslAuthenticateResponse(correlationID int32) ([]byte, error) {
+	buf := make([]byte, responseLengthSize+correlationIDSize)
+	_, err := b.readFull(buf)
+	if err != nil {
+		return nil, err
+	}
+
+	header := responseHeader{}
+	err = decode(buf, &header)
+	if err != nil {
+		return nil, err
+	}
+
+	if header.correlationID != correlationID {
+		return nil, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", correlationID, header.correlationID)
+	}
+
+	buf = make([]byte, header.length-correlationIDSize)
+	_, err = b.readFull(buf)
+	if err != nil {
+		return nil, err
+	}
+
+	res := &SaslAuthenticateResponse{}
+	if err := versionedDecode(buf, res, 0); err != nil {
+		return nil, err
+	}
+	if res.Err != ErrNoError {
+		return nil, res.Err
+	}
+	return res.SaslAuthBytes, nil
+}
+
+// Build SASL/OAUTHBEARER initial client response as described by RFC-7628
+// https://tools.ietf.org/html/rfc7628
+func buildClientFirstMessage(token *AccessToken) ([]byte, error) {
+	var ext string
+
+	if len(token.Extensions) > 0 {
+		if _, ok := token.Extensions[SASLExtKeyAuth]; ok {
+			return []byte{}, fmt.Errorf("the extension `%s` is invalid", SASLExtKeyAuth)
+		}
+		ext = "\x01" + mapToString(token.Extensions, "=", "\x01")
+	}
+
+	resp := []byte(fmt.Sprintf("n,,\x01auth=Bearer %s%s\x01\x01", token.Token, ext))
+
+	return resp, nil
+}
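+
+// For example (illustrative): a token "t0ken" with the single extension
+// traceId=123 yields "n,,\x01auth=Bearer t0ken\x01traceId=123\x01\x01".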
+
+// mapToString returns a list of key-value pairs ordered by key.
+// keyValSep separates the key from the value. elemSep separates each pair.
+func mapToString(extensions map[string]string, keyValSep string, elemSep string) string {
+	buf := make([]string, 0, len(extensions))
+
+	for k, v := range extensions {
+		buf = append(buf, k+keyValSep+v)
+	}
+
+	sort.Strings(buf)
+
+	return strings.Join(buf, elemSep)
+}
+
+func (b *Broker) sendSASLPlainAuthClientResponse(correlationID int32) (int, error) {
+	authBytes := []byte("\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password)
+	rb := &SaslAuthenticateRequest{authBytes}
+	req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
+	buf, err := encode(req, b.conf.MetricRegistry)
+	if err != nil {
+		return 0, err
+	}
+
+	return b.write(buf)
+}
+
+func (b *Broker) sendSASLOAuthBearerClientMessage(initialResp []byte, correlationID int32) (int, error) {
+
+	rb := &SaslAuthenticateRequest{initialResp}
+
+	req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
+
+	buf, err := encode(req, b.conf.MetricRegistry)
+	if err != nil {
+		return 0, err
+	}
+
+	return b.write(buf)
+}
+
+func (b *Broker) receiveSASLServerResponse(res *SaslAuthenticateResponse, correlationID int32) (int, error) {
+	buf := make([]byte, responseLengthSize+correlationIDSize)
+	bytesRead, err := b.readFull(buf)
+	if err != nil {
+		return bytesRead, err
+	}
+
+	header := responseHeader{}
+	err = decode(buf, &header)
+	if err != nil {
+		return bytesRead, err
+	}
+
+	if header.correlationID != correlationID {
+		return bytesRead, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", correlationID, header.correlationID)
+	}
+
+	buf = make([]byte, header.length-correlationIDSize)
+	c, err := b.readFull(buf)
+	bytesRead += c
+	if err != nil {
+		return bytesRead, err
+	}
+
+	if err := versionedDecode(buf, res, 0); err != nil {
+		return bytesRead, err
+	}
+
+	if res.Err != ErrNoError {
+		return bytesRead, res.Err
+	}
+
+	return bytesRead, nil
+}
+
+func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
+	b.updateRequestLatencyMetrics(requestLatency)
+	b.responseRate.Mark(1)
+
+	if b.brokerResponseRate != nil {
+		b.brokerResponseRate.Mark(1)
+	}
+
+	responseSize := int64(bytes)
+	b.incomingByteRate.Mark(responseSize)
+	if b.brokerIncomingByteRate != nil {
+		b.brokerIncomingByteRate.Mark(responseSize)
+	}
+
+	b.responseSize.Update(responseSize)
+	if b.brokerResponseSize != nil {
+		b.brokerResponseSize.Update(responseSize)
+	}
+}
+
+func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
+	requestLatencyInMs := int64(requestLatency / time.Millisecond)
+	b.requestLatency.Update(requestLatencyInMs)
+
+	if b.brokerRequestLatency != nil {
+		b.brokerRequestLatency.Update(requestLatencyInMs)
+	}
+
+}
+
+func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
+	b.requestRate.Mark(1)
+	if b.brokerRequestRate != nil {
+		b.brokerRequestRate.Mark(1)
+	}
+
+	requestSize := int64(bytes)
+	b.outgoingByteRate.Mark(requestSize)
+	if b.brokerOutgoingByteRate != nil {
+		b.brokerOutgoingByteRate.Mark(requestSize)
+	}
+
+	b.requestSize.Update(requestSize)
+	if b.brokerRequestSize != nil {
+		b.brokerRequestSize.Update(requestSize)
+	}
+
+}
+
+func (b *Broker) registerMetrics() {
+	b.brokerIncomingByteRate = b.registerMeter("incoming-byte-rate")
+	b.brokerRequestRate = b.registerMeter("request-rate")
+	b.brokerRequestSize = b.registerHistogram("request-size")
+	b.brokerRequestLatency = b.registerHistogram("request-latency-in-ms")
+	b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate")
+	b.brokerResponseRate = b.registerMeter("response-rate")
+	b.brokerResponseSize = b.registerHistogram("response-size")
+}
+
+func (b *Broker) unregisterMetrics() {
+	for _, name := range b.registeredMetrics {
+		b.conf.MetricRegistry.Unregister(name)
+	}
+}
+
+func (b *Broker) registerMeter(name string) metrics.Meter {
+	nameForBroker := getMetricNameForBroker(name, b)
+	b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
+	return metrics.GetOrRegisterMeter(nameForBroker, b.conf.MetricRegistry)
+}
+
+func (b *Broker) registerHistogram(name string) metrics.Histogram {
+	nameForBroker := getMetricNameForBroker(name, b)
+	b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
+	return getOrRegisterHistogram(nameForBroker, b.conf.MetricRegistry)
+}
diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go
new file mode 100644
index 0000000..e5b3557
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/client.go
@@ -0,0 +1,1013 @@
+package sarama
+
+import (
+	"math/rand"
+	"sort"
+	"sync"
+	"time"
+)
+
+// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
+// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
+// automatically when it passes out of scope. It is safe to share a client amongst many
+// users; however, Kafka will process requests from a single client strictly in serial,
+// so it is generally more efficient to use the default of one client per producer/consumer.
+type Client interface {
+	// Config returns the Config struct of the client. This struct should not be
+	// altered after it has been created.
+	Config() *Config
+
+	// Controller returns the cluster controller broker. Requires Kafka 0.10 or higher.
+	Controller() (*Broker, error)
+
+	// Brokers returns the current set of active brokers as retrieved from cluster metadata.
+	Brokers() []*Broker
+
+	// Topics returns the set of available topics as retrieved from cluster metadata.
+	Topics() ([]string, error)
+
+	// Partitions returns the sorted list of all partition IDs for the given topic.
+	Partitions(topic string) ([]int32, error)
+
+	// WritablePartitions returns the sorted list of all writable partition IDs for
+	// the given topic, where "writable" means "having a valid leader accepting
+	// writes".
+	WritablePartitions(topic string) ([]int32, error)
+
+	// Leader returns the broker object that is the leader of the current
+	// topic/partition, as determined by querying the cluster metadata.
+	Leader(topic string, partitionID int32) (*Broker, error)
+
+	// Replicas returns the set of all replica IDs for the given partition.
+	Replicas(topic string, partitionID int32) ([]int32, error)
+
+	// InSyncReplicas returns the set of all in-sync replica IDs for the given
+	// partition. In-sync replicas are replicas which are fully caught up with
+	// the partition leader.
+	InSyncReplicas(topic string, partitionID int32) ([]int32, error)
+
+	// OfflineReplicas returns the set of all offline replica IDs for the given
+	// partition. Offline replicas are replicas assigned to the partition whose
+	// brokers are currently offline.
+	OfflineReplicas(topic string, partitionID int32) ([]int32, error)
+
+	// RefreshMetadata takes a list of topics and queries the cluster to refresh the
+	// available metadata for those topics. If no topics are provided, it will refresh
+	// metadata for all topics.
+	RefreshMetadata(topics ...string) error
+
+	// GetOffset queries the cluster to get the most recent available offset at the
+	// given time (in milliseconds) on the topic/partition combination.
+	// Time should be OffsetOldest for the earliest available offset,
+	// OffsetNewest for the offset of the message that will be produced next, or a time.
+	GetOffset(topic string, partitionID int32, time int64) (int64, error)
+
+	// Coordinator returns the coordinating broker for a consumer group. It will
+	// return a locally cached value if it's available. You can call
+	// RefreshCoordinator to update the cached value. This function only works on
+	// Kafka 0.8.2 and higher.
+	Coordinator(consumerGroup string) (*Broker, error)
+
+	// RefreshCoordinator retrieves the coordinator for a consumer group and stores it
+	// in local cache. This function only works on Kafka 0.8.2 and higher.
+	RefreshCoordinator(consumerGroup string) error
+
+	// InitProducerID retrieves information required for Idempotent Producer
+	InitProducerID() (*InitProducerIDResponse, error)
+
+	// Close shuts down all broker connections managed by this client. It is required
+	// to call this function before a client object passes out of scope, as it will
+	// otherwise leak memory. You must close any Producers or Consumers using a client
+	// before you close the client.
+	Close() error
+
+	// Closed returns true if the client has already had Close called on it
+	Closed() bool
+}
+
+const (
+	// OffsetNewest stands for the log head offset, i.e. the offset that will be
+	// assigned to the next message that will be produced to the partition. You
+	// can send this to a client's GetOffset method to get this offset, or when
+	// calling ConsumePartition to start consuming new messages.
+	OffsetNewest int64 = -1
+	// OffsetOldest stands for the oldest offset available on the broker for a
+	// partition. You can send this to a client's GetOffset method to get this
+	// offset, or when calling ConsumePartition to start consuming from the
+	// oldest offset that is still available on the broker.
+	OffsetOldest int64 = -2
+)
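+
+// A minimal sketch of using these sentinels (assuming client is a Client and
+// topic "events" has a partition 0):
+//
+//	oldest, err := client.GetOffset("events", 0, OffsetOldest)
+//	newest, err := client.GetOffset("events", 0, OffsetNewest)
+//	// newest - oldest approximates the number of messages currently retained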
+
+type client struct {
+	conf           *Config
+	closer, closed chan none // for shutting down background metadata updater
+
+	// the broker addresses given to us through the constructor are not guaranteed to be returned in
+	// the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
+	// so we store them separately
+	seedBrokers []*Broker
+	deadSeeds   []*Broker
+
+	controllerID   int32                                   // cluster controller broker id
+	brokers        map[int32]*Broker                       // maps broker ids to brokers
+	metadata       map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
+	metadataTopics map[string]none                         // topics that need to collect metadata
+	coordinators   map[string]int32                        // Maps consumer group names to coordinating broker IDs
+
+	// If the number of partitions is large, we can get some churn calling cachedPartitions,
+	// so the result is cached.  It is important to update this value whenever metadata is changed
+	cachedPartitionsResults map[string][maxPartitionIndex][]int32
+
+	lock sync.RWMutex // protects access to the maps that hold cluster state.
+}
+
+// NewClient creates a new Client. It connects to one of the given broker addresses
+// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
+// be retrieved from any of the given broker addresses, the client is not created.
+func NewClient(addrs []string, conf *Config) (Client, error) {
+	Logger.Println("Initializing new client")
+
+	if conf == nil {
+		conf = NewConfig()
+	}
+
+	if err := conf.Validate(); err != nil {
+		return nil, err
+	}
+
+	if len(addrs) < 1 {
+		return nil, ConfigurationError("You must provide at least one broker address")
+	}
+
+	client := &client{
+		conf:                    conf,
+		closer:                  make(chan none),
+		closed:                  make(chan none),
+		brokers:                 make(map[int32]*Broker),
+		metadata:                make(map[string]map[int32]*PartitionMetadata),
+		metadataTopics:          make(map[string]none),
+		cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
+		coordinators:            make(map[string]int32),
+	}
+
+	random := rand.New(rand.NewSource(time.Now().UnixNano()))
+	for _, index := range random.Perm(len(addrs)) {
+		client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
+	}
+
+	if conf.Metadata.Full {
+		// do an initial fetch of all cluster metadata by specifying an empty list of topics
+		err := client.RefreshMetadata()
+		switch err {
+		case nil:
+			break
+		case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
+			// indicates that maybe part of the cluster is down, but is not fatal to creating the client
+			Logger.Println(err)
+		default:
+			close(client.closed) // we haven't started the background updater yet, so we have to do this manually
+			_ = client.Close()
+			return nil, err
+		}
+	}
+	go withRecover(client.backgroundMetadataUpdater)
+
+	Logger.Println("Successfully initialized new client")
+
+	return client, nil
+}
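+
+// A minimal sketch of the construction/teardown contract (assuming brokers
+// reachable at the listed addresses):
+//
+//	client, err := NewClient([]string{"kafka-0:9092", "kafka-1:9092"}, nil)
+//	if err != nil {
+//		// no seed broker could serve the initial metadata request
+//	}
+//	defer client.Close() // required; clients are not garbage-collected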
+
+func (client *client) Config() *Config {
+	return client.conf
+}
+
+func (client *client) Brokers() []*Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+	brokers := make([]*Broker, 0, len(client.brokers))
+	for _, broker := range client.brokers {
+		brokers = append(brokers, broker)
+	}
+	return brokers
+}
+
+func (client *client) InitProducerID() (*InitProducerIDResponse, error) {
+	var err error
+	for broker := client.any(); broker != nil; broker = client.any() {
+
+		req := &InitProducerIDRequest{}
+
+		response, err := broker.InitProducerID(req)
+		switch err.(type) {
+		case nil:
+			return response, nil
+		default:
+			// some error, remove that broker and try again
+			Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err)
+			_ = broker.Close()
+			client.deregisterBroker(broker)
+		}
+	}
+	return nil, err
+}
+
+func (client *client) Close() error {
+	if client.Closed() {
+		// Chances are this is being called from a defer() and the error will go unobserved
+		// so we go ahead and log the event in this case.
+		Logger.Printf("Close() called on already closed client")
+		return ErrClosedClient
+	}
+
+	// shutdown and wait for the background thread before we take the lock, to avoid races
+	close(client.closer)
+	<-client.closed
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+	Logger.Println("Closing Client")
+
+	for _, broker := range client.brokers {
+		safeAsyncClose(broker)
+	}
+
+	for _, broker := range client.seedBrokers {
+		safeAsyncClose(broker)
+	}
+
+	client.brokers = nil
+	client.metadata = nil
+	client.metadataTopics = nil
+
+	return nil
+}
+
+func (client *client) Closed() bool {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	return client.brokers == nil
+}
+
+func (client *client) Topics() ([]string, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	ret := make([]string, 0, len(client.metadata))
+	for topic := range client.metadata {
+		ret = append(ret, topic)
+	}
+
+	return ret, nil
+}
+
+func (client *client) MetadataTopics() ([]string, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	ret := make([]string, 0, len(client.metadataTopics))
+	for topic := range client.metadataTopics {
+		ret = append(ret, topic)
+	}
+
+	return ret, nil
+}
+
+func (client *client) Partitions(topic string) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	partitions := client.cachedPartitions(topic, allPartitions)
+
+	if len(partitions) == 0 {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		partitions = client.cachedPartitions(topic, allPartitions)
+	}
+
+	// no partitions found after refresh metadata
+	if len(partitions) == 0 {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	return partitions, nil
+}
+
+func (client *client) WritablePartitions(topic string) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	partitions := client.cachedPartitions(topic, writablePartitions)
+
+	// len==0 catches both the nil case (no such topic) and the odd case where
+	// every single partition is undergoing leader election simultaneously.
+	// Callers must be able to handle this function returning an empty slice
+	// (which is a valid return value), but catching it here on the first pass
+	// (note we *don't* catch it below, where we return ErrUnknownTopicOrPartition
+	// instead) triggers a metadata refresh as a nicety, so callers can simply
+	// try again instead of having to trigger a refresh manually (otherwise
+	// they'd just keep getting a stale cached copy).
+	if len(partitions) == 0 {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		partitions = client.cachedPartitions(topic, writablePartitions)
+	}
+
+	if partitions == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	return partitions, nil
+}
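+
+// Caller sketch (editor's illustration; the topic name is hypothetical): as the
+// comment above notes, an empty slice is a valid result that is worth retrying:
+//
+//	partitions, err := client.WritablePartitions("my-topic")
+//	if err == nil && len(partitions) == 0 {
+//		// every partition is currently leaderless; back off and try again
+//	}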
+
+func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	metadata := client.cachedMetadata(topic, partitionID)
+
+	if metadata == nil {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		metadata = client.cachedMetadata(topic, partitionID)
+	}
+
+	if metadata == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	if metadata.Err == ErrReplicaNotAvailable {
+		return dupInt32Slice(metadata.Replicas), metadata.Err
+	}
+	return dupInt32Slice(metadata.Replicas), nil
+}
+
+func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	metadata := client.cachedMetadata(topic, partitionID)
+
+	if metadata == nil {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		metadata = client.cachedMetadata(topic, partitionID)
+	}
+
+	if metadata == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	if metadata.Err == ErrReplicaNotAvailable {
+		return dupInt32Slice(metadata.Isr), metadata.Err
+	}
+	return dupInt32Slice(metadata.Isr), nil
+}
+
+func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	metadata := client.cachedMetadata(topic, partitionID)
+
+	if metadata == nil {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		metadata = client.cachedMetadata(topic, partitionID)
+	}
+
+	if metadata == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	if metadata.Err == ErrReplicaNotAvailable {
+		return dupInt32Slice(metadata.OfflineReplicas), metadata.Err
+	}
+	return dupInt32Slice(metadata.OfflineReplicas), nil
+}
+
+func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	leader, err := client.cachedLeader(topic, partitionID)
+
+	if leader == nil {
+		err = client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		leader, err = client.cachedLeader(topic, partitionID)
+	}
+
+	return leader, err
+}
+
+func (client *client) RefreshMetadata(topics ...string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	// Prior to 0.8.2, Kafka would throw exceptions on an empty topic rather than
+	// returning a proper error. We handle that case here by returning an error
+	// ourselves instead of sending the request off to Kafka.
+	// See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
+	for _, topic := range topics {
+		if len(topic) == 0 {
+			return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
+		}
+	}
+
+	deadline := time.Time{}
+	if client.conf.Metadata.Timeout > 0 {
+		deadline = time.Now().Add(client.conf.Metadata.Timeout)
+	}
+	return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline)
+}
+
+func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
+	if client.Closed() {
+		return -1, ErrClosedClient
+	}
+
+	offset, err := client.getOffset(topic, partitionID, time)
+
+	if err != nil {
+		if err := client.RefreshMetadata(topic); err != nil {
+			return -1, err
+		}
+		return client.getOffset(topic, partitionID, time)
+	}
+
+	return offset, err
+}
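+
+// Usage sketch (editor's illustration; topic and partition are hypothetical):
+// pass OffsetNewest to get the offset that will be assigned to the next message
+// produced, or OffsetOldest for the oldest offset still available:
+//
+//	next, err := client.GetOffset("my-topic", 0, sarama.OffsetNewest)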
+
+func (client *client) Controller() (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	if !client.conf.Version.IsAtLeast(V0_10_0_0) {
+		return nil, ErrUnsupportedVersion
+	}
+
+	controller := client.cachedController()
+	if controller == nil {
+		if err := client.refreshMetadata(); err != nil {
+			return nil, err
+		}
+		controller = client.cachedController()
+	}
+
+	if controller == nil {
+		return nil, ErrControllerNotAvailable
+	}
+
+	_ = controller.Open(client.conf)
+	return controller, nil
+}
+
+func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	coordinator := client.cachedCoordinator(consumerGroup)
+
+	if coordinator == nil {
+		if err := client.RefreshCoordinator(consumerGroup); err != nil {
+			return nil, err
+		}
+		coordinator = client.cachedCoordinator(consumerGroup)
+	}
+
+	if coordinator == nil {
+		return nil, ErrConsumerCoordinatorNotAvailable
+	}
+
+	_ = coordinator.Open(client.conf)
+	return coordinator, nil
+}
+
+func (client *client) RefreshCoordinator(consumerGroup string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
+	if err != nil {
+		return err
+	}
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+	client.registerBroker(response.Coordinator)
+	client.coordinators[consumerGroup] = response.Coordinator.ID()
+	return nil
+}
+
+// private broker management helpers
+
+// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
+// in the brokers map. If the given broker's ID is already registered under a different address,
+// the old Broker instance is replaced. You must hold the write lock before calling this function.
+func (client *client) registerBroker(broker *Broker) {
+	if client.brokers == nil {
+		Logger.Printf("cannot register broker #%d at %s, client already closed", broker.ID(), broker.Addr())
+		return
+	}
+
+	if client.brokers[broker.ID()] == nil {
+		client.brokers[broker.ID()] = broker
+		Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+	} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
+		safeAsyncClose(client.brokers[broker.ID()])
+		client.brokers[broker.ID()] = broker
+		Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+	}
+}
+
+// deregisterBroker removes a broker from the seedBrokers list, and if it's
+// not a seed broker, removes it from the brokers map completely.
+func (client *client) deregisterBroker(broker *Broker) {
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
+		client.deadSeeds = append(client.deadSeeds, broker)
+		client.seedBrokers = client.seedBrokers[1:]
+	} else {
+		// we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
+		// but we really shouldn't have to; once that loop is improved this case can
+		// be removed, and the function can be renamed from `deregisterBroker` to
+		// `nextSeedBroker` or something similar
+		Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
+		delete(client.brokers, broker.ID())
+	}
+}
+
+func (client *client) resurrectDeadBrokers() {
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
+	client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
+	client.deadSeeds = nil
+}
+
+func (client *client) any() *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	if len(client.seedBrokers) > 0 {
+		_ = client.seedBrokers[0].Open(client.conf)
+		return client.seedBrokers[0]
+	}
+
+	// not guaranteed to be random *or* deterministic
+	for _, broker := range client.brokers {
+		_ = broker.Open(client.conf)
+		return broker
+	}
+
+	return nil
+}
+
+// private caching/lazy metadata helpers
+
+type partitionType int
+
+const (
+	allPartitions partitionType = iota
+	writablePartitions
+	// If you add any more types, update the partition cache in updateMetadata()
+
+	// Ensure this is the last partition type value
+	maxPartitionIndex
+)
+
+func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	partitions := client.metadata[topic]
+	if partitions != nil {
+		return partitions[partitionID]
+	}
+
+	return nil
+}
+
+func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	partitions, exists := client.cachedPartitionsResults[topic]
+
+	if !exists {
+		return nil
+	}
+	return partitions[partitionSet]
+}
+
+func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
+	partitions := client.metadata[topic]
+
+	if partitions == nil {
+		return nil
+	}
+
+	ret := make([]int32, 0, len(partitions))
+	for _, partition := range partitions {
+		if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
+			continue
+		}
+		ret = append(ret, partition.ID)
+	}
+
+	sort.Sort(int32Slice(ret))
+	return ret
+}
+
+func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	partitions := client.metadata[topic]
+	if partitions != nil {
+		metadata, ok := partitions[partitionID]
+		if ok {
+			if metadata.Err == ErrLeaderNotAvailable {
+				return nil, ErrLeaderNotAvailable
+			}
+			b := client.brokers[metadata.Leader]
+			if b == nil {
+				return nil, ErrLeaderNotAvailable
+			}
+			_ = b.Open(client.conf)
+			return b, nil
+		}
+	}
+
+	return nil, ErrUnknownTopicOrPartition
+}
+
+func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
+	broker, err := client.Leader(topic, partitionID)
+	if err != nil {
+		return -1, err
+	}
+
+	request := &OffsetRequest{}
+	if client.conf.Version.IsAtLeast(V0_10_1_0) {
+		request.Version = 1
+	}
+	request.AddBlock(topic, partitionID, time, 1)
+
+	response, err := broker.GetAvailableOffsets(request)
+	if err != nil {
+		_ = broker.Close()
+		return -1, err
+	}
+
+	block := response.GetBlock(topic, partitionID)
+	if block == nil {
+		_ = broker.Close()
+		return -1, ErrIncompleteResponse
+	}
+	if block.Err != ErrNoError {
+		return -1, block.Err
+	}
+	if len(block.Offsets) != 1 {
+		return -1, ErrOffsetOutOfRange
+	}
+
+	return block.Offsets[0], nil
+}
+
+// core metadata update logic
+
+func (client *client) backgroundMetadataUpdater() {
+	defer close(client.closed)
+
+	if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
+		return
+	}
+
+	ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			if err := client.refreshMetadata(); err != nil {
+				Logger.Println("Client background metadata update:", err)
+			}
+		case <-client.closer:
+			return
+		}
+	}
+}
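+
+// Configuration sketch (editor's illustration): setting RefreshFrequency to zero
+// makes the updater above return immediately, disabling background refreshes:
+//
+//	conf := sarama.NewConfig()
+//	conf.Metadata.RefreshFrequency = 0 // rely on on-demand refreshes only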
+
+func (client *client) refreshMetadata() error {
+	topics := []string{}
+
+	if !client.conf.Metadata.Full {
+		if specificTopics, err := client.MetadataTopics(); err != nil {
+			return err
+		} else if len(specificTopics) == 0 {
+			return ErrNoTopicsToUpdateMetadata
+		} else {
+			topics = specificTopics
+		}
+	}
+
+	if err := client.RefreshMetadata(topics...); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, deadline time.Time) error {
+	pastDeadline := func(backoff time.Duration) bool {
+		if !deadline.IsZero() && time.Now().Add(backoff).After(deadline) {
+			// we are past the deadline
+			return true
+		}
+		return false
+	}
+	retry := func(err error) error {
+		if attemptsRemaining > 0 {
+			backoff := client.computeBackoff(attemptsRemaining)
+			if pastDeadline(backoff) {
+				Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout")
+				return err
+			}
+			Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
+			if backoff > 0 {
+				time.Sleep(backoff)
+			}
+			return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline)
+		}
+		return err
+	}
+
+	broker := client.any()
+	for ; broker != nil && !pastDeadline(0); broker = client.any() {
+		allowAutoTopicCreation := true
+		if len(topics) > 0 {
+			Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
+		} else {
+			allowAutoTopicCreation = false
+			Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
+		}
+
+		req := &MetadataRequest{Topics: topics, AllowAutoTopicCreation: allowAutoTopicCreation}
+		if client.conf.Version.IsAtLeast(V1_0_0_0) {
+			req.Version = 5
+		} else if client.conf.Version.IsAtLeast(V0_10_0_0) {
+			req.Version = 1
+		}
+		response, err := broker.GetMetadata(req)
+		switch err.(type) {
+		case nil:
+			allKnownMetaData := len(topics) == 0
+			// valid response, use it
+			shouldRetry, err := client.updateMetadata(response, allKnownMetaData)
+			if shouldRetry {
+				Logger.Println("client/metadata found some partitions to be leaderless")
+				return retry(err) // note: err can be nil
+			}
+			return err
+
+		case PacketEncodingError:
+			// didn't even send, return the error
+			return err
+
+		case KError:
+			// if this is a SASL auth error, return it immediately, as it _should_ be
+			// non-retryable for all brokers
+			if err.(KError) == ErrSASLAuthenticationFailed {
+				Logger.Println("client/metadata failed SASL authentication")
+				return err
+			}
+
+			if err.(KError) == ErrTopicAuthorizationFailed {
+				Logger.Println("client is not authorized to access this topic. The topics were: ", topics)
+				return err
+			}
+			// else remove that broker and try again
+			Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
+			_ = broker.Close()
+			client.deregisterBroker(broker)
+
+		default:
+			// some other error, remove that broker and try again
+			Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
+			_ = broker.Close()
+			client.deregisterBroker(broker)
+		}
+	}
+
+	if broker != nil {
+		Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
+		return retry(ErrOutOfBrokers)
+	}
+
+	Logger.Println("client/metadata no available broker to send metadata request to")
+	client.resurrectDeadBrokers()
+	return retry(ErrOutOfBrokers)
+}
+
+// updateMetadata caches the given metadata response. If there was no fatal error, the
+// returned retry flag reports whether any topics or partitions still need retrying,
+// typically due to ErrLeaderNotAvailable.
+func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
+	if client.Closed() {
+		return
+	}
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	// For all the brokers we received:
+	// - if it is a new ID, save it
+	// - if it is an existing ID, but the address we have is stale, discard the old one and save it
+	// - otherwise ignore it, replacing our existing one would just bounce the connection
+	for _, broker := range data.Brokers {
+		client.registerBroker(broker)
+	}
+
+	client.controllerID = data.ControllerID
+
+	if allKnownMetaData {
+		client.metadata = make(map[string]map[int32]*PartitionMetadata)
+		client.metadataTopics = make(map[string]none)
+		client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32)
+	}
+	for _, topic := range data.Topics {
+		// topics must be added to `metadataTopics` first, to guarantee that all
+		// requested topics are recorded and stay trackable for the periodic
+		// metadata refresh.
+		if _, exists := client.metadataTopics[topic.Name]; !exists {
+			client.metadataTopics[topic.Name] = none{}
+		}
+		delete(client.metadata, topic.Name)
+		delete(client.cachedPartitionsResults, topic.Name)
+
+		switch topic.Err {
+		case ErrNoError:
+			// no-op
+		case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
+			err = topic.Err
+			continue
+		case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
+			err = topic.Err
+			retry = true
+			continue
+		case ErrLeaderNotAvailable: // retry, but store partial partition results
+			retry = true
+		default: // don't retry, don't store partial results
+			Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
+			err = topic.Err
+			continue
+		}
+
+		client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
+		for _, partition := range topic.Partitions {
+			client.metadata[topic.Name][partition.ID] = partition
+			if partition.Err == ErrLeaderNotAvailable {
+				retry = true
+			}
+		}
+
+		var partitionCache [maxPartitionIndex][]int32
+		partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
+		partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
+		client.cachedPartitionsResults[topic.Name] = partitionCache
+	}
+
+	return
+}
+
+func (client *client) cachedCoordinator(consumerGroup string) *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+	if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
+		return client.brokers[coordinatorID]
+	}
+	return nil
+}
+
+func (client *client) cachedController() *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	return client.brokers[client.controllerID]
+}
+
+func (client *client) computeBackoff(attemptsRemaining int) time.Duration {
+	if client.conf.Metadata.Retry.BackoffFunc != nil {
+		maxRetries := client.conf.Metadata.Retry.Max
+		retries := maxRetries - attemptsRemaining
+		return client.conf.Metadata.Retry.BackoffFunc(retries, maxRetries)
+	}
+	return client.conf.Metadata.Retry.Backoff
+}
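+
+// Configuration sketch (editor's illustration): a user-supplied BackoffFunc
+// takes precedence over the static Backoff value, e.g. a simple exponential
+// backoff starting at 100ms:
+//
+//	conf.Metadata.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
+//		return time.Duration(100<<uint(retries)) * time.Millisecond
+//	}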
+
+func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) {
+	retry := func(err error) (*FindCoordinatorResponse, error) {
+		if attemptsRemaining > 0 {
+			backoff := client.computeBackoff(attemptsRemaining)
+			Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
+			time.Sleep(backoff)
+			return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
+		}
+		return nil, err
+	}
+
+	for broker := client.any(); broker != nil; broker = client.any() {
+		Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
+
+		request := new(FindCoordinatorRequest)
+		request.CoordinatorKey = consumerGroup
+		request.CoordinatorType = CoordinatorGroup
+
+		response, err := broker.FindCoordinator(request)
+
+		if err != nil {
+			Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
+
+			switch err.(type) {
+			case PacketEncodingError:
+				return nil, err
+			default:
+				_ = broker.Close()
+				client.deregisterBroker(broker)
+				continue
+			}
+		}
+
+		switch response.Err {
+		case ErrNoError:
+			Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
+			return response, nil
+
+		case ErrConsumerCoordinatorNotAvailable:
+			Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
+
+			// This is very ugly, but this scenario will only happen once per cluster.
+			// The __consumer_offsets topic only has to be created one time.
+			// Whatever the configured partition count, partition 0 should always exist.
+			if _, err := client.Leader("__consumer_offsets", 0); err != nil {
+				Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
+				time.Sleep(2 * time.Second)
+			}
+
+			return retry(ErrConsumerCoordinatorNotAvailable)
+		case ErrGroupAuthorizationFailed:
+			Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", consumerGroup)
+			return retry(ErrGroupAuthorizationFailed)
+
+		default:
+			return nil, response.Err
+		}
+	}
+
+	Logger.Println("client/coordinator no available broker to send consumer metadata request to")
+	client.resurrectDeadBrokers()
+	return retry(ErrOutOfBrokers)
+}
+
+// nopCloserClient embeds an existing Client, but disables
+// the Close method (yet all other methods pass
+// through unchanged). This is for use in larger structs
+// where it is undesirable to close the client that was
+// passed in by the caller.
+type nopCloserClient struct {
+	Client
+}
+
+// Close intercepts and purposely does not call the underlying
+// client's Close() method.
+func (ncc *nopCloserClient) Close() error {
+	return nil
+}
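+
+// Usage sketch (editor's illustration; the surrounding struct is hypothetical):
+// wrap a caller-provided Client so internal code cannot accidentally close it:
+//
+//	func newWrapper(client Client) *wrapper {
+//		return &wrapper{client: &nopCloserClient{client}}
+//	}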
diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/Shopify/sarama/compress.go
new file mode 100644
index 0000000..9247c35
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/compress.go
@@ -0,0 +1,75 @@
+package sarama
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"sync"
+
+	"github.com/eapache/go-xerial-snappy"
+	"github.com/pierrec/lz4"
+)
+
+var (
+	lz4WriterPool = sync.Pool{
+		New: func() interface{} {
+			return lz4.NewWriter(nil)
+		},
+	}
+
+	gzipWriterPool = sync.Pool{
+		New: func() interface{} {
+			return gzip.NewWriter(nil)
+		},
+	}
+)
+
+func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) {
+	switch cc {
+	case CompressionNone:
+		return data, nil
+	case CompressionGZIP:
+		var (
+			err    error
+			buf    bytes.Buffer
+			writer *gzip.Writer
+		)
+		if level != CompressionLevelDefault {
+			writer, err = gzip.NewWriterLevel(&buf, level)
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			writer = gzipWriterPool.Get().(*gzip.Writer)
+			defer gzipWriterPool.Put(writer)
+			writer.Reset(&buf)
+		}
+		if _, err := writer.Write(data); err != nil {
+			return nil, err
+		}
+		if err := writer.Close(); err != nil {
+			return nil, err
+		}
+		return buf.Bytes(), nil
+	case CompressionSnappy:
+		return snappy.Encode(data), nil
+	case CompressionLZ4:
+		writer := lz4WriterPool.Get().(*lz4.Writer)
+		defer lz4WriterPool.Put(writer)
+
+		var buf bytes.Buffer
+		writer.Reset(&buf)
+
+		if _, err := writer.Write(data); err != nil {
+			return nil, err
+		}
+		if err := writer.Close(); err != nil {
+			return nil, err
+		}
+		return buf.Bytes(), nil
+	case CompressionZSTD:
+		return zstdCompress(nil, data)
+	default:
+		return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)}
+	}
+}
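+
+// Configuration sketch (editor's illustration): the codec and level passed to
+// compress come from the producer configuration, for example:
+//
+//	conf := sarama.NewConfig()
+//	conf.Producer.Compression = sarama.CompressionGZIP
+//	conf.Producer.CompressionLevel = gzip.BestCompression // from "compress/gzip"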
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
new file mode 100644
index 0000000..69c7161
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/config.go
@@ -0,0 +1,703 @@
+package sarama
+
+import (
+	"compress/gzip"
+	"crypto/tls"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"regexp"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+	"golang.org/x/net/proxy"
+)
+
+const defaultClientID = "sarama"
+
+var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
+
+// Config is used to pass multiple configuration options to Sarama's constructors.
+type Config struct {
+	// Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client.
+	Admin struct {
+		// The maximum duration the administrative Kafka client will wait for ClusterAdmin operations,
+		// including topics, brokers, configurations and ACLs (defaults to 3 seconds).
+		Timeout time.Duration
+	}
+
+	// Net is the namespace for network-level properties used by the Broker, and
+	// shared by the Client/Producer/Consumer.
+	Net struct {
+		// How many outstanding requests a connection is allowed to have before
+		// sending on it blocks (default 5).
+		MaxOpenRequests int
+
+		// All three of the below configurations are similar to the
+		// `socket.timeout.ms` setting in JVM kafka. All of them default
+		// to 30 seconds.
+		DialTimeout  time.Duration // How long to wait for the initial connection.
+		ReadTimeout  time.Duration // How long to wait for a response.
+		WriteTimeout time.Duration // How long to wait for a transmit.
+
+		TLS struct {
+			// Whether or not to use TLS when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// The TLS configuration to use for secure connections if
+			// enabled (defaults to nil).
+			Config *tls.Config
+		}
+
+		// SASL based authentication with broker. Supported mechanisms are
+		// PLAIN, SCRAM, OAUTHBEARER and GSSAPI; see Mechanism below.
+		SASL struct {
+			// Whether or not to use SASL authentication when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// SASLMechanism is the name of the enabled SASL mechanism.
+			// Possible values: PLAIN, OAUTHBEARER, SCRAM-SHA-256,
+			// SCRAM-SHA-512 or GSSAPI (defaults to PLAIN).
+			Mechanism SASLMechanism
+			// Version is the SASL Protocol Version to use.
+			// Kafka > 1.x should use V1, except on Azure EventHub which uses V0.
+			Version int16
+			// Whether or not to send the Kafka SASL handshake first if enabled
+			// (defaults to true). You should only set this to false if you're using
+			// a non-Kafka SASL proxy.
+			Handshake bool
+			// Username and password for SASL/PLAIN or SASL/SCRAM authentication.
+			User     string
+			Password string
+			// Authorization identity (authzid) to use for SASL/SCRAM authentication.
+			SCRAMAuthzID string
+			// SCRAMClientGeneratorFunc is a generator of a user provided implementation of a SCRAM
+			// client used to perform the SCRAM exchange with the server.
+			SCRAMClientGeneratorFunc func() SCRAMClient
+			// TokenProvider is a user-defined callback for generating
+			// access tokens for SASL/OAUTHBEARER auth. See the
+			// AccessTokenProvider interface docs for proper implementation
+			// guidelines.
+			TokenProvider AccessTokenProvider
+
+			GSSAPI GSSAPIConfig
+		}
+
+		// KeepAlive specifies the keep-alive period for an active network connection.
+		// If zero, keep-alives are disabled. (default is 0: disabled).
+		KeepAlive time.Duration
+
+		// LocalAddr is the local address to use when dialing an
+		// address. The address must be of a compatible type for the
+		// network being dialed.
+		// If nil, a local address is automatically chosen.
+		LocalAddr net.Addr
+
+		Proxy struct {
+			// Whether or not to use proxy when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// The proxy dialer to use when the proxy is enabled (defaults to nil).
+			Dialer proxy.Dialer
+		}
+	}
+
+	// Metadata is the namespace for metadata management properties used by the
+	// Client, and shared by the Producer/Consumer.
+	Metadata struct {
+		Retry struct {
+			// The total number of times to retry a metadata request when the
+			// cluster is in the middle of a leader election (default 3).
+			Max int
+			// How long to wait for leader election to occur before retrying
+			// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
+			Backoff time.Duration
+			// Called to compute backoff time dynamically. Useful for implementing
+			// more sophisticated backoff strategies. This takes precedence over
+			// `Backoff` if set.
+			BackoffFunc func(retries, maxRetries int) time.Duration
+		}
+		// How frequently to refresh the cluster metadata in the background.
+		// Defaults to 10 minutes. Set to 0 to disable. Similar to
+		// `topic.metadata.refresh.interval.ms` in the JVM version.
+		RefreshFrequency time.Duration
+
+		// Whether to maintain a full set of metadata for all topics, or just
+		// the minimal set that has been necessary so far. The full set is simpler
+		// and usually more convenient, but can take up a substantial amount of
+		// memory if you have many topics and partitions. Defaults to true.
+		Full bool
+
+		// How long to wait for a successful metadata response.
+		// Disabled by default which means a metadata request against an unreachable
+		// cluster (all brokers are unreachable or unresponsive) can take up to
+		// `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max`
+		// to fail.
+		Timeout time.Duration
+	}
+
+	// Producer is the namespace for configuration related to producing messages,
+	// used by the Producer.
+	Producer struct {
+		// The maximum permitted size of a message (defaults to 1000000). Should be
+		// set equal to or smaller than the broker's `message.max.bytes`.
+		MaxMessageBytes int
+		// The level of acknowledgement reliability needed from the broker (defaults
+		// to WaitForLocal). Equivalent to the `request.required.acks` setting of the
+		// JVM producer.
+		RequiredAcks RequiredAcks
+		// The maximum duration the broker will wait for the receipt of the number of
+		// RequiredAcks (defaults to 10 seconds). This is only relevant when
+		// RequiredAcks is set to WaitForAll or a number > 1. Only supports
+		// millisecond resolution, nanoseconds will be truncated. Equivalent to
+		// the JVM producer's `request.timeout.ms` setting.
+		Timeout time.Duration
+		// The type of compression to use on messages (defaults to no compression).
+		// Similar to `compression.codec` setting of the JVM producer.
+		Compression CompressionCodec
+		// The level of compression to use on messages. The meaning depends
+		// on the actual compression type used and defaults to the codec's
+		// default compression level.
+		CompressionLevel int
+		// Generates partitioners for choosing the partition to send messages to
+		// (defaults to hashing the message key). Similar to the `partitioner.class`
+		// setting for the JVM producer.
+		Partitioner PartitionerConstructor
+		// If enabled, the producer will ensure that exactly one copy of each message is
+		// written.
+		Idempotent bool
+
+		// Return specifies what channels will be populated. If they are set to true,
+		// you must read from the respective channels to prevent deadlock. If,
+		// however, this config is used to create a `SyncProducer`, both must be set
+		// to true and you shall not read from the channels since the producer does
+		// this internally.
+		Return struct {
+			// If enabled, successfully delivered messages will be returned on the
+			// Successes channel (default disabled).
+			Successes bool
+
+			// If enabled, messages that failed to deliver will be returned on the
+			// Errors channel, including error (default enabled).
+			Errors bool
+		}
+
+		// The following config options control how often messages are batched up and
+		// sent to the broker. By default, messages are sent as fast as possible, and
+		// all messages received while the current batch is in-flight are placed
+		// into the subsequent batch.
+		Flush struct {
+			// The best-effort number of bytes needed to trigger a flush. Use the
+			// global sarama.MaxRequestSize to set a hard upper limit.
+			Bytes int
+			// The best-effort number of messages needed to trigger a flush. Use
+			// `MaxMessages` to set a hard upper limit.
+			Messages int
+			// The best-effort frequency of flushes. Equivalent to
+			// `queue.buffering.max.ms` setting of JVM producer.
+			Frequency time.Duration
+			// The maximum number of messages the producer will send in a single
+			// broker request. Defaults to 0 for unlimited. Similar to
+			// `queue.buffering.max.messages` in the JVM producer.
+			MaxMessages int
+		}
+
+		Retry struct {
+			// The total number of times to retry sending a message (default 3).
+			// Similar to the `message.send.max.retries` setting of the JVM producer.
+			Max int
+			// How long to wait for the cluster to settle between retries
+			// (default 100ms). Similar to the `retry.backoff.ms` setting of the
+			// JVM producer.
+			Backoff time.Duration
+			// Called to compute backoff time dynamically. Useful for implementing
+			// more sophisticated backoff strategies. This takes precedence over
+			// `Backoff` if set.
+			BackoffFunc func(retries, maxRetries int) time.Duration
+		}
+	}
+
+	// Consumer is the namespace for configuration related to consuming messages,
+	// used by the Consumer.
+	Consumer struct {
+
+		// Group is the namespace for configuring consumer group.
+		Group struct {
+			Session struct {
+				// The timeout used to detect consumer failures when using Kafka's group management facility.
+				// The consumer sends periodic heartbeats to indicate its liveness to the broker.
+				// If no heartbeats are received by the broker before the expiration of this session timeout,
+				// then the broker will remove this consumer from the group and initiate a rebalance.
+				// Note that the value must be in the allowable range as configured in the broker configuration
+				// by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s)
+				Timeout time.Duration
+			}
+			Heartbeat struct {
+				// The expected time between heartbeats to the consumer coordinator when using Kafka's group
+				// management facilities. Heartbeats are used to ensure that the consumer's session stays active and
+				// to facilitate rebalancing when new consumers join or leave the group.
+				// The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no
+				// higher than 1/3 of that value.
+				// It can be adjusted even lower to control the expected time for normal rebalances (default 3s)
+				Interval time.Duration
+			}
+			Rebalance struct {
+				// Strategy for allocating topic partitions to members (default BalanceStrategyRange)
+				Strategy BalanceStrategy
+				// The maximum allowed time for each worker to join the group once a rebalance has begun.
+				// This is basically a limit on the amount of time needed for all tasks to flush any pending
+				// data and commit offsets. If the timeout is exceeded, then the worker will be removed from
+				// the group, which will cause offset commit failures (default 60s).
+				Timeout time.Duration
+
+				Retry struct {
+					// When a new consumer joins a consumer group the set of consumers attempt to "rebalance"
+					// the load to assign partitions to each consumer. If the set of consumers changes while
+					// this assignment is taking place the rebalance will fail and retry. This setting controls
+					// the maximum number of attempts before giving up (default 4).
+					Max int
+					// Backoff time between retries during rebalance (default 2s)
+					Backoff time.Duration
+				}
+			}
+			Member struct {
+				// Custom metadata to include when joining the group. The user data for all joined members
+				// can be retrieved by sending a DescribeGroupRequest to the broker that is the
+				// coordinator for the group.
+				UserData []byte
+			}
+		}
+
+		Retry struct {
+			// How long to wait after failing to read from a partition before
+			// trying again (default 2s).
+			Backoff time.Duration
+			// Called to compute backoff time dynamically. Useful for implementing
+			// more sophisticated backoff strategies. This takes precedence over
+			// `Backoff` if set.
+			BackoffFunc func(retries int) time.Duration
+		}
+
+		// Fetch is the namespace for controlling how many bytes are retrieved by any
+		// given request.
+		Fetch struct {
+			// The minimum number of message bytes to fetch in a request - the broker
+			// will wait until at least this many are available. The default is 1,
+			// as 0 causes the consumer to spin when no messages are available.
+			// Equivalent to the JVM's `fetch.min.bytes`.
+			Min int32
+			// The default number of message bytes to fetch from the broker in each
+			// request (default 1MB). This should be larger than the majority of
+			// your messages, or else the consumer will spend a lot of time
+			// negotiating sizes and not actually consuming. Similar to the JVM's
+			// `fetch.message.max.bytes`.
+			Default int32
+			// The maximum number of message bytes to fetch from the broker in a
+			// single request. Messages larger than this will return
+			// ErrMessageTooLarge and will not be consumable, so you must be sure
+			// this is at least as large as your largest message. Defaults to 0
+			// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
+			// global `sarama.MaxResponseSize` still applies.
+			Max int32
+		}
+		// The maximum amount of time the broker will wait for Consumer.Fetch.Min
+		// bytes to become available before it returns fewer than that anyways. The
+		// default is 250ms, since 0 causes the consumer to spin when no events are
+		// available. 100-500ms is a reasonable range for most cases. Kafka only
+		// supports precision up to milliseconds; nanoseconds will be truncated.
+		// Equivalent to the JVM's `fetch.wait.max.ms`.
+		MaxWaitTime time.Duration
+
+		// The maximum amount of time the consumer expects a message takes to
+		// process for the user. If writing to the Messages channel takes longer
+		// than this, that partition will stop fetching more messages until it
+		// can proceed again.
+		// Note that, since the Messages channel is buffered, the actual grace time is
+		// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
+		// If a message is not written to the Messages channel between two ticks
+		// of the expiryTicker then a timeout is detected.
+		// Using a ticker instead of a timer to detect timeouts should typically
+		// result in many fewer calls to Timer functions which may result in a
+		// significant performance improvement if many messages are being sent
+		// and timeouts are infrequent.
+		// The disadvantage of using a ticker instead of a timer is that
+		// timeouts will be less accurate. That is, the effective timeout could
+		// be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
+		// example, if `MaxProcessingTime` is 100ms then a delay of 180ms
+		// between two messages being sent may not be recognized as a timeout.
+		MaxProcessingTime time.Duration
+
+		// Return specifies what channels will be populated. If they are set to true,
+		// you must read from them to prevent deadlock.
+		Return struct {
+			// If enabled, any errors that occurred while consuming are returned on
+			// the Errors channel (default disabled).
+			Errors bool
+		}
+
+		// Offsets specifies configuration for how and when to commit consumed
+		// offsets. This currently requires the manual use of an OffsetManager
+		// but will eventually be automated.
+		Offsets struct {
+			AutoCommit struct {
+				// Whether or not to auto-commit updated offsets back to the broker.
+				// (default enabled).
+				Enable bool
+
+				// How frequently to commit updated offsets. Ineffective unless
+				// auto-commit is enabled (default 1s)
+				Interval time.Duration
+			}
+
+			// The initial offset to use if no offset was previously committed.
+			// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
+			Initial int64
+
+			// The retention duration for committed offsets. If zero, disabled
+			// (in which case the `offsets.retention.minutes` option on the
+			// broker will be used).  Kafka only supports precision up to
+			// milliseconds; nanoseconds will be truncated. Requires Kafka
+			// broker version 0.9.0 or later.
+			// (default is 0: disabled).
+			Retention time.Duration
+
+			Retry struct {
+				// The total number of times to retry failing commit
+				// requests during OffsetManager shutdown (default 3).
+				Max int
+			}
+		}
+
+		// IsolationLevel supports 2 modes:
+		// 	- use `ReadUncommitted` (default) to consume and return all messages in message channel
+		//	- use `ReadCommitted` to hide messages that are part of an aborted transaction
+		IsolationLevel IsolationLevel
+	}
+
+	// A user-provided string sent with every request to the brokers for logging,
+	// debugging, and auditing purposes. Defaults to "sarama", but you should
+	// probably set it to something specific to your application.
+	ClientID string
+	// The number of events to buffer in internal and external channels. This
+	// permits the producer and consumer to continue processing some messages
+	// in the background while user code is working, greatly improving throughput.
+	// Defaults to 256.
+	ChannelBufferSize int
+	// The version of Kafka that Sarama will assume it is running against.
+	// Defaults to the oldest supported stable version. Since Kafka provides
+	// backwards-compatibility, setting it to a version older than you have
+	// will not break anything, although it may prevent you from using the
+	// latest features. Setting it to a version greater than you are actually
+	// running may lead to random breakage.
+	Version KafkaVersion
+	// The registry to define metrics into.
+	// Defaults to a local registry.
+	// If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
+	// prior to starting Sarama.
+	// See Examples on how to use the metrics registry
+	MetricRegistry metrics.Registry
+}
+
+// NewConfig returns a new configuration instance with sane defaults.
+func NewConfig() *Config {
+	c := &Config{}
+
+	c.Admin.Timeout = 3 * time.Second
+
+	c.Net.MaxOpenRequests = 5
+	c.Net.DialTimeout = 30 * time.Second
+	c.Net.ReadTimeout = 30 * time.Second
+	c.Net.WriteTimeout = 30 * time.Second
+	c.Net.SASL.Handshake = true
+	c.Net.SASL.Version = SASLHandshakeV0
+
+	c.Metadata.Retry.Max = 3
+	c.Metadata.Retry.Backoff = 250 * time.Millisecond
+	c.Metadata.RefreshFrequency = 10 * time.Minute
+	c.Metadata.Full = true
+
+	c.Producer.MaxMessageBytes = 1000000
+	c.Producer.RequiredAcks = WaitForLocal
+	c.Producer.Timeout = 10 * time.Second
+	c.Producer.Partitioner = NewHashPartitioner
+	c.Producer.Retry.Max = 3
+	c.Producer.Retry.Backoff = 100 * time.Millisecond
+	c.Producer.Return.Errors = true
+	c.Producer.CompressionLevel = CompressionLevelDefault
+
+	c.Consumer.Fetch.Min = 1
+	c.Consumer.Fetch.Default = 1024 * 1024
+	c.Consumer.Retry.Backoff = 2 * time.Second
+	c.Consumer.MaxWaitTime = 250 * time.Millisecond
+	c.Consumer.MaxProcessingTime = 100 * time.Millisecond
+	c.Consumer.Return.Errors = false
+	c.Consumer.Offsets.AutoCommit.Enable = true
+	c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
+	c.Consumer.Offsets.Initial = OffsetNewest
+	c.Consumer.Offsets.Retry.Max = 3
+
+	c.Consumer.Group.Session.Timeout = 10 * time.Second
+	c.Consumer.Group.Heartbeat.Interval = 3 * time.Second
+	c.Consumer.Group.Rebalance.Strategy = BalanceStrategyRange
+	c.Consumer.Group.Rebalance.Timeout = 60 * time.Second
+	c.Consumer.Group.Rebalance.Retry.Max = 4
+	c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second
+
+	c.ClientID = defaultClientID
+	c.ChannelBufferSize = 256
+	c.Version = MinVersion
+	c.MetricRegistry = metrics.NewRegistry()
+
+	return c
+}
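+
+// Usage sketch (editor's illustration; the client ID is hypothetical): start
+// from the defaults above, override what you need, and let Validate catch
+// mistakes before the config is used:
+//
+//	conf := sarama.NewConfig()
+//	conf.ClientID = "my-service"
+//	conf.Producer.RequiredAcks = sarama.WaitForAll
+//	conf.Producer.Return.Successes = true
+//	if err := conf.Validate(); err != nil {
+//		panic(err)
+//	}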
+
+// Validate checks a Config instance. It will return a
+// ConfigurationError if the specified values don't make sense.
+func (c *Config) Validate() error {
+	// some configuration values should be warned on but not fail completely, do those first
+	if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
+		Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
+	}
+	if !c.Net.SASL.Enable {
+		if c.Net.SASL.User != "" {
+			Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
+		}
+		if c.Net.SASL.Password != "" {
+			Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
+		}
+	}
+	if c.Producer.RequiredAcks > 1 {
+		Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
+	}
+	if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
+		Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
+	}
+	if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
+		Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
+	}
+	if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
+		Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
+	}
+	if c.Producer.Timeout%time.Millisecond != 0 {
+		Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
+	}
+	if c.Consumer.MaxWaitTime < 100*time.Millisecond {
+		Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
+	}
+	if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
+		Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
+		Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 {
+		Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 {
+		Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 {
+		Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.ClientID == defaultClientID {
+		Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
+	}
+
+	// validate Net values
+	switch {
+	case c.Net.MaxOpenRequests <= 0:
+		return ConfigurationError("Net.MaxOpenRequests must be > 0")
+	case c.Net.DialTimeout <= 0:
+		return ConfigurationError("Net.DialTimeout must be > 0")
+	case c.Net.ReadTimeout <= 0:
+		return ConfigurationError("Net.ReadTimeout must be > 0")
+	case c.Net.WriteTimeout <= 0:
+		return ConfigurationError("Net.WriteTimeout must be > 0")
+	case c.Net.KeepAlive < 0:
+		return ConfigurationError("Net.KeepAlive must be >= 0")
+	case c.Net.SASL.Enable:
+		if c.Net.SASL.Mechanism == "" {
+			c.Net.SASL.Mechanism = SASLTypePlaintext
+		}
+
+		switch c.Net.SASL.Mechanism {
+		case SASLTypePlaintext:
+			if c.Net.SASL.User == "" {
+				return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
+			}
+			if c.Net.SASL.Password == "" {
+				return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
+			}
+		case SASLTypeOAuth:
+			if c.Net.SASL.TokenProvider == nil {
+				return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider")
+			}
+		case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
+			if c.Net.SASL.User == "" {
+				return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
+			}
+			if c.Net.SASL.Password == "" {
+				return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
+			}
+			if c.Net.SASL.SCRAMClientGeneratorFunc == nil {
+				return ConfigurationError("A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc")
+			}
+		case SASLTypeGSSAPI:
+			if c.Net.SASL.GSSAPI.ServiceName == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used")
+			}
+
+			if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH {
+				if c.Net.SASL.GSSAPI.Password == "" {
+					return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " +
+						"mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH")
+				}
+			} else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH {
+				if c.Net.SASL.GSSAPI.KeyTabPath == "" {
+					return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" +
+						" and  Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH")
+				}
+			} else {
+				return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH")
+			}
+			if c.Net.SASL.GSSAPI.KerberosConfigPath == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used")
+			}
+			if c.Net.SASL.GSSAPI.Username == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used")
+			}
+			if c.Net.SASL.GSSAPI.Realm == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used")
+			}
+		default:
+			msg := fmt.Sprintf("The SASL mechanism configuration is invalid. Possible values are `%s`, `%s`, `%s`, `%s` and `%s`",
+				SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512, SASLTypeGSSAPI)
+			return ConfigurationError(msg)
+		}
+	}
+
+	// validate the Admin values
+	switch {
+	case c.Admin.Timeout <= 0:
+		return ConfigurationError("Admin.Timeout must be > 0")
+	}
+
+	// validate the Metadata values
+	switch {
+	case c.Metadata.Retry.Max < 0:
+		return ConfigurationError("Metadata.Retry.Max must be >= 0")
+	case c.Metadata.Retry.Backoff < 0:
+		return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
+	case c.Metadata.RefreshFrequency < 0:
+		return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
+	}
+
+	// validate the Producer values
+	switch {
+	case c.Producer.MaxMessageBytes <= 0:
+		return ConfigurationError("Producer.MaxMessageBytes must be > 0")
+	case c.Producer.RequiredAcks < -1:
+		return ConfigurationError("Producer.RequiredAcks must be >= -1")
+	case c.Producer.Timeout <= 0:
+		return ConfigurationError("Producer.Timeout must be > 0")
+	case c.Producer.Partitioner == nil:
+		return ConfigurationError("Producer.Partitioner must not be nil")
+	case c.Producer.Flush.Bytes < 0:
+		return ConfigurationError("Producer.Flush.Bytes must be >= 0")
+	case c.Producer.Flush.Messages < 0:
+		return ConfigurationError("Producer.Flush.Messages must be >= 0")
+	case c.Producer.Flush.Frequency < 0:
+		return ConfigurationError("Producer.Flush.Frequency must be >= 0")
+	case c.Producer.Flush.MaxMessages < 0:
+		return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
+	case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
+		return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
+	case c.Producer.Retry.Max < 0:
+		return ConfigurationError("Producer.Retry.Max must be >= 0")
+	case c.Producer.Retry.Backoff < 0:
+		return ConfigurationError("Producer.Retry.Backoff must be >= 0")
+	}
+
+	if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
+		return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
+	}
+
+	if c.Producer.Compression == CompressionGZIP {
+		if c.Producer.CompressionLevel != CompressionLevelDefault {
+			if _, err := gzip.NewWriterLevel(ioutil.Discard, c.Producer.CompressionLevel); err != nil {
+				return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err))
+			}
+		}
+	}
+
+	if c.Producer.Idempotent {
+		if !c.Version.IsAtLeast(V0_11_0_0) {
+			return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0")
+		}
+		if c.Producer.Retry.Max == 0 {
+			return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1")
+		}
+		if c.Producer.RequiredAcks != WaitForAll {
+			return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll")
+		}
+		if c.Net.MaxOpenRequests > 1 {
+			return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1")
+		}
+	}
+
+	// validate the Consumer values
+	switch {
+	case c.Consumer.Fetch.Min <= 0:
+		return ConfigurationError("Consumer.Fetch.Min must be > 0")
+	case c.Consumer.Fetch.Default <= 0:
+		return ConfigurationError("Consumer.Fetch.Default must be > 0")
+	case c.Consumer.Fetch.Max < 0:
+		return ConfigurationError("Consumer.Fetch.Max must be >= 0")
+	case c.Consumer.MaxWaitTime < 1*time.Millisecond:
+		return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
+	case c.Consumer.MaxProcessingTime <= 0:
+		return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
+	case c.Consumer.Retry.Backoff < 0:
+		return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
+	case c.Consumer.Offsets.AutoCommit.Interval <= 0:
+		return ConfigurationError("Consumer.Offsets.AutoCommit.Interval must be > 0")
+	case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
+		return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
+	case c.Consumer.Offsets.Retry.Max < 0:
+		return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0")
+	case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted:
+		return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted")
+	}
+
+	// validate IsolationLevel
+	if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) {
+		return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0")
+	}
+
+	// validate the Consumer Group values
+	switch {
+	case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond:
+		return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms")
+	case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond:
+		return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms")
+	case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout:
+		return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout")
+	case c.Consumer.Group.Rebalance.Strategy == nil:
+		return ConfigurationError("Consumer.Group.Rebalance.Strategy must not be empty")
+	case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond:
+		return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms")
+	case c.Consumer.Group.Rebalance.Retry.Max < 0:
+		return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0")
+	case c.Consumer.Group.Rebalance.Retry.Backoff < 0:
+		return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0")
+	}
+
+	// validate misc shared values
+	switch {
+	case c.ChannelBufferSize < 0:
+		return ConfigurationError("ChannelBufferSize must be >= 0")
+	case !validID.MatchString(c.ClientID):
+		return ConfigurationError("ClientID is invalid")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/Shopify/sarama/config_resource_type.go
new file mode 100644
index 0000000..5399d75
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/config_resource_type.go
@@ -0,0 +1,22 @@
+package sarama
+
+// ConfigResourceType is a type for config resource
+type ConfigResourceType int8
+
+// Taken from:
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs#KIP-133:DescribeandAlterConfigsAdminAPIs-WireFormattypes
+
+const (
+	// UnknownResource constant type
+	UnknownResource ConfigResourceType = iota
+	// AnyResource constant type
+	AnyResource
+	// TopicResource constant type
+	TopicResource
+	// GroupResource constant type
+	GroupResource
+	// ClusterResource constant type
+	ClusterResource
+	// BrokerResource constant type
+	BrokerResource
+)
diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go
new file mode 100644
index 0000000..72c4d7c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer.go
@@ -0,0 +1,896 @@
+package sarama
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// ConsumerMessage encapsulates a Kafka message returned by the consumer.
+type ConsumerMessage struct {
+	Headers        []*RecordHeader // only set if kafka is version 0.11+
+	Timestamp      time.Time       // only set if kafka is version 0.10+, inner message timestamp
+	BlockTimestamp time.Time       // only set if kafka is version 0.10+, outer (compressed) block timestamp
+
+	Key, Value []byte
+	Topic      string
+	Partition  int32
+	Offset     int64
+}
+
+// ConsumerError is what is provided to the user when an error occurs.
+// It wraps an error and includes the topic and partition.
+type ConsumerError struct {
+	Topic     string
+	Partition int32
+	Err       error
+}
+
+func (ce ConsumerError) Error() string {
+	return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
+}
+
+// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
+// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
+// when stopping.
+type ConsumerErrors []*ConsumerError
+
+func (ce ConsumerErrors) Error() string {
+	return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
+}
+
+// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
+// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
+// scope.
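+//
+// A minimal sketch of direct partition consumption (the broker address, topic
+// name and partition are placeholders, and error handling is elided for brevity):
+//
+//	consumer, err := NewConsumer([]string{"localhost:9092"}, NewConfig())
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer consumer.Close()
+//
+//	pc, err := consumer.ConsumePartition("my-topic", 0, OffsetNewest)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer pc.Close()
+//
+//	for msg := range pc.Messages() {
+//		fmt.Printf("offset %d: %s\n", msg.Offset, msg.Value)
+//	}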
+type Consumer interface {
+	// Topics returns the set of available topics as retrieved from the cluster
+	// metadata. This method is the same as Client.Topics(), and is provided for
+	// convenience.
+	Topics() ([]string, error)
+
+	// Partitions returns the sorted list of all partition IDs for the given topic.
+	// This method is the same as Client.Partitions(), and is provided for convenience.
+	Partitions(topic string) ([]int32, error)
+
+	// ConsumePartition creates a PartitionConsumer on the given topic/partition with
+	// the given offset. It will return an error if this Consumer is already consuming
+	// on the given topic/partition. Offset can be a literal offset, or OffsetNewest
+	// or OffsetOldest
+	ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
+
+	// HighWaterMarks returns the current high water marks for each topic and partition.
+	// Consistency between partitions is not guaranteed since high water marks are updated separately.
+	HighWaterMarks() map[string]map[int32]int64
+
+	// Close shuts down the consumer. It must be called after all child
+	// PartitionConsumers have already been closed.
+	Close() error
+}
+
+type consumer struct {
+	conf            *Config
+	children        map[string]map[int32]*partitionConsumer
+	brokerConsumers map[*Broker]*brokerConsumer
+	client          Client
+	lock            sync.Mutex
+}
+
+// NewConsumer creates a new consumer using the given broker addresses and configuration.
+func NewConsumer(addrs []string, config *Config) (Consumer, error) {
+	client, err := NewClient(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+	return newConsumer(client)
+}
+
+// NewConsumerFromClient creates a new consumer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
+func NewConsumerFromClient(client Client) (Consumer, error) {
+	// For a client passed in by the caller, ensure we don't
+	// call Close() on it.
+	cli := &nopCloserClient{client}
+	return newConsumer(cli)
+}
+
+func newConsumer(client Client) (Consumer, error) {
+	// Check that we are not dealing with a closed Client before processing any other arguments
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	c := &consumer{
+		client:          client,
+		conf:            client.Config(),
+		children:        make(map[string]map[int32]*partitionConsumer),
+		brokerConsumers: make(map[*Broker]*brokerConsumer),
+	}
+
+	return c, nil
+}
+
+func (c *consumer) Close() error {
+	return c.client.Close()
+}
+
+func (c *consumer) Topics() ([]string, error) {
+	return c.client.Topics()
+}
+
+func (c *consumer) Partitions(topic string) ([]int32, error) {
+	return c.client.Partitions(topic)
+}
+
+func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
+	child := &partitionConsumer{
+		consumer:  c,
+		conf:      c.conf,
+		topic:     topic,
+		partition: partition,
+		messages:  make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
+		errors:    make(chan *ConsumerError, c.conf.ChannelBufferSize),
+		feeder:    make(chan *FetchResponse, 1),
+		trigger:   make(chan none, 1),
+		dying:     make(chan none),
+		fetchSize: c.conf.Consumer.Fetch.Default,
+	}
+
+	if err := child.chooseStartingOffset(offset); err != nil {
+		return nil, err
+	}
+
+	var leader *Broker
+	var err error
+	if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
+		return nil, err
+	}
+
+	if err := c.addChild(child); err != nil {
+		return nil, err
+	}
+
+	go withRecover(child.dispatcher)
+	go withRecover(child.responseFeeder)
+
+	child.broker = c.refBrokerConsumer(leader)
+	child.broker.input <- child
+
+	return child, nil
+}
+
+func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	hwms := make(map[string]map[int32]int64)
+	for topic, p := range c.children {
+		hwm := make(map[int32]int64, len(p))
+		for partition, pc := range p {
+			hwm[partition] = pc.HighWaterMarkOffset()
+		}
+		hwms[topic] = hwm
+	}
+
+	return hwms
+}
+
+func (c *consumer) addChild(child *partitionConsumer) error {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	topicChildren := c.children[child.topic]
+	if topicChildren == nil {
+		topicChildren = make(map[int32]*partitionConsumer)
+		c.children[child.topic] = topicChildren
+	}
+
+	if topicChildren[child.partition] != nil {
+		return ConfigurationError("That topic/partition is already being consumed")
+	}
+
+	topicChildren[child.partition] = child
+	return nil
+}
+
+func (c *consumer) removeChild(child *partitionConsumer) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	delete(c.children[child.topic], child.partition)
+}
+
+func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	bc := c.brokerConsumers[broker]
+	if bc == nil {
+		bc = c.newBrokerConsumer(broker)
+		c.brokerConsumers[broker] = bc
+	}
+
+	bc.refs++
+
+	return bc
+}
+
+func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	brokerWorker.refs--
+
+	if brokerWorker.refs == 0 {
+		close(brokerWorker.input)
+		if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
+			delete(c.brokerConsumers, brokerWorker.broker)
+		}
+	}
+}
+
+func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	delete(c.brokerConsumers, brokerWorker.broker)
+}
+
+// PartitionConsumer
+
+// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
+// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
+// of scope.
+//
+// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
+// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
+// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
+// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
+// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
+// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
+// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
+//
+// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
+// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
+// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
+// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
+// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
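+//
+// A sketch of that pattern, servicing errors on a separate goroutine (pc is a
+// PartitionConsumer, process is a placeholder for application logic, and
+// Consumer.Return.Errors is assumed to be set to true):
+//
+//	go func() {
+//		for err := range pc.Errors() {
+//			log.Println(err)
+//		}
+//	}()
+//
+//	for msg := range pc.Messages() {
+//		process(msg)
+//	}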
+type PartitionConsumer interface {
+	// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
+	// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
+	// function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call
+	// this before calling Close on the underlying client.
+	AsyncClose()
+
+	// Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
+	// the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
+	// the Messages channel when this function is called, you will be competing with Close for messages; consider
+	// calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
+	// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
+	Close() error
+
+	// Messages returns the read channel for the messages that are returned by
+	// the broker.
+	Messages() <-chan *ConsumerMessage
+
+	// Errors returns a read channel of errors that occurred during consuming, if
+	// enabled. By default, errors are logged and not returned over this channel.
+	// If you want to implement any custom error handling, set your config's
+	// Consumer.Return.Errors setting to true, and read from this channel.
+	Errors() <-chan *ConsumerError
+
+	// HighWaterMarkOffset returns the high water mark offset of the partition,
+	// i.e. the offset that will be used for the next message that will be produced.
+	// You can use this to determine how far behind the processing is.
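+	//
+	// For example, the approximate lag after receiving msg is (a sketch; the
+	// mark is only updated once per fetch, so treat it as an estimate):
+	//
+	//	lag := pc.HighWaterMarkOffset() - msg.Offset - 1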
+	HighWaterMarkOffset() int64
+}
+
+type partitionConsumer struct {
+	highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+
+	consumer *consumer
+	conf     *Config
+	broker   *brokerConsumer
+	messages chan *ConsumerMessage
+	errors   chan *ConsumerError
+	feeder   chan *FetchResponse
+
+	trigger, dying chan none
+	closeOnce      sync.Once
+	topic          string
+	partition      int32
+	responseResult error
+	fetchSize      int32
+	offset         int64
+	retries        int32
+}
+
+var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
+
+func (child *partitionConsumer) sendError(err error) {
+	cErr := &ConsumerError{
+		Topic:     child.topic,
+		Partition: child.partition,
+		Err:       err,
+	}
+
+	if child.conf.Consumer.Return.Errors {
+		child.errors <- cErr
+	} else {
+		Logger.Println(cErr)
+	}
+}
+
+func (child *partitionConsumer) computeBackoff() time.Duration {
+	if child.conf.Consumer.Retry.BackoffFunc != nil {
+		retries := atomic.AddInt32(&child.retries, 1)
+		return child.conf.Consumer.Retry.BackoffFunc(int(retries))
+	}
+	return child.conf.Consumer.Retry.Backoff
+}
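+
+// As a sketch, a caller could configure a linearly growing backoff via the
+// config (BackoffFunc takes precedence over the static Backoff value above):
+//
+//	config.Consumer.Retry.BackoffFunc = func(retries int) time.Duration {
+//		return time.Duration(retries) * 100 * time.Millisecond
+//	}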
+
+func (child *partitionConsumer) dispatcher() {
+	for range child.trigger {
+		select {
+		case <-child.dying:
+			close(child.trigger)
+		case <-time.After(child.computeBackoff()):
+			if child.broker != nil {
+				child.consumer.unrefBrokerConsumer(child.broker)
+				child.broker = nil
+			}
+
+			Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
+			if err := child.dispatch(); err != nil {
+				child.sendError(err)
+				child.trigger <- none{}
+			}
+		}
+	}
+
+	if child.broker != nil {
+		child.consumer.unrefBrokerConsumer(child.broker)
+	}
+	child.consumer.removeChild(child)
+	close(child.feeder)
+}
+
+func (child *partitionConsumer) dispatch() error {
+	if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
+		return err
+	}
+
+	var leader *Broker
+	var err error
+	if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
+		return err
+	}
+
+	child.broker = child.consumer.refBrokerConsumer(leader)
+
+	child.broker.input <- child
+
+	return nil
+}
+
+func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
+	newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
+	if err != nil {
+		return err
+	}
+	oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
+	if err != nil {
+		return err
+	}
+
+	switch {
+	case offset == OffsetNewest:
+		child.offset = newestOffset
+	case offset == OffsetOldest:
+		child.offset = oldestOffset
+	case offset >= oldestOffset && offset <= newestOffset:
+		child.offset = offset
+	default:
+		return ErrOffsetOutOfRange
+	}
+
+	return nil
+}
+
+func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
+	return child.messages
+}
+
+func (child *partitionConsumer) Errors() <-chan *ConsumerError {
+	return child.errors
+}
+
+func (child *partitionConsumer) AsyncClose() {
+	// this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
+	// the dispatcher to exit its loop, remove the child from the consumer, and then close the child's 'messages'
+	// and 'errors' channels (alternatively, if the child is already at the dispatcher for some reason, it will
+	// also just close itself)
+	child.closeOnce.Do(func() {
+		close(child.dying)
+	})
+}
+
+func (child *partitionConsumer) Close() error {
+	child.AsyncClose()
+
+	var errors ConsumerErrors
+	for err := range child.errors {
+		errors = append(errors, err)
+	}
+
+	if len(errors) > 0 {
+		return errors
+	}
+	return nil
+}
+
+func (child *partitionConsumer) HighWaterMarkOffset() int64 {
+	return atomic.LoadInt64(&child.highWaterMarkOffset)
+}
+
+func (child *partitionConsumer) responseFeeder() {
+	var msgs []*ConsumerMessage
+	expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
+	firstAttempt := true
+
+feederLoop:
+	for response := range child.feeder {
+		msgs, child.responseResult = child.parseResponse(response)
+
+		if child.responseResult == nil {
+			atomic.StoreInt32(&child.retries, 0)
+		}
+
+		for i, msg := range msgs {
+		messageSelect:
+			select {
+			case <-child.dying:
+				child.broker.acks.Done()
+				continue feederLoop
+			case child.messages <- msg:
+				firstAttempt = true
+			case <-expiryTicker.C:
+				if !firstAttempt {
+					child.responseResult = errTimedOut
+					child.broker.acks.Done()
+				remainingLoop:
+					for _, msg = range msgs[i:] {
+						select {
+						case child.messages <- msg:
+						case <-child.dying:
+							break remainingLoop
+						}
+					}
+					child.broker.input <- child
+					continue feederLoop
+				} else {
+					// current message has not been sent, return to select
+					// statement
+					firstAttempt = false
+					goto messageSelect
+				}
+			}
+		}
+
+		child.broker.acks.Done()
+	}
+
+	expiryTicker.Stop()
+	close(child.messages)
+	close(child.errors)
+}
+
+func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
+	var messages []*ConsumerMessage
+	for _, msgBlock := range msgSet.Messages {
+		for _, msg := range msgBlock.Messages() {
+			offset := msg.Offset
+			timestamp := msg.Msg.Timestamp
+			if msg.Msg.Version >= 1 {
+				baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
+				offset += baseOffset
+				if msg.Msg.LogAppendTime {
+					timestamp = msgBlock.Msg.Timestamp
+				}
+			}
+			if offset < child.offset {
+				continue
+			}
+			messages = append(messages, &ConsumerMessage{
+				Topic:          child.topic,
+				Partition:      child.partition,
+				Key:            msg.Msg.Key,
+				Value:          msg.Msg.Value,
+				Offset:         offset,
+				Timestamp:      timestamp,
+				BlockTimestamp: msgBlock.Msg.Timestamp,
+			})
+			child.offset = offset + 1
+		}
+	}
+	if len(messages) == 0 {
+		child.offset++
+	}
+	return messages, nil
+}
+
+func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
+	messages := make([]*ConsumerMessage, 0, len(batch.Records))
+
+	for _, rec := range batch.Records {
+		offset := batch.FirstOffset + rec.OffsetDelta
+		if offset < child.offset {
+			continue
+		}
+		timestamp := batch.FirstTimestamp.Add(rec.TimestampDelta)
+		if batch.LogAppendTime {
+			timestamp = batch.MaxTimestamp
+		}
+		messages = append(messages, &ConsumerMessage{
+			Topic:     child.topic,
+			Partition: child.partition,
+			Key:       rec.Key,
+			Value:     rec.Value,
+			Offset:    offset,
+			Timestamp: timestamp,
+			Headers:   rec.Headers,
+		})
+		child.offset = offset + 1
+	}
+	if len(messages) == 0 {
+		child.offset++
+	}
+	return messages, nil
+}
+
+func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
+	var (
+		metricRegistry          = child.conf.MetricRegistry
+		consumerBatchSizeMetric metrics.Histogram
+	)
+
+	if metricRegistry != nil {
+		consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", metricRegistry)
+	}
+
+	// If the request was throttled and the response is empty, log and return without error
+	if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 {
+		Logger.Printf(
+			"consumer/broker/%d FetchResponse throttled %v\n",
+			child.broker.broker.ID(), response.ThrottleTime)
+		return nil, nil
+	}
+
+	block := response.GetBlock(child.topic, child.partition)
+	if block == nil {
+		return nil, ErrIncompleteResponse
+	}
+
+	if block.Err != ErrNoError {
+		return nil, block.Err
+	}
+
+	nRecs, err := block.numRecords()
+	if err != nil {
+		return nil, err
+	}
+
+	consumerBatchSizeMetric.Update(int64(nRecs))
+
+	if nRecs == 0 {
+		partialTrailingMessage, err := block.isPartial()
+		if err != nil {
+			return nil, err
+		}
+		// We got no messages. If we got a trailing one then we need to ask for more data.
+		// Otherwise we just poll again and wait for one to be produced...
+		if partialTrailingMessage {
+			if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
+				// we can't ask for more data, we've hit the configured limit
+				child.sendError(ErrMessageTooLarge)
+				child.offset++ // skip this one so we can keep processing future messages
+			} else {
+				child.fetchSize *= 2
+				// check int32 overflow
+				if child.fetchSize < 0 {
+					child.fetchSize = math.MaxInt32
+				}
+				if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
+					child.fetchSize = child.conf.Consumer.Fetch.Max
+				}
+			}
+		}
+
+		return nil, nil
+	}
+
+	// we got messages, reset our fetch size in case it was increased for a previous request
+	child.fetchSize = child.conf.Consumer.Fetch.Default
+	atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
+
+	// abortedProducerIDs contains the producer IDs whose messages should be ignored as uncommitted
+	// - a producerID is added when the partitionConsumer iterates over the offset at which an aborted transaction begins (abortedTransaction.FirstOffset)
+	// - a producerID is removed when the partitionConsumer iterates over an aborted control record, meaning the aborted transaction for this producer is over
+	abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions))
+	abortedTransactions := block.getAbortedTransactions()
+
+	messages := []*ConsumerMessage{}
+	for _, records := range block.RecordsSet {
+		switch records.recordsType {
+		case legacyRecords:
+			messageSetMessages, err := child.parseMessages(records.MsgSet)
+			if err != nil {
+				return nil, err
+			}
+
+			messages = append(messages, messageSetMessages...)
+		case defaultRecords:
+			// Consume remaining aborted transactions up to the last offset of the current batch
+			for _, txn := range abortedTransactions {
+				if txn.FirstOffset > records.RecordBatch.LastOffset() {
+					break
+				}
+				abortedProducerIDs[txn.ProducerID] = struct{}{}
+				// Pop abortedTransactions so that we never add it again
+				abortedTransactions = abortedTransactions[1:]
+			}
+
+			recordBatchMessages, err := child.parseRecords(records.RecordBatch)
+			if err != nil {
+				return nil, err
+			}
+
+			// Parse and commit offset but do not expose messages that are:
+			// - control records
+			// - part of an aborted transaction when set to `ReadCommitted`
+
+			// control record
+			isControl, err := records.isControl()
+			if err != nil {
+				// The reason for continuing on error here is unclear.
+				// The safe bet is to ignore control messages under ReadUncommitted,
+				// and to block on them in case of error under ReadCommitted
+				if child.conf.Consumer.IsolationLevel == ReadCommitted {
+					return nil, err
+				}
+				continue
+			}
+			if isControl {
+				controlRecord, err := records.getControlRecord()
+				if err != nil {
+					return nil, err
+				}
+
+				if controlRecord.Type == ControlRecordAbort {
+					delete(abortedProducerIDs, records.RecordBatch.ProducerID)
+				}
+				continue
+			}
+
+			// filter aborted transactions
+			if child.conf.Consumer.IsolationLevel == ReadCommitted {
+				_, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID]
+				if records.RecordBatch.IsTransactional && isAborted {
+					continue
+				}
+			}
+
+			messages = append(messages, recordBatchMessages...)
+		default:
+			return nil, fmt.Errorf("unknown records type: %v", records.recordsType)
+		}
+	}
+
+	return messages, nil
+}
+
+type brokerConsumer struct {
+	consumer         *consumer
+	broker           *Broker
+	input            chan *partitionConsumer
+	newSubscriptions chan []*partitionConsumer
+	subscriptions    map[*partitionConsumer]none
+	wait             chan none
+	acks             sync.WaitGroup
+	refs             int
+}
+
+func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
+	bc := &brokerConsumer{
+		consumer:         c,
+		broker:           broker,
+		input:            make(chan *partitionConsumer),
+		newSubscriptions: make(chan []*partitionConsumer),
+		wait:             make(chan none),
+		subscriptions:    make(map[*partitionConsumer]none),
+		refs:             0,
+	}
+
+	go withRecover(bc.subscriptionManager)
+	go withRecover(bc.subscriptionConsumer)
+
+	return bc
+}
+
+// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
+// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
+// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
+// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
+// so the main goroutine can block waiting for work if it has none.
+func (bc *brokerConsumer) subscriptionManager() {
+	var buffer []*partitionConsumer
+
+	for {
+		if len(buffer) > 0 {
+			select {
+			case event, ok := <-bc.input:
+				if !ok {
+					goto done
+				}
+				buffer = append(buffer, event)
+			case bc.newSubscriptions <- buffer:
+				buffer = nil
+			case bc.wait <- none{}:
+			}
+		} else {
+			select {
+			case event, ok := <-bc.input:
+				if !ok {
+					goto done
+				}
+				buffer = append(buffer, event)
+			case bc.newSubscriptions <- nil:
+			}
+		}
+	}
+
+done:
+	close(bc.wait)
+	if len(buffer) > 0 {
+		bc.newSubscriptions <- buffer
+	}
+	close(bc.newSubscriptions)
+}
+
+//subscriptionConsumer ensures we will get nil right away if no new subscriptions are available
+func (bc *brokerConsumer) subscriptionConsumer() {
+	<-bc.wait // wait for our first piece of work
+
+	for newSubscriptions := range bc.newSubscriptions {
+		bc.updateSubscriptions(newSubscriptions)
+
+		if len(bc.subscriptions) == 0 {
+			// We're about to be shut down or we're about to receive more subscriptions.
+			// Either way, the signal just hasn't propagated to our goroutine yet.
+			<-bc.wait
+			continue
+		}
+
+		response, err := bc.fetchNewMessages()
+
+		if err != nil {
+			Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
+			bc.abort(err)
+			return
+		}
+
+		bc.acks.Add(len(bc.subscriptions))
+		for child := range bc.subscriptions {
+			child.feeder <- response
+		}
+		bc.acks.Wait()
+		bc.handleResponses()
+	}
+}
+
+func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
+	for _, child := range newSubscriptions {
+		bc.subscriptions[child] = none{}
+		Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+	}
+
+	for child := range bc.subscriptions {
+		select {
+		case <-child.dying:
+			Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+			close(child.trigger)
+			delete(bc.subscriptions, child)
+		default:
+			// no-op
+		}
+	}
+}
+
+//handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
+func (bc *brokerConsumer) handleResponses() {
+	for child := range bc.subscriptions {
+		result := child.responseResult
+		child.responseResult = nil
+
+		switch result {
+		case nil:
+			// no-op
+		case errTimedOut:
+			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
+				bc.broker.ID(), child.topic, child.partition)
+			delete(bc.subscriptions, child)
+		case ErrOffsetOutOfRange:
+			// there's no point in retrying this; it will just fail the same way again,
+			// so shut it down and force the user to choose what to do
+			child.sendError(result)
+			Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
+			close(child.trigger)
+			delete(bc.subscriptions, child)
+		case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
+			// not an error, but does need redispatching
+			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+				bc.broker.ID(), child.topic, child.partition, result)
+			child.trigger <- none{}
+			delete(bc.subscriptions, child)
+		default:
+			// unknown error; tell the user and try redispatching
+			child.sendError(result)
+			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+				bc.broker.ID(), child.topic, child.partition, result)
+			child.trigger <- none{}
+			delete(bc.subscriptions, child)
+		}
+	}
+}
+
+func (bc *brokerConsumer) abort(err error) {
+	bc.consumer.abandonBrokerConsumer(bc)
+	_ = bc.broker.Close() // we don't care about the error this might return, we already have one
+
+	for child := range bc.subscriptions {
+		child.sendError(err)
+		child.trigger <- none{}
+	}
+
+	for newSubscriptions := range bc.newSubscriptions {
+		if len(newSubscriptions) == 0 {
+			<-bc.wait
+			continue
+		}
+		for _, child := range newSubscriptions {
+			child.sendError(err)
+			child.trigger <- none{}
+		}
+	}
+}
+
+func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
+	request := &FetchRequest{
+		MinBytes:    bc.consumer.conf.Consumer.Fetch.Min,
+		MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) {
+		request.Version = 1
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
+		request.Version = 2
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
+		request.Version = 3
+		request.MaxBytes = MaxResponseSize
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
+		request.Version = 4
+		request.Isolation = bc.consumer.conf.Consumer.IsolationLevel
+	}
+
+	for child := range bc.subscriptions {
+		request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
+	}
+
+	return bc.broker.Fetch(request)
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go
new file mode 100644
index 0000000..b974dd9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_group.go
@@ -0,0 +1,873 @@
+package sarama
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+)
+
+// ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed.
+var ErrClosedConsumerGroup = errors.New("kafka: tried to use a consumer group that was closed")
+
+// ConsumerGroup is responsible for dividing up processing of topics and partitions
+// over a collection of processes (the members of the consumer group).
+type ConsumerGroup interface {
+	// Consume joins a cluster of consumers for a given list of topics and
+	// starts a blocking ConsumerGroupSession through the ConsumerGroupHandler.
+	//
+	// The life-cycle of a session is represented by the following steps:
+	//
+	// 1. The consumers join the group (as explained in https://kafka.apache.org/documentation/#intro_consumers)
+	//    and is assigned their "fair share" of partitions, aka 'claims'.
+	// 2. Before processing starts, the handler's Setup() hook is called to notify the user
+	//    of the claims and allow any necessary preparation or alteration of state.
+	// 3. For each of the assigned claims the handler's ConsumeClaim() function is then called
+	//    in a separate goroutine which requires it to be thread-safe. Any state must be carefully protected
+	//    from concurrent reads/writes.
+	// 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the
+	//    parent context is cancelled or when a server-side rebalance cycle is initiated.
+	// 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called
+	//    to allow the user to perform any final tasks before a rebalance.
+	// 6. Finally, marked offsets are committed one last time before claims are released.
+	//
+	// Please note, that once a rebalance is triggered, sessions must be completed within
+	// Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit
+	// as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout
+	// is exceeded, the consumer will be removed from the group by Kafka, which will cause offset
+	// commit failures.
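+	//
+	// Because the session ends on every server-side rebalance, Consume is
+	// typically called in a loop; a sketch (group, ctx, topics and handler
+	// are assumed to exist in the caller):
+	//
+	//	for {
+	//		if err := group.Consume(ctx, []string{"my-topic"}, handler); err != nil {
+	//			log.Println(err)
+	//		}
+	//		if ctx.Err() != nil {
+	//			return
+	//		}
+	//	}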
+	Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error
+
+	// Errors returns a read channel of errors that occurred during the consumer life-cycle.
+	// By default, errors are logged and not returned over this channel.
+	// If you want to implement any custom error handling, set your config's
+	// Consumer.Return.Errors setting to true, and read from this channel.
+	Errors() <-chan error
+
+	// Close stops the ConsumerGroup and detaches any running sessions. It is required to call
+	// this function before the object passes out of scope, as it will otherwise leak memory.
+	Close() error
+}
+
+type consumerGroup struct {
+	client Client
+
+	config   *Config
+	consumer Consumer
+	groupID  string
+	memberID string
+	errors   chan error
+
+	lock      sync.Mutex
+	closed    chan none
+	closeOnce sync.Once
+
+	userData []byte
+}
+
+// NewConsumerGroup creates a new consumer group with the given broker addresses and configuration.
+func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerGroup, error) {
+	client, err := NewClient(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+
+	c, err := newConsumerGroup(groupID, client)
+	if err != nil {
+		_ = client.Close()
+	}
+	return c, err
+}
+
+// NewConsumerGroupFromClient creates a new consumer group using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
+// PLEASE NOTE: consumer groups can only re-use but not share clients.
+func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) {
+	// For a client passed in by the caller, ensure we don't
+	// call Close() on it.
+	cli := &nopCloserClient{client}
+	return newConsumerGroup(groupID, cli)
+}
+
+func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) {
+	config := client.Config()
+	if !config.Version.IsAtLeast(V0_10_2_0) {
+		return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0")
+	}
+
+	consumer, err := NewConsumerFromClient(client)
+	if err != nil {
+		return nil, err
+	}
+
+	return &consumerGroup{
+		client:   client,
+		consumer: consumer,
+		config:   config,
+		groupID:  groupID,
+		errors:   make(chan error, config.ChannelBufferSize),
+		closed:   make(chan none),
+	}, nil
+}
+
+// Errors implements ConsumerGroup.
+func (c *consumerGroup) Errors() <-chan error { return c.errors }
+
+// Close implements ConsumerGroup.
+func (c *consumerGroup) Close() (err error) {
+	c.closeOnce.Do(func() {
+		close(c.closed)
+
+		c.lock.Lock()
+		defer c.lock.Unlock()
+
+		// leave group
+		if e := c.leave(); e != nil {
+			err = e
+		}
+
+		// drain errors
+		go func() {
+			close(c.errors)
+		}()
+		for e := range c.errors {
+			err = e
+		}
+
+		if e := c.client.Close(); e != nil {
+			err = e
+		}
+	})
+	return
+}
+
+// Consume implements ConsumerGroup.
+func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error {
+	// Ensure group is not closed
+	select {
+	case <-c.closed:
+		return ErrClosedConsumerGroup
+	default:
+	}
+
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	// Quick exit when no topics are provided
+	if len(topics) == 0 {
+		return fmt.Errorf("no topics provided")
+	}
+
+	// Refresh metadata for requested topics
+	if err := c.client.RefreshMetadata(topics...); err != nil {
+		return err
+	}
+
+	// Init session
+	sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max)
+	if err == ErrClosedClient {
+		return ErrClosedConsumerGroup
+	} else if err != nil {
+		return err
+	}
+
+	// Periodically check whether the partition count of any topic has changed;
+	// a change will end the session and thereby trigger a rebalance
+	go c.loopCheckPartitionNumbers(topics, sess)
+
+	// Wait for session exit signal
+	<-sess.ctx.Done()
+
+	// Gracefully release session claims
+	return sess.release(true)
+}
+
+func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) {
+	select {
+	case <-c.closed:
+		return nil, ErrClosedConsumerGroup
+	case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
+	}
+
+	if refreshCoordinator {
+		err := c.client.RefreshCoordinator(c.groupID)
+		if err != nil {
+			return c.retryNewSession(ctx, topics, handler, retries, true)
+		}
+	}
+
+	return c.newSession(ctx, topics, handler, retries-1)
+}
+
+func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) {
+	coordinator, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		if retries <= 0 {
+			return nil, err
+		}
+
+		return c.retryNewSession(ctx, topics, handler, retries, true)
+	}
+
+	// Join consumer group
+	join, err := c.joinGroupRequest(coordinator, topics)
+	if err != nil {
+		_ = coordinator.Close()
+		return nil, err
+	}
+	switch join.Err {
+	case ErrNoError:
+		c.memberID = join.MemberId
+	case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
+		c.memberID = ""
+		return c.newSession(ctx, topics, handler, retries)
+	case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
+		if retries <= 0 {
+			return nil, join.Err
+		}
+
+		return c.retryNewSession(ctx, topics, handler, retries, true)
+	case ErrRebalanceInProgress: // retry after backoff
+		if retries <= 0 {
+			return nil, join.Err
+		}
+
+		return c.retryNewSession(ctx, topics, handler, retries, false)
+	default:
+		return nil, join.Err
+	}
+
+	// Prepare distribution plan if we joined as the leader
+	var plan BalanceStrategyPlan
+	if join.LeaderId == join.MemberId {
+		members, err := join.GetMembers()
+		if err != nil {
+			return nil, err
+		}
+
+		plan, err = c.balance(members)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Sync consumer group
+	sync, err := c.syncGroupRequest(coordinator, plan, join.GenerationId)
+	if err != nil {
+		_ = coordinator.Close()
+		return nil, err
+	}
+	switch sync.Err {
+	case ErrNoError:
+	case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
+		c.memberID = ""
+		return c.newSession(ctx, topics, handler, retries)
+	case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
+		if retries <= 0 {
+			return nil, sync.Err
+		}
+
+		return c.retryNewSession(ctx, topics, handler, retries, true)
+	case ErrRebalanceInProgress: // retry after backoff
+		if retries <= 0 {
+			return nil, sync.Err
+		}
+
+		return c.retryNewSession(ctx, topics, handler, retries, false)
+	default:
+		return nil, sync.Err
+	}
+
+	// Retrieve and sort claims
+	var claims map[string][]int32
+	if len(sync.MemberAssignment) > 0 {
+		members, err := sync.GetMemberAssignment()
+		if err != nil {
+			return nil, err
+		}
+		claims = members.Topics
+		c.userData = members.UserData
+
+		for _, partitions := range claims {
+			sort.Sort(int32Slice(partitions))
+		}
+	}
+
+	return newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler)
+}
+
+func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) {
+	req := &JoinGroupRequest{
+		GroupId:        c.groupID,
+		MemberId:       c.memberID,
+		SessionTimeout: int32(c.config.Consumer.Group.Session.Timeout / time.Millisecond),
+		ProtocolType:   "consumer",
+	}
+	if c.config.Version.IsAtLeast(V0_10_1_0) {
+		req.Version = 1
+		req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond)
+	}
+
+	// use static user-data if configured, otherwise use consumer-group userdata from the last sync
+	userData := c.config.Consumer.Group.Member.UserData
+	if len(userData) == 0 {
+		userData = c.userData
+	}
+	meta := &ConsumerGroupMemberMetadata{
+		Topics:   topics,
+		UserData: userData,
+	}
+	strategy := c.config.Consumer.Group.Rebalance.Strategy
+	if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
+		return nil, err
+	}
+
+	return coordinator.JoinGroup(req)
+}
+
+func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrategyPlan, generationID int32) (*SyncGroupResponse, error) {
+	req := &SyncGroupRequest{
+		GroupId:      c.groupID,
+		MemberId:     c.memberID,
+		GenerationId: generationID,
+	}
+	for memberID, topics := range plan {
+		assignment := &ConsumerGroupMemberAssignment{Topics: topics}
+
+		// Include topic assignments in group-assignment userdata for each consumer-group member
+		if c.config.Consumer.Group.Rebalance.Strategy.Name() == StickyBalanceStrategyName {
+			userDataBytes, err := encode(&StickyAssignorUserDataV1{
+				Topics:     topics,
+				Generation: generationID,
+			}, nil)
+			if err != nil {
+				return nil, err
+			}
+			assignment.UserData = userDataBytes
+		}
+		if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil {
+			return nil, err
+		}
+	}
+	return coordinator.SyncGroup(req)
+}
+
+func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, generationID int32) (*HeartbeatResponse, error) {
+	req := &HeartbeatRequest{
+		GroupId:      c.groupID,
+		MemberId:     memberID,
+		GenerationId: generationID,
+	}
+
+	return coordinator.Heartbeat(req)
+}
+
+func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) {
+	topics := make(map[string][]int32)
+	for _, meta := range members {
+		for _, topic := range meta.Topics {
+			topics[topic] = nil
+		}
+	}
+
+	for topic := range topics {
+		partitions, err := c.client.Partitions(topic)
+		if err != nil {
+			return nil, err
+		}
+		topics[topic] = partitions
+	}
+
+	strategy := c.config.Consumer.Group.Rebalance.Strategy
+	return strategy.Plan(members, topics)
+}
+
+// Leaves the consumer group, called by Close, protected by lock.
+func (c *consumerGroup) leave() error {
+	if c.memberID == "" {
+		return nil
+	}
+
+	coordinator, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		return err
+	}
+
+	resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{
+		GroupId:  c.groupID,
+		MemberId: c.memberID,
+	})
+	if err != nil {
+		_ = coordinator.Close()
+		return err
+	}
+
+	// Unset memberID
+	c.memberID = ""
+
+	// Check response
+	switch resp.Err {
+	case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError:
+		return nil
+	default:
+		return resp.Err
+	}
+}
+
+func (c *consumerGroup) handleError(err error, topic string, partition int32) {
+	if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 {
+		err = &ConsumerError{
+			Topic:     topic,
+			Partition: partition,
+			Err:       err,
+		}
+	}
+
+	if !c.config.Consumer.Return.Errors {
+		Logger.Println(err)
+		return
+	}
+
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	select {
+	case <-c.closed:
+		//consumer is closed
+		return
+	default:
+	}
+
+	select {
+	case c.errors <- err:
+	default:
+		// no error listener
+	}
+}
+
+func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *consumerGroupSession) {
+	pause := time.NewTicker(c.config.Consumer.Group.Heartbeat.Interval * 2)
+	defer session.cancel()
+	defer pause.Stop()
+	var oldTopicToPartitionNum map[string]int
+	var err error
+	if oldTopicToPartitionNum, err = c.topicToPartitionNumbers(topics); err != nil {
+		return
+	}
+	for {
+		if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil {
+			return
+		} else {
+			for topic, num := range oldTopicToPartitionNum {
+				if newTopicToPartitionNum[topic] != num {
+					return // trigger the end of the session on exit
+				}
+			}
+		}
+		select {
+		case <-pause.C:
+		case <-c.closed:
+			return
+		}
+	}
+}
+
+func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int, error) {
+	if err := c.client.RefreshMetadata(topics...); err != nil {
+		Logger.Printf("Consumer Group refresh metadata failed %v", err)
+		return nil, err
+	}
+	topicToPartitionNum := make(map[string]int, len(topics))
+	for _, topic := range topics {
+		if partitionNum, err := c.client.Partitions(topic); err != nil {
+			Logger.Printf("Consumer Group topic %s get partition number failed %v", topic, err)
+			return nil, err
+		} else {
+			topicToPartitionNum[topic] = len(partitionNum)
+		}
+	}
+	return topicToPartitionNum, nil
+}
+
+// --------------------------------------------------------------------
+
+// ConsumerGroupSession represents a consumer group member session.
+type ConsumerGroupSession interface {
+	// Claims returns information about the claimed partitions by topic.
+	Claims() map[string][]int32
+
+	// MemberID returns the cluster member ID.
+	MemberID() string
+
+	// GenerationID returns the current generation ID.
+	GenerationID() int32
+
+	// MarkOffset marks the provided offset, alongside a metadata string
+	// that represents the state of the partition consumer at that point in time. The
+	// metadata string can be used by another consumer to restore that state, so it
+	// can resume consumption.
+	//
+	// To follow upstream conventions, you are expected to mark the offset of the
+	// next message to read, not the last message read. Thus, when calling `MarkOffset`
+	// you should typically add one to the offset of the last consumed message.
+	//
+	// Note: calling MarkOffset does not necessarily commit the offset to the backend
+	// store immediately for efficiency reasons, and it may never be committed if
+	// your application crashes. This means that you may end up processing the same
+	// message twice, and your processing should ideally be idempotent.
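+	//
+	// For example, after processing msg you would typically call (a sketch;
+	// MarkMessage below applies the +1 for you):
+	//
+	//	sess.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, "")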
+	MarkOffset(topic string, partition int32, offset int64, metadata string)
+
+	// ResetOffset resets to the provided offset, alongside a metadata string that
+	// represents the state of the partition consumer at that point in time. Reset
+	// acts as a counterpart to MarkOffset, the difference being that it allows
+	// resetting an offset to an earlier or smaller value, whereas MarkOffset
+	// only allows incrementing it. See MarkOffset for more details.
+	ResetOffset(topic string, partition int32, offset int64, metadata string)
+
+	// MarkMessage marks a message as consumed.
+	MarkMessage(msg *ConsumerMessage, metadata string)
+
+	// Context returns the session context.
+	Context() context.Context
+}
+
+type consumerGroupSession struct {
+	parent       *consumerGroup
+	memberID     string
+	generationID int32
+	handler      ConsumerGroupHandler
+
+	claims  map[string][]int32
+	offsets *offsetManager
+	ctx     context.Context
+	cancel  func()
+
+	waitGroup       sync.WaitGroup
+	releaseOnce     sync.Once
+	hbDying, hbDead chan none
+}
+
+func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) {
+	// init offset manager
+	offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client)
+	if err != nil {
+		return nil, err
+	}
+
+	// init context
+	ctx, cancel := context.WithCancel(ctx)
+
+	// init session
+	sess := &consumerGroupSession{
+		parent:       parent,
+		memberID:     memberID,
+		generationID: generationID,
+		handler:      handler,
+		offsets:      offsets,
+		claims:       claims,
+		ctx:          ctx,
+		cancel:       cancel,
+		hbDying:      make(chan none),
+		hbDead:       make(chan none),
+	}
+
+	// start heartbeat loop
+	go sess.heartbeatLoop()
+
+	// create a POM for each claim
+	for topic, partitions := range claims {
+		for _, partition := range partitions {
+			pom, err := offsets.ManagePartition(topic, partition)
+			if err != nil {
+				_ = sess.release(false)
+				return nil, err
+			}
+
+			// handle POM errors
+			go func(topic string, partition int32) {
+				for err := range pom.Errors() {
+					sess.parent.handleError(err, topic, partition)
+				}
+			}(topic, partition)
+		}
+	}
+
+	// perform setup
+	if err := handler.Setup(sess); err != nil {
+		_ = sess.release(true)
+		return nil, err
+	}
+
+	// start consuming
+	for topic, partitions := range claims {
+		for _, partition := range partitions {
+			sess.waitGroup.Add(1)
+
+			go func(topic string, partition int32) {
+				defer sess.waitGroup.Done()
+
+				// cancel the session as soon as the first
+				// goroutine exits
+				defer sess.cancel()
+
+				// consume a single topic/partition, blocking
+				sess.consume(topic, partition)
+			}(topic, partition)
+		}
+	}
+	return sess, nil
+}
+
+func (s *consumerGroupSession) Claims() map[string][]int32 { return s.claims }
+func (s *consumerGroupSession) MemberID() string           { return s.memberID }
+func (s *consumerGroupSession) GenerationID() int32        { return s.generationID }
+
+func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) {
+	if pom := s.offsets.findPOM(topic, partition); pom != nil {
+		pom.MarkOffset(offset, metadata)
+	}
+}
+
+func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
+	if pom := s.offsets.findPOM(topic, partition); pom != nil {
+		pom.ResetOffset(offset, metadata)
+	}
+}
+
+func (s *consumerGroupSession) MarkMessage(msg *ConsumerMessage, metadata string) {
+	s.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, metadata)
+}
+
+func (s *consumerGroupSession) Context() context.Context {
+	return s.ctx
+}
+
+func (s *consumerGroupSession) consume(topic string, partition int32) {
+	// quick exit if rebalance is due
+	select {
+	case <-s.ctx.Done():
+		return
+	case <-s.parent.closed:
+		return
+	default:
+	}
+
+	// get next offset
+	offset := s.parent.config.Consumer.Offsets.Initial
+	if pom := s.offsets.findPOM(topic, partition); pom != nil {
+		offset, _ = pom.NextOffset()
+	}
+
+	// create new claim
+	claim, err := newConsumerGroupClaim(s, topic, partition, offset)
+	if err != nil {
+		s.parent.handleError(err, topic, partition)
+		return
+	}
+
+	// handle errors
+	go func() {
+		for err := range claim.Errors() {
+			s.parent.handleError(err, topic, partition)
+		}
+	}()
+
+	// trigger close when session is done
+	go func() {
+		select {
+		case <-s.ctx.Done():
+		case <-s.parent.closed:
+		}
+		claim.AsyncClose()
+	}()
+
+	// start processing
+	if err := s.handler.ConsumeClaim(s, claim); err != nil {
+		s.parent.handleError(err, topic, partition)
+	}
+
+	// ensure consumer is closed & drained
+	claim.AsyncClose()
+	for _, err := range claim.waitClosed() {
+		s.parent.handleError(err, topic, partition)
+	}
+}
+
+func (s *consumerGroupSession) release(withCleanup bool) (err error) {
+	// signal release, stop heartbeat
+	s.cancel()
+
+	// wait for consumers to exit
+	s.waitGroup.Wait()
+
+	// perform release
+	s.releaseOnce.Do(func() {
+		if withCleanup {
+			if e := s.handler.Cleanup(s); e != nil {
+				s.parent.handleError(e, "", -1)
+				err = e
+			}
+		}
+
+		if e := s.offsets.Close(); e != nil {
+			err = e
+		}
+
+		close(s.hbDying)
+		<-s.hbDead
+	})
+
+	return
+}
+
+func (s *consumerGroupSession) heartbeatLoop() {
+	defer close(s.hbDead)
+	defer s.cancel() // trigger the end of the session on exit
+
+	pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval)
+	defer pause.Stop()
+
+	retries := s.parent.config.Metadata.Retry.Max
+	for {
+		coordinator, err := s.parent.client.Coordinator(s.parent.groupID)
+		if err != nil {
+			if retries <= 0 {
+				s.parent.handleError(err, "", -1)
+				return
+			}
+
+			select {
+			case <-s.hbDying:
+				return
+			case <-time.After(s.parent.config.Metadata.Retry.Backoff):
+				retries--
+			}
+			continue
+		}
+
+		resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID)
+		if err != nil {
+			_ = coordinator.Close()
+
+			if retries <= 0 {
+				s.parent.handleError(err, "", -1)
+				return
+			}
+
+			retries--
+			continue
+		}
+
+		switch resp.Err {
+		case ErrNoError:
+			retries = s.parent.config.Metadata.Retry.Max
+		case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration:
+			return
+		default:
+			s.parent.handleError(err, "", -1)
+			return
+		}
+
+		select {
+		case <-pause.C:
+		case <-s.hbDying:
+			return
+		}
+	}
+}
+
+// --------------------------------------------------------------------
+
+// ConsumerGroupHandler instances are used to handle individual topic/partition claims.
+// It also provides hooks for your consumer group session life-cycle and allows you to
+// trigger logic before or after the consume loop(s).
+//
+// PLEASE NOTE that handlers are likely to be called from several goroutines concurrently;
+// ensure that all state is safely protected against race conditions.
+type ConsumerGroupHandler interface {
+	// Setup is run at the beginning of a new session, before ConsumeClaim.
+	Setup(ConsumerGroupSession) error
+
+	// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
+	// but before the offsets are committed for the very last time.
+	Cleanup(ConsumerGroupSession) error
+
+	// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
+	// Once the Messages() channel is closed, the Handler must finish its processing
+	// loop and exit.
+	ConsumeClaim(ConsumerGroupSession, ConsumerGroupClaim) error
+}
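+
+// A minimal handler sketch (the type name is illustrative; a real handler
+// would carry whatever state its processing requires):
+//
+//	type exampleHandler struct{}
+//
+//	func (exampleHandler) Setup(ConsumerGroupSession) error   { return nil }
+//	func (exampleHandler) Cleanup(ConsumerGroupSession) error { return nil }
+//	func (exampleHandler) ConsumeClaim(sess ConsumerGroupSession, claim ConsumerGroupClaim) error {
+//		for msg := range claim.Messages() {
+//			sess.MarkMessage(msg, "")
+//		}
+//		return nil
+//	}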
+
+// ConsumerGroupClaim processes Kafka messages from a given topic and partition within a consumer group.
+type ConsumerGroupClaim interface {
+	// Topic returns the consumed topic name.
+	Topic() string
+
+	// Partition returns the consumed partition.
+	Partition() int32
+
+	// InitialOffset returns the initial offset that was used as a starting point for this claim.
+	InitialOffset() int64
+
+	// HighWaterMarkOffset returns the high water mark offset of the partition,
+	// i.e. the offset that will be used for the next message that will be produced.
+	// You can use this to determine how far behind the processing is.
+	HighWaterMarkOffset() int64
+
+	// Messages returns the read channel for the messages that are returned by
+	// the broker. The messages channel will be closed when a new rebalance cycle
+	// is due. You must finish processing and mark offsets within
+	// Config.Consumer.Group.Session.Timeout before the topic/partition is eventually
+	// re-assigned to another group member.
+	Messages() <-chan *ConsumerMessage
+}
+
+type consumerGroupClaim struct {
+	topic     string
+	partition int32
+	offset    int64
+	PartitionConsumer
+}
+
+func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) {
+	pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset)
+	if err == ErrOffsetOutOfRange {
+		offset = sess.parent.config.Consumer.Offsets.Initial
+		pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	go func() {
+		for err := range pcm.Errors() {
+			sess.parent.handleError(err, topic, partition)
+		}
+	}()
+
+	return &consumerGroupClaim{
+		topic:             topic,
+		partition:         partition,
+		offset:            offset,
+		PartitionConsumer: pcm,
+	}, nil
+}
+
+func (c *consumerGroupClaim) Topic() string        { return c.topic }
+func (c *consumerGroupClaim) Partition() int32     { return c.partition }
+func (c *consumerGroupClaim) InitialOffset() int64 { return c.offset }
+
+// Drains messages and errors, ensures the claim is fully closed.
+func (c *consumerGroupClaim) waitClosed() (errs ConsumerErrors) {
+	go func() {
+		for range c.Messages() {
+		}
+	}()
+
+	for err := range c.Errors() {
+		errs = append(errs, err)
+	}
+	return
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go
new file mode 100644
index 0000000..2d02cc3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_group_members.go
@@ -0,0 +1,96 @@
+package sarama
+
+//ConsumerGroupMemberMetadata holds the metadata for a consumer group member
+type ConsumerGroupMemberMetadata struct {
+	Version  int16
+	Topics   []string
+	UserData []byte
+}
+
+func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
+	pe.putInt16(m.Version)
+
+	if err := pe.putStringArray(m.Topics); err != nil {
+		return err
+	}
+
+	if err := pe.putBytes(m.UserData); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
+	if m.Version, err = pd.getInt16(); err != nil {
+		return
+	}
+
+	if m.Topics, err = pd.getStringArray(); err != nil {
+		return
+	}
+
+	if m.UserData, err = pd.getBytes(); err != nil {
+		return
+	}
+
+	return nil
+}
+
+//ConsumerGroupMemberAssignment holds the member assignment for a consumer group
+type ConsumerGroupMemberAssignment struct {
+	Version  int16
+	Topics   map[string][]int32
+	UserData []byte
+}
+
+func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
+	pe.putInt16(m.Version)
+
+	if err := pe.putArrayLength(len(m.Topics)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range m.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+
+	if err := pe.putBytes(m.UserData); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
+	if m.Version, err = pd.getInt16(); err != nil {
+		return
+	}
+
+	var topicLen int
+	if topicLen, err = pd.getArrayLength(); err != nil {
+		return
+	}
+
+	m.Topics = make(map[string][]int32, topicLen)
+	for i := 0; i < topicLen; i++ {
+		var topic string
+		if topic, err = pd.getString(); err != nil {
+			return
+		}
+		if m.Topics[topic], err = pd.getInt32Array(); err != nil {
+			return
+		}
+	}
+
+	if m.UserData, err = pd.getBytes(); err != nil {
+		return
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
new file mode 100644
index 0000000..a8dcaef
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
@@ -0,0 +1,34 @@
+package sarama
+
+// ConsumerMetadataRequest is used to look up the coordinator for a consumer group
+type ConsumerMetadataRequest struct {
+	ConsumerGroup string
+}
+
+func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
+	tmp := new(FindCoordinatorRequest)
+	tmp.CoordinatorKey = r.ConsumerGroup
+	tmp.CoordinatorType = CoordinatorGroup
+	return tmp.encode(pe)
+}
+
+func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
+	tmp := new(FindCoordinatorRequest)
+	if err := tmp.decode(pd, version); err != nil {
+		return err
+	}
+	r.ConsumerGroup = tmp.CoordinatorKey
+	return nil
+}
+
+func (r *ConsumerMetadataRequest) key() int16 {
+	return 10
+}
+
+func (r *ConsumerMetadataRequest) version() int16 {
+	return 0
+}
+
+func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
+	return V0_8_2_0
+}
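+
+// Example (illustrative sketch, assuming the Broker.GetConsumerMetadata helper
+// this package defines on *Broker): look up a group's coordinator:
+//
+//   req := &ConsumerMetadataRequest{ConsumerGroup: "my-group"}
+//   resp, err := broker.GetConsumerMetadata(req)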
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
new file mode 100644
index 0000000..f39a871
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
@@ -0,0 +1,78 @@
+package sarama
+
+import (
+	"net"
+	"strconv"
+)
+
+// ConsumerMetadataResponse holds the response for a consumer group metadata request
+type ConsumerMetadataResponse struct {
+	Err             KError
+	Coordinator     *Broker
+	CoordinatorID   int32  // deprecated: use Coordinator.ID()
+	CoordinatorHost string // deprecated: use Coordinator.Addr()
+	CoordinatorPort int32  // deprecated: use Coordinator.Addr()
+}
+
+func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+	tmp := new(FindCoordinatorResponse)
+
+	if err := tmp.decode(pd, version); err != nil {
+		return err
+	}
+
+	r.Err = tmp.Err
+
+	r.Coordinator = tmp.Coordinator
+	if tmp.Coordinator == nil {
+		return nil
+	}
+
+	// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
+	// backwards compatibility
+	host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
+	if err != nil {
+		return err
+	}
+	port, err := strconv.ParseInt(portstr, 10, 32)
+	if err != nil {
+		return err
+	}
+	r.CoordinatorID = r.Coordinator.ID()
+	r.CoordinatorHost = host
+	r.CoordinatorPort = int32(port)
+
+	return nil
+}
+
+func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
+	if r.Coordinator == nil {
+		r.Coordinator = new(Broker)
+		r.Coordinator.id = r.CoordinatorID
+		r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort)))
+	}
+
+	tmp := &FindCoordinatorResponse{
+		Version:     0,
+		Err:         r.Err,
+		Coordinator: r.Coordinator,
+	}
+
+	if err := tmp.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *ConsumerMetadataResponse) key() int16 {
+	return 10
+}
+
+func (r *ConsumerMetadataResponse) version() int16 {
+	return 0
+}
+
+func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
+	return V0_8_2_0
+}
diff --git a/vendor/github.com/Shopify/sarama/control_record.go b/vendor/github.com/Shopify/sarama/control_record.go
new file mode 100644
index 0000000..9b75ab5
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/control_record.go
@@ -0,0 +1,72 @@
+package sarama
+
+// ControlRecordType indicates the type of a control record
+type ControlRecordType int
+
+const (
+	// ControlRecordAbort is a control record marking an aborted transaction
+	ControlRecordAbort ControlRecordType = iota
+	// ControlRecordCommit is a control record marking a committed transaction
+	ControlRecordCommit
+	// ControlRecordUnknown is a control record of unknown type
+	ControlRecordUnknown
+)
+
+// ControlRecord is returned as a record by fetchRequest.
+// However, unlike "normal" records, control records mean nothing application-wise;
+// they only serve internal logic for supporting transactions.
+type ControlRecord struct {
+	Version          int16
+	CoordinatorEpoch int32
+	Type             ControlRecordType
+}
+
+func (cr *ControlRecord) decode(key, value packetDecoder) error {
+	var err error
+	cr.Version, err = value.getInt16()
+	if err != nil {
+		return err
+	}
+
+	cr.CoordinatorEpoch, err = value.getInt32()
+	if err != nil {
+		return err
+	}
+
+	// There is a version for the value part AND the key part, and it is unclear
+	// whether they are supposed to match. Either way, all of these versions can
+	// only be 0 for now.
+	cr.Version, err = key.getInt16()
+	if err != nil {
+		return err
+	}
+
+	recordType, err := key.getInt16()
+	if err != nil {
+		return err
+	}
+
+	switch recordType {
+	case 0:
+		cr.Type = ControlRecordAbort
+	case 1:
+		cr.Type = ControlRecordCommit
+	default:
+		// from the Java implementation:
+		// UNKNOWN is used to indicate a control type which the client is not aware of and should be ignored
+		cr.Type = ControlRecordUnknown
+	}
+	return nil
+}
+
+func (cr *ControlRecord) encode(key, value packetEncoder) {
+	value.putInt16(cr.Version)
+	value.putInt32(cr.CoordinatorEpoch)
+	key.putInt16(cr.Version)
+
+	switch cr.Type {
+	case ControlRecordAbort:
+		key.putInt16(0)
+	case ControlRecordCommit:
+		key.putInt16(1)
+	}
+}
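+
+// Note: encode and decode take separate key and value encoders/decoders because
+// Kafka splits a control record across the record key (version and type) and
+// the record value (version and coordinator epoch).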
diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go
new file mode 100644
index 0000000..38189a3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/crc32_field.go
@@ -0,0 +1,86 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+	"sync"
+)
+
+type crcPolynomial int8
+
+const (
+	crcIEEE crcPolynomial = iota
+	crcCastagnoli
+)
+
+var crc32FieldPool = sync.Pool{}
+
+func acquireCrc32Field(polynomial crcPolynomial) *crc32Field {
+	val := crc32FieldPool.Get()
+	if val != nil {
+		c := val.(*crc32Field)
+		c.polynomial = polynomial
+		return c
+	}
+	return newCRC32Field(polynomial)
+}
+
+func releaseCrc32Field(c *crc32Field) {
+	crc32FieldPool.Put(c)
+}
+
+var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
+type crc32Field struct {
+	startOffset int
+	polynomial  crcPolynomial
+}
+
+func (c *crc32Field) saveOffset(in int) {
+	c.startOffset = in
+}
+
+func (c *crc32Field) reserveLength() int {
+	return 4
+}
+
+func newCRC32Field(polynomial crcPolynomial) *crc32Field {
+	return &crc32Field{polynomial: polynomial}
+}
+
+func (c *crc32Field) run(curOffset int, buf []byte) error {
+	crc, err := c.crc(curOffset, buf)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
+	return nil
+}
+
+func (c *crc32Field) check(curOffset int, buf []byte) error {
+	crc, err := c.crc(curOffset, buf)
+	if err != nil {
+		return err
+	}
+
+	expected := binary.BigEndian.Uint32(buf[c.startOffset:])
+	if crc != expected {
+		return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)}
+	}
+
+	return nil
+}
+
+func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) {
+	var tab *crc32.Table
+	switch c.polynomial {
+	case crcIEEE:
+		tab = crc32.IEEETable
+	case crcCastagnoli:
+		tab = castagnoliTable
+	default:
+		return 0, PacketDecodingError{"invalid CRC type"}
+	}
+	return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil
+}
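+
+// Note: Kafka's legacy message sets (v0/v1) are checksummed with the IEEE
+// polynomial, while the v2 record batch format uses Castagnoli (CRC-32C);
+// callers select the polynomial via acquireCrc32Field accordingly.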
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go
new file mode 100644
index 0000000..af321e9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/create_partitions_request.go
@@ -0,0 +1,121 @@
+package sarama
+
+import "time"
+
+type CreatePartitionsRequest struct {
+	TopicPartitions map[string]*TopicPartition
+	Timeout         time.Duration
+	ValidateOnly    bool
+}
+
+func (c *CreatePartitionsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil {
+		return err
+	}
+
+	for topic, partition := range c.TopicPartitions {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := partition.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putInt32(int32(c.Timeout / time.Millisecond))
+
+	pe.putBool(c.ValidateOnly)
+
+	return nil
+}
+
+func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	c.TopicPartitions = make(map[string]*TopicPartition, n)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicPartitions[topic] = new(TopicPartition)
+		if err := c.TopicPartitions[topic].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	c.Timeout = time.Duration(timeout) * time.Millisecond
+
+	if c.ValidateOnly, err = pd.getBool(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *CreatePartitionsRequest) key() int16 {
+	return 37
+}
+
+func (r *CreatePartitionsRequest) version() int16 {
+	return 0
+}
+
+func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
+	return V1_0_0_0
+}
+
+type TopicPartition struct {
+	Count      int32
+	Assignment [][]int32
+}
+
+func (t *TopicPartition) encode(pe packetEncoder) error {
+	pe.putInt32(t.Count)
+
+	if len(t.Assignment) == 0 {
+		pe.putInt32(-1)
+		return nil
+	}
+
+	if err := pe.putArrayLength(len(t.Assignment)); err != nil {
+		return err
+	}
+
+	for _, assign := range t.Assignment {
+		if err := pe.putInt32Array(assign); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) {
+	if t.Count, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	n, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if n <= 0 {
+		return nil
+	}
+	t.Assignment = make([][]int32, n)
+
+	for i := 0; i < int(n); i++ {
+		if t.Assignment[i], err = pd.getInt32Array(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
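+
+// Example (illustrative sketch): grow a topic to 12 partitions and let the
+// broker pick the placement (Assignment left nil):
+//
+//   req := &CreatePartitionsRequest{
+//   	TopicPartitions: map[string]*TopicPartition{
+//   		"events": {Count: 12},
+//   	},
+//   	Timeout: 30 * time.Second,
+//   }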
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go
new file mode 100644
index 0000000..bb18204
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/create_partitions_response.go
@@ -0,0 +1,105 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+type CreatePartitionsResponse struct {
+	ThrottleTime         time.Duration
+	TopicPartitionErrors map[string]*TopicPartitionError
+}
+
+func (c *CreatePartitionsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
+	if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil {
+		return err
+	}
+
+	for topic, partitionError := range c.TopicPartitionErrors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := partitionError.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicPartitionErrors[topic] = new(TopicPartitionError)
+		if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *CreatePartitionsResponse) key() int16 {
+	return 37
+}
+
+func (r *CreatePartitionsResponse) version() int16 {
+	return 0
+}
+
+func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
+	return V1_0_0_0
+}
+
+type TopicPartitionError struct {
+	Err    KError
+	ErrMsg *string
+}
+
+func (t *TopicPartitionError) Error() string {
+	text := t.Err.Error()
+	if t.ErrMsg != nil {
+		text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
+	}
+	return text
+}
+
+func (t *TopicPartitionError) encode(pe packetEncoder) error {
+	pe.putInt16(int16(t.Err))
+
+	if err := pe.putNullableString(t.ErrMsg); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	t.Err = KError(kerr)
+
+	if t.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/Shopify/sarama/create_topics_request.go
new file mode 100644
index 0000000..709c0a4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/create_topics_request.go
@@ -0,0 +1,174 @@
+package sarama
+
+import (
+	"time"
+)
+
+type CreateTopicsRequest struct {
+	Version int16
+
+	TopicDetails map[string]*TopicDetail
+	Timeout      time.Duration
+	ValidateOnly bool
+}
+
+func (c *CreateTopicsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(c.TopicDetails)); err != nil {
+		return err
+	}
+	for topic, detail := range c.TopicDetails {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := detail.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putInt32(int32(c.Timeout / time.Millisecond))
+
+	if c.Version >= 1 {
+		pe.putBool(c.ValidateOnly)
+	}
+
+	return nil
+}
+
+func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.TopicDetails = make(map[string]*TopicDetail, n)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicDetails[topic] = new(TopicDetail)
+		if err = c.TopicDetails[topic].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	c.Timeout = time.Duration(timeout) * time.Millisecond
+
+	if version >= 1 {
+		c.ValidateOnly, err = pd.getBool()
+		if err != nil {
+			return err
+		}
+
+		c.Version = version
+	}
+
+	return nil
+}
+
+func (c *CreateTopicsRequest) key() int16 {
+	return 19
+}
+
+func (c *CreateTopicsRequest) version() int16 {
+	return c.Version
+}
+
+func (c *CreateTopicsRequest) requiredVersion() KafkaVersion {
+	switch c.Version {
+	case 2:
+		return V1_0_0_0
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_10_1_0
+	}
+}
+
+type TopicDetail struct {
+	NumPartitions     int32
+	ReplicationFactor int16
+	ReplicaAssignment map[int32][]int32
+	ConfigEntries     map[string]*string
+}
+
+func (t *TopicDetail) encode(pe packetEncoder) error {
+	pe.putInt32(t.NumPartitions)
+	pe.putInt16(t.ReplicationFactor)
+
+	if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil {
+		return err
+	}
+	for partition, assignment := range t.ReplicaAssignment {
+		pe.putInt32(partition)
+		if err := pe.putInt32Array(assignment); err != nil {
+			return err
+		}
+	}
+
+	if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil {
+		return err
+	}
+	for configKey, configValue := range t.ConfigEntries {
+		if err := pe.putString(configKey); err != nil {
+			return err
+		}
+		if err := pe.putNullableString(configValue); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) {
+	if t.NumPartitions, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if t.ReplicationFactor, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.ReplicaAssignment = make(map[int32][]int32, n)
+		for i := 0; i < n; i++ {
+			replica, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil {
+				return err
+			}
+		}
+	}
+
+	n, err = pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.ConfigEntries = make(map[string]*string, n)
+		for i := 0; i < n; i++ {
+			configKey, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
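+
+// Example (illustrative sketch): request a 6-partition topic with replication
+// factor 3 and a retention override:
+//
+//   retention := "86400000"
+//   req := &CreateTopicsRequest{
+//   	Version: 1,
+//   	TopicDetails: map[string]*TopicDetail{
+//   		"events": {
+//   			NumPartitions:     6,
+//   			ReplicationFactor: 3,
+//   			ConfigEntries:     map[string]*string{"retention.ms": &retention},
+//   		},
+//   	},
+//   	Timeout: 30 * time.Second,
+//   }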
diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go
new file mode 100644
index 0000000..a493e02
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/create_topics_response.go
@@ -0,0 +1,123 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+type CreateTopicsResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	TopicErrors  map[string]*TopicError
+}
+
+func (c *CreateTopicsResponse) encode(pe packetEncoder) error {
+	if c.Version >= 2 {
+		pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
+	}
+
+	if err := pe.putArrayLength(len(c.TopicErrors)); err != nil {
+		return err
+	}
+	for topic, topicError := range c.TopicErrors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := topicError.encode(pe, c.Version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
+	c.Version = version
+
+	if version >= 2 {
+		throttleTime, err := pd.getInt32()
+		if err != nil {
+			return err
+		}
+		c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.TopicErrors = make(map[string]*TopicError, n)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicErrors[topic] = new(TopicError)
+		if err := c.TopicErrors[topic].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateTopicsResponse) key() int16 {
+	return 19
+}
+
+func (c *CreateTopicsResponse) version() int16 {
+	return c.Version
+}
+
+func (c *CreateTopicsResponse) requiredVersion() KafkaVersion {
+	switch c.Version {
+	case 2:
+		return V1_0_0_0
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_10_1_0
+	}
+}
+
+type TopicError struct {
+	Err    KError
+	ErrMsg *string
+}
+
+func (t *TopicError) Error() string {
+	text := t.Err.Error()
+	if t.ErrMsg != nil {
+		text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
+	}
+	return text
+}
+
+func (t *TopicError) encode(pe packetEncoder, version int16) error {
+	pe.putInt16(int16(t.Err))
+
+	if version >= 1 {
+		if err := pe.putNullableString(t.ErrMsg); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (t *TopicError) decode(pd packetDecoder, version int16) (err error) {
+	kErr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	t.Err = KError(kErr)
+
+	if version >= 1 {
+		if t.ErrMsg, err = pd.getNullableString(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go
new file mode 100644
index 0000000..eaccbfc
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/decompress.go
@@ -0,0 +1,63 @@
+package sarama
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io/ioutil"
+	"sync"
+
+	"github.com/eapache/go-xerial-snappy"
+	"github.com/pierrec/lz4"
+)
+
+var (
+	lz4ReaderPool = sync.Pool{
+		New: func() interface{} {
+			return lz4.NewReader(nil)
+		},
+	}
+
+	gzipReaderPool sync.Pool
+)
+
+func decompress(cc CompressionCodec, data []byte) ([]byte, error) {
+	switch cc {
+	case CompressionNone:
+		return data, nil
+	case CompressionGZIP:
+		var (
+			err        error
+			reader     *gzip.Reader
+			readerIntf = gzipReaderPool.Get()
+		)
+		if readerIntf != nil {
+			reader = readerIntf.(*gzip.Reader)
+		} else {
+			reader, err = gzip.NewReader(bytes.NewReader(data))
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		defer gzipReaderPool.Put(reader)
+
+		if err := reader.Reset(bytes.NewReader(data)); err != nil {
+			return nil, err
+		}
+
+		return ioutil.ReadAll(reader)
+	case CompressionSnappy:
+		return snappy.Decode(data)
+	case CompressionLZ4:
+		reader := lz4ReaderPool.Get().(*lz4.Reader)
+		defer lz4ReaderPool.Put(reader)
+
+		reader.Reset(bytes.NewReader(data))
+		return ioutil.ReadAll(reader)
+	case CompressionZSTD:
+		return zstdDecompress(nil, data)
+	default:
+		return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)}
+	}
+}
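+
+// Note: gzip and lz4 readers are pooled because constructing them is relatively
+// expensive; each call Resets a pooled reader onto the fresh input before
+// draining it.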
diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/Shopify/sarama/delete_groups_request.go
new file mode 100644
index 0000000..305a324
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_groups_request.go
@@ -0,0 +1,30 @@
+package sarama
+
+type DeleteGroupsRequest struct {
+	Groups []string
+}
+
+func (r *DeleteGroupsRequest) encode(pe packetEncoder) error {
+	return pe.putStringArray(r.Groups)
+}
+
+func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Groups, err = pd.getStringArray()
+	return
+}
+
+func (r *DeleteGroupsRequest) key() int16 {
+	return 42
+}
+
+func (r *DeleteGroupsRequest) version() int16 {
+	return 0
+}
+
+func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion {
+	return V1_1_0_0
+}
+
+func (r *DeleteGroupsRequest) AddGroup(group string) {
+	r.Groups = append(r.Groups, group)
+}
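+
+// Example (illustrative sketch): deleting two idle consumer groups in one request:
+//
+//   req := &DeleteGroupsRequest{}
+//   req.AddGroup("stale-group-a")
+//   req.AddGroup("stale-group-b")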
diff --git a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/Shopify/sarama/delete_groups_response.go
new file mode 100644
index 0000000..c067ebb
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_groups_response.go
@@ -0,0 +1,70 @@
+package sarama
+
+import (
+	"time"
+)
+
+type DeleteGroupsResponse struct {
+	ThrottleTime    time.Duration
+	GroupErrorCodes map[string]KError
+}
+
+func (r *DeleteGroupsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil {
+		return err
+	}
+	for groupID, errorCode := range r.GroupErrorCodes {
+		if err := pe.putString(groupID); err != nil {
+			return err
+		}
+		pe.putInt16(int16(errorCode))
+	}
+
+	return nil
+}
+
+func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) error {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	r.GroupErrorCodes = make(map[string]KError, n)
+	for i := 0; i < n; i++ {
+		groupID, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		errorCode, err := pd.getInt16()
+		if err != nil {
+			return err
+		}
+
+		r.GroupErrorCodes[groupID] = KError(errorCode)
+	}
+
+	return nil
+}
+
+func (r *DeleteGroupsResponse) key() int16 {
+	return 42
+}
+
+func (r *DeleteGroupsResponse) version() int16 {
+	return 0
+}
+
+func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion {
+	return V1_1_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/Shopify/sarama/delete_records_request.go
new file mode 100644
index 0000000..93efafd
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_records_request.go
@@ -0,0 +1,126 @@
+package sarama
+
+import (
+	"sort"
+	"time"
+)
+
+// request message format is:
+// [topic] timeout(int32)
+// where topic is:
+//  name(string) [partition]
+// where partition is:
+//  id(int32) offset(int64)
+
+type DeleteRecordsRequest struct {
+	Topics  map[string]*DeleteRecordsRequestTopic
+	Timeout time.Duration
+}
+
+func (d *DeleteRecordsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(d.Topics)); err != nil {
+		return err
+	}
+	keys := make([]string, 0, len(d.Topics))
+	for topic := range d.Topics {
+		keys = append(keys, topic)
+	}
+	sort.Strings(keys)
+	for _, topic := range keys {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := d.Topics[topic].encode(pe); err != nil {
+			return err
+		}
+	}
+	pe.putInt32(int32(d.Timeout / time.Millisecond))
+
+	return nil
+}
+
+func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		d.Topics = make(map[string]*DeleteRecordsRequestTopic, n)
+		for i := 0; i < n; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			details := new(DeleteRecordsRequestTopic)
+			if err = details.decode(pd, version); err != nil {
+				return err
+			}
+			d.Topics[topic] = details
+		}
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	d.Timeout = time.Duration(timeout) * time.Millisecond
+
+	return nil
+}
+
+func (d *DeleteRecordsRequest) key() int16 {
+	return 21
+}
+
+func (d *DeleteRecordsRequest) version() int16 {
+	return 0
+}
+
+func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type DeleteRecordsRequestTopic struct {
+	PartitionOffsets map[int32]int64 // partition => offset
+}
+
+func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil {
+		return err
+	}
+	keys := make([]int32, 0, len(t.PartitionOffsets))
+	for partition := range t.PartitionOffsets {
+		keys = append(keys, partition)
+	}
+	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
+	for _, partition := range keys {
+		pe.putInt32(partition)
+		pe.putInt64(t.PartitionOffsets[partition])
+	}
+	return nil
+}
+
+func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.PartitionOffsets = make(map[int32]int64, n)
+		for i := 0; i < n; i++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			offset, err := pd.getInt64()
+			if err != nil {
+				return err
+			}
+			t.PartitionOffsets[partition] = offset
+		}
+	}
+
+	return nil
+}
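+
+// Example (illustrative sketch): delete all records below offset 1000 on
+// partition 0 of "events":
+//
+//   req := &DeleteRecordsRequest{
+//   	Topics: map[string]*DeleteRecordsRequestTopic{
+//   		"events": {PartitionOffsets: map[int32]int64{0: 1000}},
+//   	},
+//   	Timeout: 30 * time.Second,
+//   }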
diff --git a/vendor/github.com/Shopify/sarama/delete_records_response.go b/vendor/github.com/Shopify/sarama/delete_records_response.go
new file mode 100644
index 0000000..733a58b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_records_response.go
@@ -0,0 +1,158 @@
+package sarama
+
+import (
+	"sort"
+	"time"
+)
+
+// response message format is:
+// throttleMs(int32) [topic]
+// where topic is:
+//  name(string) [partition]
+// where partition is:
+//  id(int32) low_watermark(int64) error_code(int16)
+
+type DeleteRecordsResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Topics       map[string]*DeleteRecordsResponseTopic
+}
+
+func (d *DeleteRecordsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(d.Topics)); err != nil {
+		return err
+	}
+	keys := make([]string, 0, len(d.Topics))
+	for topic := range d.Topics {
+		keys = append(keys, topic)
+	}
+	sort.Strings(keys)
+	for _, topic := range keys {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := d.Topics[topic].encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) error {
+	d.Version = version
+
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		d.Topics = make(map[string]*DeleteRecordsResponseTopic, n)
+		for i := 0; i < n; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			details := new(DeleteRecordsResponseTopic)
+			if err = details.decode(pd, version); err != nil {
+				return err
+			}
+			d.Topics[topic] = details
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteRecordsResponse) key() int16 {
+	return 21
+}
+
+func (d *DeleteRecordsResponse) version() int16 {
+	return 0
+}
+
+func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type DeleteRecordsResponseTopic struct {
+	Partitions map[int32]*DeleteRecordsResponsePartition
+}
+
+func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(t.Partitions)); err != nil {
+		return err
+	}
+	keys := make([]int32, 0, len(t.Partitions))
+	for partition := range t.Partitions {
+		keys = append(keys, partition)
+	}
+	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
+	for _, partition := range keys {
+		pe.putInt32(partition)
+		if err := t.Partitions[partition].encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n)
+		for i := 0; i < n; i++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			details := new(DeleteRecordsResponsePartition)
+			if err = details.decode(pd, version); err != nil {
+				return err
+			}
+			t.Partitions[partition] = details
+		}
+	}
+
+	return nil
+}
+
+type DeleteRecordsResponsePartition struct {
+	LowWatermark int64
+	Err          KError
+}
+
+func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error {
+	pe.putInt64(t.LowWatermark)
+	pe.putInt16(int16(t.Err))
+	return nil
+}
+
+func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error {
+	lowWatermark, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+	t.LowWatermark = lowWatermark
+
+	kErr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	t.Err = KError(kErr)
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/Shopify/sarama/delete_topics_request.go
new file mode 100644
index 0000000..911f67d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_topics_request.go
@@ -0,0 +1,48 @@
+package sarama
+
+import "time"
+
+type DeleteTopicsRequest struct {
+	Version int16
+	Topics  []string
+	Timeout time.Duration
+}
+
+func (d *DeleteTopicsRequest) encode(pe packetEncoder) error {
+	if err := pe.putStringArray(d.Topics); err != nil {
+		return err
+	}
+	pe.putInt32(int32(d.Timeout / time.Millisecond))
+
+	return nil
+}
+
+func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
+	if d.Topics, err = pd.getStringArray(); err != nil {
+		return err
+	}
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	d.Timeout = time.Duration(timeout) * time.Millisecond
+	d.Version = version
+	return nil
+}
+
+func (d *DeleteTopicsRequest) key() int16 {
+	return 20
+}
+
+func (d *DeleteTopicsRequest) version() int16 {
+	return d.Version
+}
+
+func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_10_1_0
+	}
+}
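+
+// Example (illustrative sketch): delete one topic with a 30s broker-side timeout:
+//
+//   req := &DeleteTopicsRequest{
+//   	Topics:  []string{"events"},
+//   	Timeout: 30 * time.Second,
+//   }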
diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/Shopify/sarama/delete_topics_response.go
new file mode 100644
index 0000000..3422546
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_topics_response.go
@@ -0,0 +1,78 @@
+package sarama
+
+import "time"
+
+type DeleteTopicsResponse struct {
+	Version         int16
+	ThrottleTime    time.Duration
+	TopicErrorCodes map[string]KError
+}
+
+func (d *DeleteTopicsResponse) encode(pe packetEncoder) error {
+	if d.Version >= 1 {
+		pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
+	}
+
+	if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil {
+		return err
+	}
+	for topic, errorCode := range d.TopicErrorCodes {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		pe.putInt16(int16(errorCode))
+	}
+
+	return nil
+}
+
+func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if version >= 1 {
+		throttleTime, err := pd.getInt32()
+		if err != nil {
+			return err
+		}
+		d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+		d.Version = version
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	d.TopicErrorCodes = make(map[string]KError, n)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		errorCode, err := pd.getInt16()
+		if err != nil {
+			return err
+		}
+
+		d.TopicErrorCodes[topic] = KError(errorCode)
+	}
+
+	return nil
+}
+
+func (d *DeleteTopicsResponse) key() int16 {
+	return 20
+}
+
+func (d *DeleteTopicsResponse) version() int16 {
+	return d.Version
+}
+
+func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_10_1_0
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go
new file mode 100644
index 0000000..ccb587b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_configs_request.go
@@ -0,0 +1,112 @@
+package sarama
+
+type DescribeConfigsRequest struct {
+	Version         int16
+	Resources       []*ConfigResource
+	IncludeSynonyms bool
+}
+
+type ConfigResource struct {
+	Type        ConfigResourceType
+	Name        string
+	ConfigNames []string
+}
+
+func (r *DescribeConfigsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(r.Resources)); err != nil {
+		return err
+	}
+
+	for _, c := range r.Resources {
+		pe.putInt8(int8(c.Type))
+		if err := pe.putString(c.Name); err != nil {
+			return err
+		}
+
+		if len(c.ConfigNames) == 0 {
+			pe.putInt32(-1)
+			continue
+		}
+		if err := pe.putStringArray(c.ConfigNames); err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 1 {
+		pe.putBool(r.IncludeSynonyms)
+	}
+
+	return nil
+}
+
+func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Resources = make([]*ConfigResource, n)
+
+	for i := 0; i < n; i++ {
+		r.Resources[i] = &ConfigResource{}
+		t, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Resources[i].Type = ConfigResourceType(t)
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		r.Resources[i].Name = name
+
+		if err != nil {
+		if err != nil {
+			return err
+		}
+
+		if confLength == -1 {
+			continue
+		}
+
+		cfnames := make([]string, confLength)
+		for i := 0; i < confLength; i++ {
+			s, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			cfnames[i] = s
+		}
+		r.Resources[i].ConfigNames = cfnames
+	}
+	r.Version = version
+	if r.Version >= 1 {
+		b, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+		r.IncludeSynonyms = b
+	}
+
+	return nil
+}
+
+func (r *DescribeConfigsRequest) key() int16 {
+	return 32
+}
+
+func (r *DescribeConfigsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V1_1_0_0
+	case 2:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
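+
+// Example (illustrative sketch, assuming the ConfigResourceType constants this
+// package defines elsewhere, e.g. TopicResource): fetch two topic-level config
+// values; a nil ConfigNames would return every config for the resource:
+//
+//   req := &DescribeConfigsRequest{
+//   	Version: 1,
+//   	Resources: []*ConfigResource{{
+//   		Type:        TopicResource,
+//   		Name:        "events",
+//   		ConfigNames: []string{"retention.ms", "cleanup.policy"},
+//   	}},
+//   }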
diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go
new file mode 100644
index 0000000..5737232
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_configs_response.go
@@ -0,0 +1,320 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+type ConfigSource int8
+
+func (s ConfigSource) String() string {
+	switch s {
+	case SourceUnknown:
+		return "Unknown"
+	case SourceTopic:
+		return "Topic"
+	case SourceDynamicBroker:
+		return "DynamicBroker"
+	case SourceDynamicDefaultBroker:
+		return "DynamicDefaultBroker"
+	case SourceStaticBroker:
+		return "StaticBroker"
+	case SourceDefault:
+		return "Default"
+	}
+	return fmt.Sprintf("Source Invalid: %d", int(s))
+}
+
+const (
+	SourceUnknown ConfigSource = iota
+	SourceTopic
+	SourceDynamicBroker
+	SourceDynamicDefaultBroker
+	SourceStaticBroker
+	SourceDefault
+)
+
+type DescribeConfigsResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Resources    []*ResourceResponse
+}
+
+type ResourceResponse struct {
+	ErrorCode int16
+	ErrorMsg  string
+	Type      ConfigResourceType
+	Name      string
+	Configs   []*ConfigEntry
+}
+
+type ConfigEntry struct {
+	Name      string
+	Value     string
+	ReadOnly  bool
+	Default   bool
+	Source    ConfigSource
+	Sensitive bool
+	Synonyms  []*ConfigSynonym
+}
+
+type ConfigSynonym struct {
+	ConfigName  string
+	ConfigValue string
+	Source      ConfigSource
+}
+
+func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) {
+	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+	if err = pe.putArrayLength(len(r.Resources)); err != nil {
+		return err
+	}
+
+	for _, c := range r.Resources {
+		if err = c.encode(pe, r.Version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Resources = make([]*ResourceResponse, n)
+	for i := 0; i < n; i++ {
+		rr := &ResourceResponse{}
+		if err := rr.decode(pd, version); err != nil {
+			return err
+		}
+		r.Resources[i] = rr
+	}
+
+	return nil
+}
+
+func (r *DescribeConfigsResponse) key() int16 {
+	return 32
+}
+
+func (r *DescribeConfigsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V1_0_0_0
+	case 2:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+func (r *ResourceResponse) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt16(r.ErrorCode)
+
+	if err = pe.putString(r.ErrorMsg); err != nil {
+		return err
+	}
+
+	pe.putInt8(int8(r.Type))
+
+	if err = pe.putString(r.Name); err != nil {
+		return err
+	}
+
+	if err = pe.putArrayLength(len(r.Configs)); err != nil {
+		return err
+	}
+
+	for _, c := range r.Configs {
+		if err = c.encode(pe, version); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) {
+	ec, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.ErrorCode = ec
+
+	em, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.ErrorMsg = em
+
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	r.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Name = name
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Configs = make([]*ConfigEntry, n)
+	for i := 0; i < n; i++ {
+		c := &ConfigEntry{}
+		if err := c.decode(pd, version); err != nil {
+			return err
+		}
+		r.Configs[i] = c
+	}
+	return nil
+}
+
+func (r *ConfigEntry) encode(pe packetEncoder, version int16) (err error) {
+	if err = pe.putString(r.Name); err != nil {
+		return err
+	}
+
+	if err = pe.putString(r.Value); err != nil {
+		return err
+	}
+
+	pe.putBool(r.ReadOnly)
+
+	if version <= 0 {
+		pe.putBool(r.Default)
+		pe.putBool(r.Sensitive)
+	} else {
+		pe.putInt8(int8(r.Source))
+		pe.putBool(r.Sensitive)
+
+		if err := pe.putArrayLength(len(r.Synonyms)); err != nil {
+			return err
+		}
+		for _, c := range r.Synonyms {
+			if err = c.encode(pe, version); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// See https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration
+func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
+	if version == 0 {
+		r.Source = SourceUnknown
+	}
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Name = name
+
+	value, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Value = value
+
+	read, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.ReadOnly = read
+
+	if version == 0 {
+		defaultB, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+		r.Default = defaultB
+	} else {
+		source, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Source = ConfigSource(source)
+	}
+
+	sensitive, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.Sensitive = sensitive
+
+	if version > 0 {
+		n, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.Synonyms = make([]*ConfigSynonym, n)
+
+		for i := 0; i < n; i++ {
+			s := &ConfigSynonym{}
+			if err := s.decode(pd, version); err != nil {
+				return err
+			}
+			r.Synonyms[i] = s
+		}
+	}
+	return nil
+}
+
+func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) {
+	err = pe.putString(c.ConfigName)
+	if err != nil {
+		return err
+	}
+
+	err = pe.putString(c.ConfigValue)
+	if err != nil {
+		return err
+	}
+
+	pe.putInt8(int8(c.Source))
+
+	return nil
+}
+
+func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error {
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	c.ConfigName = name
+
+	value, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	c.ConfigValue = value
+
+	source, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	c.Source = ConfigSource(source)
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go
new file mode 100644
index 0000000..1fb3567
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go
@@ -0,0 +1,30 @@
+package sarama
+
+type DescribeGroupsRequest struct {
+	Groups []string
+}
+
+func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
+	return pe.putStringArray(r.Groups)
+}
+
+func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Groups, err = pd.getStringArray()
+	return
+}
+
+func (r *DescribeGroupsRequest) key() int16 {
+	return 15
+}
+
+func (r *DescribeGroupsRequest) version() int16 {
+	return 0
+}
+
+func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
+
+func (r *DescribeGroupsRequest) AddGroup(group string) {
+	r.Groups = append(r.Groups, group)
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go
new file mode 100644
index 0000000..542b3a9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go
@@ -0,0 +1,187 @@
+package sarama
+
+type DescribeGroupsResponse struct {
+	Groups []*GroupDescription
+}
+
+func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(r.Groups)); err != nil {
+		return err
+	}
+
+	for _, groupDescription := range r.Groups {
+		if err := groupDescription.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Groups = make([]*GroupDescription, n)
+	for i := 0; i < n; i++ {
+		r.Groups[i] = new(GroupDescription)
+		if err := r.Groups[i].decode(pd); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeGroupsResponse) key() int16 {
+	return 15
+}
+
+func (r *DescribeGroupsResponse) version() int16 {
+	return 0
+}
+
+func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
+
+type GroupDescription struct {
+	Err          KError
+	GroupId      string
+	State        string
+	ProtocolType string
+	Protocol     string
+	Members      map[string]*GroupMemberDescription
+}
+
+func (gd *GroupDescription) encode(pe packetEncoder) error {
+	pe.putInt16(int16(gd.Err))
+
+	if err := pe.putString(gd.GroupId); err != nil {
+		return err
+	}
+	if err := pe.putString(gd.State); err != nil {
+		return err
+	}
+	if err := pe.putString(gd.ProtocolType); err != nil {
+		return err
+	}
+	if err := pe.putString(gd.Protocol); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(gd.Members)); err != nil {
+		return err
+	}
+
+	for memberId, groupMemberDescription := range gd.Members {
+		if err := pe.putString(memberId); err != nil {
+			return err
+		}
+		if err := groupMemberDescription.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	gd.Err = KError(kerr)
+
+	if gd.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+	if gd.State, err = pd.getString(); err != nil {
+		return
+	}
+	if gd.ProtocolType, err = pd.getString(); err != nil {
+		return
+	}
+	if gd.Protocol, err = pd.getString(); err != nil {
+		return
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	gd.Members = make(map[string]*GroupMemberDescription)
+	for i := 0; i < n; i++ {
+		memberId, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		gd.Members[memberId] = new(GroupMemberDescription)
+		if err := gd.Members[memberId].decode(pd); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type GroupMemberDescription struct {
+	ClientId         string
+	ClientHost       string
+	MemberMetadata   []byte
+	MemberAssignment []byte
+}
+
+func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
+	if err := pe.putString(gmd.ClientId); err != nil {
+		return err
+	}
+	if err := pe.putString(gmd.ClientHost); err != nil {
+		return err
+	}
+	if err := pe.putBytes(gmd.MemberMetadata); err != nil {
+		return err
+	}
+	if err := pe.putBytes(gmd.MemberAssignment); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
+	if gmd.ClientId, err = pd.getString(); err != nil {
+		return
+	}
+	if gmd.ClientHost, err = pd.getString(); err != nil {
+		return
+	}
+	if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
+		return
+	}
+	if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
+		return
+	}
+
+	return nil
+}
+
+func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+	assignment := new(ConsumerGroupMemberAssignment)
+	err := decode(gmd.MemberAssignment, assignment)
+	return assignment, err
+}
+
+func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
+	metadata := new(ConsumerGroupMemberMetadata)
+	err := decode(gmd.MemberMetadata, metadata)
+	return metadata, err
+}
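+
+// Example (illustrative sketch): given a *GroupDescription named group, recover
+// each member's decoded partition assignment:
+//
+//   for id, member := range group.Members {
+//   	assignment, err := member.GetMemberAssignment()
+//   	if err != nil {
+//   		continue
+//   	}
+//   	fmt.Println(id, assignment.Topics)
+//   }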
diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go
new file mode 100644
index 0000000..cb1e781
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go
@@ -0,0 +1,83 @@
+package sarama
+
+// DescribeLogDirsRequest is a describe request to get partitions' log size
+type DescribeLogDirsRequest struct {
+	// Version 0 and 1 are equal
+	// The version number is bumped to indicate that on quota violation brokers send out responses before throttling.
+	Version int16
+
+	// If this is an empty array, all topics will be queried
+	DescribeTopics []DescribeLogDirsRequestTopic
+}
+
+// DescribeLogDirsRequestTopic is a describe request about the log dir of one or more partitions within a Topic
+type DescribeLogDirsRequestTopic struct {
+	Topic        string
+	PartitionIDs []int32
+}
+
+func (r *DescribeLogDirsRequest) encode(pe packetEncoder) error {
+	length := len(r.DescribeTopics)
+	if length == 0 {
+		// In order to query all topics we must send null
+		length = -1
+	}
+
+	if err := pe.putArrayLength(length); err != nil {
+		return err
+	}
+
+	for _, d := range r.DescribeTopics {
+		if err := pe.putString(d.Topic); err != nil {
+			return err
+		}
+
+		if err := pe.putInt32Array(d.PartitionIDs); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeLogDirsRequest) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == -1 {
+		n = 0
+	}
+
+	topics := make([]DescribeLogDirsRequestTopic, n)
+	for i := 0; i < n; i++ {
+		topics[i] = DescribeLogDirsRequestTopic{}
+
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		topics[i].Topic = topic
+
+		pIDs, err := pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+		topics[i].PartitionIDs = pIDs
+	}
+	r.DescribeTopics = topics
+
+	return nil
+}
+
+func (r *DescribeLogDirsRequest) key() int16 {
+	return 35
+}
+
+func (r *DescribeLogDirsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion {
+	return V1_0_0_0
+}
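+
+// Example (illustrative sketch): an empty DescribeTopics slice is encoded as a
+// null array, which asks the broker to describe every topic it hosts:
+//
+//   req := &DescribeLogDirsRequest{} // all topics, all partitions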
diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go
new file mode 100644
index 0000000..d207312
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go
@@ -0,0 +1,219 @@
+package sarama
+
+import "time"
+
+type DescribeLogDirsResponse struct {
+	ThrottleTime time.Duration
+
+	// Version 0 and 1 are equal
+	// The version number is bumped to indicate that on quota violation brokers send out responses before throttling.
+	Version int16
+
+	LogDirs []DescribeLogDirsResponseDirMetadata
+}
+
+func (r *DescribeLogDirsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(r.LogDirs)); err != nil {
+		return err
+	}
+
+	for _, dir := range r.LogDirs {
+		if err := dir.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeLogDirsResponse) decode(pd packetDecoder, version int16) error {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	// Decode array of DescribeLogDirsResponseDirMetadata
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.LogDirs = make([]DescribeLogDirsResponseDirMetadata, n)
+	for i := 0; i < n; i++ {
+		dir := DescribeLogDirsResponseDirMetadata{}
+		if err := dir.decode(pd, version); err != nil {
+			return err
+		}
+		r.LogDirs[i] = dir
+	}
+
+	return nil
+}
+
+func (r *DescribeLogDirsResponse) key() int16 {
+	return 35
+}
+
+func (r *DescribeLogDirsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion {
+	return V1_0_0_0
+}
+
+type DescribeLogDirsResponseDirMetadata struct {
+	ErrorCode KError
+
+	// The absolute log directory path
+	Path   string
+	Topics []DescribeLogDirsResponseTopic
+}
+
+func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.ErrorCode))
+
+	if err := pe.putString(r.Path); err != nil {
+		return err
+	}
+
+	// decode expects an array length before the topics, so encode must write one
+	if err := pe.putArrayLength(len(r.Topics)); err != nil {
+		return err
+	}
+
+	for _, topic := range r.Topics {
+		if err := topic.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeLogDirsResponseDirMetadata) decode(pd packetDecoder, version int16) error {
+	errCode, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.ErrorCode = KError(errCode)
+
+	path, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Path = path
+
+	// Decode array of DescribeLogDirsResponseTopic
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Topics = make([]DescribeLogDirsResponseTopic, n)
+	for i := 0; i < n; i++ {
+		t := DescribeLogDirsResponseTopic{}
+
+		if err := t.decode(pd, version); err != nil {
+			return err
+		}
+
+		r.Topics[i] = t
+	}
+
+	return nil
+}
+
+// DescribeLogDirsResponseTopic contains a topic's partitions descriptions
+type DescribeLogDirsResponseTopic struct {
+	Topic      string
+	Partitions []DescribeLogDirsResponsePartition
+}
+
+func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder) error {
+	if err := pe.putString(r.Topic); err != nil {
+		return err
+	}
+
+	// decode expects an array length before the partitions, so encode must write one
+	if err := pe.putArrayLength(len(r.Partitions)); err != nil {
+		return err
+	}
+
+	for _, partition := range r.Partitions {
+		if err := partition.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeLogDirsResponseTopic) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Topic = t
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	r.Partitions = make([]DescribeLogDirsResponsePartition, n)
+	for i := 0; i < n; i++ {
+		p := DescribeLogDirsResponsePartition{}
+		if err := p.decode(pd, version); err != nil {
+			return err
+		}
+		r.Partitions[i] = p
+	}
+
+	return nil
+}
+
+// DescribeLogDirsResponsePartition describes a partition's log directory
+type DescribeLogDirsResponsePartition struct {
+	PartitionID int32
+
+	// The size of the log segments of the partition in bytes.
+	Size int64
+
+	// The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or
+	// current replica's LEO (if it is the future log for the partition)
+	OffsetLag int64
+
+	// True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of
+	// the replica in the future.
+	IsTemporary bool
+}
+
+func (r *DescribeLogDirsResponsePartition) encode(pe packetEncoder) error {
+	pe.putInt32(r.PartitionID)
+	pe.putInt64(r.Size)
+	pe.putInt64(r.OffsetLag)
+	pe.putBool(r.IsTemporary)
+
+	return nil
+}
+
+func (r *DescribeLogDirsResponsePartition) decode(pd packetDecoder, version int16) error {
+	pID, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	r.PartitionID = pID
+
+	size, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+	r.Size = size
+
+	lag, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+	r.OffsetLag = lag
+
+	isTemp, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.IsTemporary = isTemp
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml
new file mode 100644
index 0000000..4c030de
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/dev.yml
@@ -0,0 +1,10 @@
+name: sarama
+
+up:
+  - go:
+      version: '1.13.4'
+
+commands:
+  test:
+    run: make test
+    desc: 'run unit tests'
diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go
new file mode 100644
index 0000000..7ce3bc0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/encoder_decoder.go
@@ -0,0 +1,89 @@
+package sarama
+
+import (
+	"fmt"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// encoder is the interface that wraps the basic encode method.
+// Anything implementing encoder can be turned into bytes using Kafka's encoding rules.
+type encoder interface {
+	encode(pe packetEncoder) error
+}
+
+// encode takes an encoder and turns it into bytes while potentially recording metrics.
+func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
+	if e == nil {
+		return nil, nil
+	}
+
+	var prepEnc prepEncoder
+	var realEnc realEncoder
+
+	err := e.encode(&prepEnc)
+	if err != nil {
+		return nil, err
+	}
+
+	if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
+		return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
+	}
+
+	realEnc.raw = make([]byte, prepEnc.length)
+	realEnc.registry = metricRegistry
+	err = e.encode(&realEnc)
+	if err != nil {
+		return nil, err
+	}
+
+	return realEnc.raw, nil
+}
+
+// decoder is the interface that wraps the basic decode method.
+// Anything implementing decoder can be extracted from bytes using Kafka's encoding rules.
+type decoder interface {
+	decode(pd packetDecoder) error
+}
+
+type versionedDecoder interface {
+	decode(pd packetDecoder, version int16) error
+}
+
+// decode takes bytes and a decoder and fills the decoder's fields from the bytes,
+// interpreted using Kafka's encoding rules.
+func decode(buf []byte, in decoder) error {
+	if buf == nil {
+		return nil
+	}
+
+	helper := realDecoder{raw: buf}
+	err := in.decode(&helper)
+	if err != nil {
+		return err
+	}
+
+	if helper.off != len(buf) {
+		return PacketDecodingError{"invalid length"}
+	}
+
+	return nil
+}
+
+func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
+	if buf == nil {
+		return nil
+	}
+
+	helper := realDecoder{raw: buf}
+	err := in.decode(&helper, version)
+	if err != nil {
+		return err
+	}
+
+	if helper.off != len(buf) {
+		return PacketDecodingError{"invalid length"}
+	}
+
+	return nil
+}
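+
+// Example (illustrative sketch): these helpers give every request/response type
+// in this package a symmetric wire round trip:
+//
+//   raw, err := encode(req, nil)        // nil registry skips metrics
+//   err = versionedDecode(raw, resp, 0) // version must match what was encoded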
diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/Shopify/sarama/end_txn_request.go
new file mode 100644
index 0000000..2cd9b50
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/end_txn_request.go
@@ -0,0 +1,50 @@
+package sarama
+
+type EndTxnRequest struct {
+	TransactionalID   string
+	ProducerID        int64
+	ProducerEpoch     int16
+	TransactionResult bool
+}
+
+func (a *EndTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+
+	pe.putInt64(a.ProducerID)
+
+	pe.putInt16(a.ProducerEpoch)
+
+	pe.putBool(a.TransactionResult)
+
+	return nil
+}
+
+func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+	if a.TransactionResult, err = pd.getBool(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (a *EndTxnRequest) key() int16 {
+	return 26
+}
+
+func (a *EndTxnRequest) version() int16 {
+	return 0
+}
+
+func (a *EndTxnRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/Shopify/sarama/end_txn_response.go
new file mode 100644
index 0000000..33b27e3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/end_txn_response.go
@@ -0,0 +1,44 @@
+package sarama
+
+import (
+	"time"
+)
+
+type EndTxnResponse struct {
+	ThrottleTime time.Duration
+	Err          KError
+}
+
+func (e *EndTxnResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(e.ThrottleTime / time.Millisecond))
+	pe.putInt16(int16(e.Err))
+	return nil
+}
+
+func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	e.Err = KError(kerr)
+
+	return nil
+}
+
+func (e *EndTxnResponse) key() int16 {
+	return 25
+}
+
+func (e *EndTxnResponse) version() int16 {
+	return 0
+}
+
+func (e *EndTxnResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
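
Taken together with the helpers in encoder_decoder.go, the request type above can be exercised with a round trip; a hedged in-package sketch (values are illustrative):

    func endTxnRoundTrip() (*EndTxnRequest, error) {
        in := &EndTxnRequest{
            TransactionalID:   "txn-1",
            ProducerID:        1000,
            ProducerEpoch:     1,
            TransactionResult: true, // assumed: true signals commit, false abort
        }
        buf, err := encode(in, nil) // nil registry, as in the package's tests
        if err != nil {
            return nil, err
        }
        out := &EndTxnRequest{}
        // versionedDecode also verifies that every byte was consumed.
        if err := versionedDecode(buf, out, in.version()); err != nil {
            return nil, err
        }
        return out, nil
    }
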
diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go
new file mode 100644
index 0000000..97be3c0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/errors.go
@@ -0,0 +1,369 @@
+package sarama
+
+import (
+	"errors"
+	"fmt"
+)
+
+// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
+// or otherwise failed to respond.
+var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
+
+// ErrClosedClient is the error returned when a method is called on a client that has been closed.
+var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
+
+// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
+// not contain the expected information.
+var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
+
+// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
+// (meaning one outside of the range [0...numPartitions-1]).
+var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
+
+// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
+var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
+
+// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
+var ErrNotConnected = errors.New("kafka: broker not connected")
+
+// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
+// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
+// of the message set.
+var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
+
+// ErrShuttingDown is returned when a producer receives a message during shutdown.
+var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
+
+// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
+var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
+
+// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing
+// a RecordBatch.
+var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch")
+
+// ErrControllerNotAvailable is returned when the server did not return a valid controller ID. This may mean the
+// Kafka server's version is lower than 0.10.0.0.
+var ErrControllerNotAvailable = errors.New("kafka: controller is not available")
+
+// ErrNoTopicsToUpdateMetadata is returned when Metadata.Full is set to false but no specific topics were found
+// to update the metadata for.
+var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
+
+// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
+// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
+type PacketEncodingError struct {
+	Info string
+}
+
+func (err PacketEncodingError) Error() string {
+	return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
+}
+
+// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
+// This can be a bad CRC or length field, or any other invalid value.
+type PacketDecodingError struct {
+	Info string
+}
+
+func (err PacketDecodingError) Error() string {
+	return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
+}
+
+// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
+// when the specified configuration is invalid.
+type ConfigurationError string
+
+func (err ConfigurationError) Error() string {
+	return "kafka: invalid configuration (" + string(err) + ")"
+}
+
+// KError is the type of error that can be returned directly by the Kafka broker.
+// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
+type KError int16
+
+// MultiError is used to contain multiple errors.
+type MultiError struct {
+	Errors *[]error
+}
+
+func (mErr MultiError) Error() string {
+	var errString = ""
+	for _, err := range *mErr.Errors {
+		errString += err.Error() + ","
+	}
+	return errString
+}
+
+// ErrDeleteRecords is the type of error returned when a delete records request fails for some records.
+type ErrDeleteRecords struct {
+	MultiError
+}
+
+func (err ErrDeleteRecords) Error() string {
+	return "kafka server: failed to delete records " + err.MultiError.Error()
+}
+
+// Numeric error codes returned by the Kafka server.
+const (
+	ErrNoError                            KError = 0
+	ErrUnknown                            KError = -1
+	ErrOffsetOutOfRange                   KError = 1
+	ErrInvalidMessage                     KError = 2
+	ErrUnknownTopicOrPartition            KError = 3
+	ErrInvalidMessageSize                 KError = 4
+	ErrLeaderNotAvailable                 KError = 5
+	ErrNotLeaderForPartition              KError = 6
+	ErrRequestTimedOut                    KError = 7
+	ErrBrokerNotAvailable                 KError = 8
+	ErrReplicaNotAvailable                KError = 9
+	ErrMessageSizeTooLarge                KError = 10
+	ErrStaleControllerEpochCode           KError = 11
+	ErrOffsetMetadataTooLarge             KError = 12
+	ErrNetworkException                   KError = 13
+	ErrOffsetsLoadInProgress              KError = 14
+	ErrConsumerCoordinatorNotAvailable    KError = 15
+	ErrNotCoordinatorForConsumer          KError = 16
+	ErrInvalidTopic                       KError = 17
+	ErrMessageSetSizeTooLarge             KError = 18
+	ErrNotEnoughReplicas                  KError = 19
+	ErrNotEnoughReplicasAfterAppend       KError = 20
+	ErrInvalidRequiredAcks                KError = 21
+	ErrIllegalGeneration                  KError = 22
+	ErrInconsistentGroupProtocol          KError = 23
+	ErrInvalidGroupId                     KError = 24
+	ErrUnknownMemberId                    KError = 25
+	ErrInvalidSessionTimeout              KError = 26
+	ErrRebalanceInProgress                KError = 27
+	ErrInvalidCommitOffsetSize            KError = 28
+	ErrTopicAuthorizationFailed           KError = 29
+	ErrGroupAuthorizationFailed           KError = 30
+	ErrClusterAuthorizationFailed         KError = 31
+	ErrInvalidTimestamp                   KError = 32
+	ErrUnsupportedSASLMechanism           KError = 33
+	ErrIllegalSASLState                   KError = 34
+	ErrUnsupportedVersion                 KError = 35
+	ErrTopicAlreadyExists                 KError = 36
+	ErrInvalidPartitions                  KError = 37
+	ErrInvalidReplicationFactor           KError = 38
+	ErrInvalidReplicaAssignment           KError = 39
+	ErrInvalidConfig                      KError = 40
+	ErrNotController                      KError = 41
+	ErrInvalidRequest                     KError = 42
+	ErrUnsupportedForMessageFormat        KError = 43
+	ErrPolicyViolation                    KError = 44
+	ErrOutOfOrderSequenceNumber           KError = 45
+	ErrDuplicateSequenceNumber            KError = 46
+	ErrInvalidProducerEpoch               KError = 47
+	ErrInvalidTxnState                    KError = 48
+	ErrInvalidProducerIDMapping           KError = 49
+	ErrInvalidTransactionTimeout          KError = 50
+	ErrConcurrentTransactions             KError = 51
+	ErrTransactionCoordinatorFenced       KError = 52
+	ErrTransactionalIDAuthorizationFailed KError = 53
+	ErrSecurityDisabled                   KError = 54
+	ErrOperationNotAttempted              KError = 55
+	ErrKafkaStorageError                  KError = 56
+	ErrLogDirNotFound                     KError = 57
+	ErrSASLAuthenticationFailed           KError = 58
+	ErrUnknownProducerID                  KError = 59
+	ErrReassignmentInProgress             KError = 60
+	ErrDelegationTokenAuthDisabled        KError = 61
+	ErrDelegationTokenNotFound            KError = 62
+	ErrDelegationTokenOwnerMismatch       KError = 63
+	ErrDelegationTokenRequestNotAllowed   KError = 64
+	ErrDelegationTokenAuthorizationFailed KError = 65
+	ErrDelegationTokenExpired             KError = 66
+	ErrInvalidPrincipalType               KError = 67
+	ErrNonEmptyGroup                      KError = 68
+	ErrGroupIDNotFound                    KError = 69
+	ErrFetchSessionIDNotFound             KError = 70
+	ErrInvalidFetchSessionEpoch           KError = 71
+	ErrListenerNotFound                   KError = 72
+	ErrTopicDeletionDisabled              KError = 73
+	ErrFencedLeaderEpoch                  KError = 74
+	ErrUnknownLeaderEpoch                 KError = 75
+	ErrUnsupportedCompressionType         KError = 76
+	ErrStaleBrokerEpoch                   KError = 77
+	ErrOffsetNotAvailable                 KError = 78
+	ErrMemberIdRequired                   KError = 79
+	ErrPreferredLeaderNotAvailable        KError = 80
+	ErrGroupMaxSizeReached                KError = 81
+	ErrFencedInstancedId                  KError = 82
+)
+
+func (err KError) Error() string {
+	// Error messages stolen/adapted from
+	// https://kafka.apache.org/protocol#protocol_error_codes
+	switch err {
+	case ErrNoError:
+		return "kafka server: Not an error, why are you printing me?"
+	case ErrUnknown:
+		return "kafka server: Unexpected (unknown?) server error."
+	case ErrOffsetOutOfRange:
+		return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
+	case ErrInvalidMessage:
+		return "kafka server: Message contents does not match its CRC."
+	case ErrUnknownTopicOrPartition:
+		return "kafka server: Request was for a topic or partition that does not exist on this broker."
+	case ErrInvalidMessageSize:
+		return "kafka server: The message has a negative size."
+	case ErrLeaderNotAvailable:
+		return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
+	case ErrNotLeaderForPartition:
+		return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
+	case ErrRequestTimedOut:
+		return "kafka server: Request exceeded the user-specified time limit in the request."
+	case ErrBrokerNotAvailable:
+		return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
+	case ErrReplicaNotAvailable:
+		return "kafka server: Replica information not available, one or more brokers are down."
+	case ErrMessageSizeTooLarge:
+		return "kafka server: Message was too large, server rejected it to avoid allocation error."
+	case ErrStaleControllerEpochCode:
+		return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
+	case ErrOffsetMetadataTooLarge:
+		return "kafka server: Specified a string larger than the configured maximum for offset metadata."
+	case ErrNetworkException:
+		return "kafka server: The server disconnected before a response was received."
+	case ErrOffsetsLoadInProgress:
+		return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
+	case ErrConsumerCoordinatorNotAvailable:
+		return "kafka server: Offset's topic has not yet been created."
+	case ErrNotCoordinatorForConsumer:
+		return "kafka server: Request was for a consumer group that is not coordinated by this broker."
+	case ErrInvalidTopic:
+		return "kafka server: The request attempted to perform an operation on an invalid topic."
+	case ErrMessageSetSizeTooLarge:
+		return "kafka server: The request included message batch larger than the configured segment size on the server."
+	case ErrNotEnoughReplicas:
+		return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
+	case ErrNotEnoughReplicasAfterAppend:
+		return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
+	case ErrInvalidRequiredAcks:
+		return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
+	case ErrIllegalGeneration:
+		return "kafka server: The provided generation id is not the current generation."
+	case ErrInconsistentGroupProtocol:
+		return "kafka server: The provider group protocol type is incompatible with the other members."
+	case ErrInvalidGroupId:
+		return "kafka server: The provided group id was empty."
+	case ErrUnknownMemberId:
+		return "kafka server: The provided member is not known in the current generation."
+	case ErrInvalidSessionTimeout:
+		return "kafka server: The provided session timeout is outside the allowed range."
+	case ErrRebalanceInProgress:
+		return "kafka server: A rebalance for the group is in progress. Please re-join the group."
+	case ErrInvalidCommitOffsetSize:
+		return "kafka server: The provided commit metadata was too large."
+	case ErrTopicAuthorizationFailed:
+		return "kafka server: The client is not authorized to access this topic."
+	case ErrGroupAuthorizationFailed:
+		return "kafka server: The client is not authorized to access this group."
+	case ErrClusterAuthorizationFailed:
+		return "kafka server: The client is not authorized to send this request type."
+	case ErrInvalidTimestamp:
+		return "kafka server: The timestamp of the message is out of acceptable range."
+	case ErrUnsupportedSASLMechanism:
+		return "kafka server: The broker does not support the requested SASL mechanism."
+	case ErrIllegalSASLState:
+		return "kafka server: Request is not valid given the current SASL state."
+	case ErrUnsupportedVersion:
+		return "kafka server: The version of API is not supported."
+	case ErrTopicAlreadyExists:
+		return "kafka server: Topic with this name already exists."
+	case ErrInvalidPartitions:
+		return "kafka server: Number of partitions is invalid."
+	case ErrInvalidReplicationFactor:
+		return "kafka server: Replication-factor is invalid."
+	case ErrInvalidReplicaAssignment:
+		return "kafka server: Replica assignment is invalid."
+	case ErrInvalidConfig:
+		return "kafka server: Configuration is invalid."
+	case ErrNotController:
+		return "kafka server: This is not the correct controller for this cluster."
+	case ErrInvalidRequest:
+		return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
+	case ErrUnsupportedForMessageFormat:
+		return "kafka server: The requested operation is not supported by the message format version."
+	case ErrPolicyViolation:
+		return "kafka server: Request parameters do not satisfy the configured policy."
+	case ErrOutOfOrderSequenceNumber:
+		return "kafka server: The broker received an out of order sequence number."
+	case ErrDuplicateSequenceNumber:
+		return "kafka server: The broker received a duplicate sequence number."
+	case ErrInvalidProducerEpoch:
+		return "kafka server: Producer attempted an operation with an old epoch."
+	case ErrInvalidTxnState:
+		return "kafka server: The producer attempted a transactional operation in an invalid state."
+	case ErrInvalidProducerIDMapping:
+		return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id."
+	case ErrInvalidTransactionTimeout:
+		return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)."
+	case ErrConcurrentTransactions:
+		return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing."
+	case ErrTransactionCoordinatorFenced:
+		return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer."
+	case ErrTransactionalIDAuthorizationFailed:
+		return "kafka server: Transactional ID authorization failed."
+	case ErrSecurityDisabled:
+		return "kafka server: Security features are disabled."
+	case ErrOperationNotAttempted:
+		return "kafka server: The broker did not attempt to execute this operation."
+	case ErrKafkaStorageError:
+		return "kafka server: Disk error when trying to access log file on the disk."
+	case ErrLogDirNotFound:
+		return "kafka server: The specified log directory is not found in the broker config."
+	case ErrSASLAuthenticationFailed:
+		return "kafka server: SASL Authentication failed."
+	case ErrUnknownProducerID:
+		return "kafka server: The broker could not locate the producer metadata associated with the Producer ID."
+	case ErrReassignmentInProgress:
+		return "kafka server: A partition reassignment is in progress."
+	case ErrDelegationTokenAuthDisabled:
+		return "kafka server: Delegation Token feature is not enabled."
+	case ErrDelegationTokenNotFound:
+		return "kafka server: Delegation Token is not found on server."
+	case ErrDelegationTokenOwnerMismatch:
+		return "kafka server: Specified Principal is not valid Owner/Renewer."
+	case ErrDelegationTokenRequestNotAllowed:
+		return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels."
+	case ErrDelegationTokenAuthorizationFailed:
+		return "kafka server: Delegation Token authorization failed."
+	case ErrDelegationTokenExpired:
+		return "kafka server: Delegation Token is expired."
+	case ErrInvalidPrincipalType:
+		return "kafka server: Supplied principalType is not supported."
+	case ErrNonEmptyGroup:
+		return "kafka server: The group is not empty."
+	case ErrGroupIDNotFound:
+		return "kafka server: The group id does not exist."
+	case ErrFetchSessionIDNotFound:
+		return "kafka server: The fetch session ID was not found."
+	case ErrInvalidFetchSessionEpoch:
+		return "kafka server: The fetch session epoch is invalid."
+	case ErrListenerNotFound:
+		return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed."
+	case ErrTopicDeletionDisabled:
+		return "kafka server: Topic deletion is disabled."
+	case ErrFencedLeaderEpoch:
+		return "kafka server: The leader epoch in the request is older than the epoch on the broker."
+	case ErrUnknownLeaderEpoch:
+		return "kafka server: The leader epoch in the request is newer than the epoch on the broker."
+	case ErrUnsupportedCompressionType:
+		return "kafka server: The requesting client does not support the compression type of given partition."
+	case ErrStaleBrokerEpoch:
+		return "kafka server: Broker epoch has changed."
+	case ErrOffsetNotAvailable:
+		return "kafka server: The leader high watermark has not caught up from a recent leader election, so the offsets cannot be guaranteed to be monotonically increasing."
+	case ErrMemberIdRequired:
+		return "kafka server: The group member needs to have a valid member id before actually entering a consumer group."
+	case ErrPreferredLeaderNotAvailable:
+		return "kafka server: The preferred leader was not available."
+	case ErrGroupMaxSizeReached:
+		return "kafka server: The consumer group has reached its configured maximum number of members."
+	case ErrFencedInstancedId:
+		return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id."
+	}
+
+	return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
+}
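
Since KError implements the error interface, callers can compare broker errors directly against the sentinel constants above. A short hedged sketch of how client code might classify a few of them as retriable (the classification itself is an assumption, not something this file prescribes):

    func isRetriableBrokerErr(err error) bool {
        switch err {
        case ErrLeaderNotAvailable, ErrNotLeaderForPartition,
            ErrRequestTimedOut, ErrNetworkException:
            // Assumed transient: refresh metadata and retry.
            return true
        default:
            return false
        }
    }
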
diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go
new file mode 100644
index 0000000..4db9ddd
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/fetch_request.go
@@ -0,0 +1,170 @@
+package sarama
+
+type fetchRequestBlock struct {
+	fetchOffset int64
+	maxBytes    int32
+}
+
+func (b *fetchRequestBlock) encode(pe packetEncoder) error {
+	pe.putInt64(b.fetchOffset)
+	pe.putInt32(b.maxBytes)
+	return nil
+}
+
+func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
+	if b.fetchOffset, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if b.maxBytes, err = pd.getInt32(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
+// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that.  The KIP is at
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
+type FetchRequest struct {
+	MaxWaitTime int32
+	MinBytes    int32
+	MaxBytes    int32
+	Version     int16
+	Isolation   IsolationLevel
+	blocks      map[string]map[int32]*fetchRequestBlock
+}
+
+type IsolationLevel int8
+
+const (
+	ReadUncommitted IsolationLevel = iota
+	ReadCommitted
+)
+
+func (r *FetchRequest) encode(pe packetEncoder) (err error) {
+	pe.putInt32(-1) // replica ID is always -1 for clients
+	pe.putInt32(r.MaxWaitTime)
+	pe.putInt32(r.MinBytes)
+	if r.Version >= 3 {
+		pe.putInt32(r.MaxBytes)
+	}
+	if r.Version >= 4 {
+		pe.putInt8(int8(r.Isolation))
+	}
+	err = pe.putArrayLength(len(r.blocks))
+	if err != nil {
+		return err
+	}
+	for topic, blocks := range r.blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(blocks))
+		if err != nil {
+			return err
+		}
+		for partition, block := range blocks {
+			pe.putInt32(partition)
+			err = block.encode(pe)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if _, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if r.MaxWaitTime, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if r.MinBytes, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if r.Version >= 3 {
+		if r.MaxBytes, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+	if r.Version >= 4 {
+		isolation, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Isolation = IsolationLevel(isolation)
+	}
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount == 0 {
+		return nil
+	}
+	r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+	for i := 0; i < topicCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			fetchBlock := &fetchRequestBlock{}
+			if err = fetchBlock.decode(pd); err != nil {
+				return err
+			}
+			r.blocks[topic][partition] = fetchBlock
+		}
+	}
+	return nil
+}
+
+func (r *FetchRequest) key() int16 {
+	return 1
+}
+
+func (r *FetchRequest) version() int16 {
+	return r.Version
+}
+
+func (r *FetchRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_9_0_0
+	case 2:
+		return V0_10_0_0
+	case 3:
+		return V0_10_1_0
+	case 4:
+		return V0_11_0_0
+	default:
+		return MinVersion
+	}
+}
+
+func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
+	if r.blocks == nil {
+		r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+	}
+
+	tmp := new(fetchRequestBlock)
+	tmp.maxBytes = maxBytes
+	tmp.fetchOffset = fetchOffset
+
+	r.blocks[topic][partitionID] = tmp
+}
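
AddBlock fills in the nested topic-to-partition map lazily, so building a fetch request is just a matter of setting the top-level limits and adding blocks; a minimal hedged sketch (all values illustrative):

    req := &FetchRequest{
        Version:     3,       // v3 adds the request-level MaxBytes cap (requires 0.10.1+, per requiredVersion)
        MinBytes:    1,       // respond as soon as any data is available
        MaxWaitTime: 500,     // assumed milliseconds the broker may wait for MinBytes
        MaxBytes:    1 << 20, // cap the whole response at 1 MiB
    }
    // Fetch partition 0 of "my-topic" from offset 42, at most 64 KiB for this partition.
    req.AddBlock("my-topic", 0, 42, 64<<10)
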
diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go
new file mode 100644
index 0000000..3afc187
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/fetch_response.go
@@ -0,0 +1,489 @@
+package sarama
+
+import (
+	"sort"
+	"time"
+)
+
+type AbortedTransaction struct {
+	ProducerID  int64
+	FirstOffset int64
+}
+
+func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
+	if t.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if t.FirstOffset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
+	pe.putInt64(t.ProducerID)
+	pe.putInt64(t.FirstOffset)
+
+	return nil
+}
+
+type FetchResponseBlock struct {
+	Err                 KError
+	HighWaterMarkOffset int64
+	LastStableOffset    int64
+	AbortedTransactions []*AbortedTransaction
+	Records             *Records // deprecated: use FetchResponseBlock.RecordsSet
+	RecordsSet          []*Records
+	Partial             bool
+}
+
+func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.Err = KError(tmp)
+
+	b.HighWaterMarkOffset, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	if version >= 4 {
+		b.LastStableOffset, err = pd.getInt64()
+		if err != nil {
+			return err
+		}
+
+		numTransact, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		if numTransact >= 0 {
+			b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
+		}
+
+		for i := 0; i < numTransact; i++ {
+			transact := new(AbortedTransaction)
+			if err = transact.decode(pd); err != nil {
+				return err
+			}
+			b.AbortedTransactions[i] = transact
+		}
+	}
+
+	recordsSize, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	recordsDecoder, err := pd.getSubset(int(recordsSize))
+	if err != nil {
+		return err
+	}
+
+	b.RecordsSet = []*Records{}
+
+	for recordsDecoder.remaining() > 0 {
+		records := &Records{}
+		if err := records.decode(recordsDecoder); err != nil {
+			// Truncated trailing data is expected and not an error; mark the
+			// block as partial only when nothing was decoded at all.
+			if err == ErrInsufficientData {
+				if len(b.RecordsSet) == 0 {
+					b.Partial = true
+				}
+				break
+			}
+			return err
+		}
+
+		partial, err := records.isPartial()
+		if err != nil {
+			return err
+		}
+
+		n, err := records.numRecords()
+		if err != nil {
+			return err
+		}
+
+		if n > 0 || (partial && len(b.RecordsSet) == 0) {
+			b.RecordsSet = append(b.RecordsSet, records)
+
+			if b.Records == nil {
+				b.Records = records
+			}
+		}
+
+		overflow, err := records.isOverflow()
+		if err != nil {
+			return err
+		}
+
+		if partial || overflow {
+			break
+		}
+	}
+
+	return nil
+}
+
+func (b *FetchResponseBlock) numRecords() (int, error) {
+	sum := 0
+
+	for _, records := range b.RecordsSet {
+		count, err := records.numRecords()
+		if err != nil {
+			return 0, err
+		}
+
+		sum += count
+	}
+
+	return sum, nil
+}
+
+func (b *FetchResponseBlock) isPartial() (bool, error) {
+	if b.Partial {
+		return true, nil
+	}
+
+	if len(b.RecordsSet) == 1 {
+		return b.RecordsSet[0].isPartial()
+	}
+
+	return false, nil
+}
+
+func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt16(int16(b.Err))
+
+	pe.putInt64(b.HighWaterMarkOffset)
+
+	if version >= 4 {
+		pe.putInt64(b.LastStableOffset)
+
+		if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
+			return err
+		}
+		for _, transact := range b.AbortedTransactions {
+			if err = transact.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	pe.push(&lengthField{})
+	for _, records := range b.RecordsSet {
+		err = records.encode(pe)
+		if err != nil {
+			return err
+		}
+	}
+	return pe.pop()
+}
+
+func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction {
+	// No documentation guarantees that the field `fetchResponse.AbortedTransactions` is ordered,
+	// and the Java implementation uses a PriorityQueue keyed on `FirstOffset`, so we sort it ourselves.
+	at := b.AbortedTransactions
+	sort.Slice(
+		at,
+		func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset },
+	)
+	return at
+}
+
+type FetchResponse struct {
+	Blocks        map[string]map[int32]*FetchResponseBlock
+	ThrottleTime  time.Duration
+	Version       int16 // v1 requires 0.9+, v2 requires 0.10+
+	LogAppendTime bool
+	Timestamp     time.Time
+}
+
+func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.Version >= 1 {
+		throttle, err := pd.getInt32()
+		if err != nil {
+			return err
+		}
+		r.ThrottleTime = time.Duration(throttle) * time.Millisecond
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numBlocks, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
+
+		for j := 0; j < numBlocks; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := new(FetchResponseBlock)
+			err = block.decode(pd, version)
+			if err != nil {
+				return err
+			}
+			r.Blocks[name][id] = block
+		}
+	}
+
+	return nil
+}
+
+func (r *FetchResponse) encode(pe packetEncoder) (err error) {
+	if r.Version >= 1 {
+		pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+	}
+
+	err = pe.putArrayLength(len(r.Blocks))
+	if err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.Blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+
+		for id, block := range partitions {
+			pe.putInt32(id)
+			err = block.encode(pe, r.Version)
+			if err != nil {
+				return err
+			}
+		}
+
+	}
+	return nil
+}
+
+func (r *FetchResponse) key() int16 {
+	return 1
+}
+
+func (r *FetchResponse) version() int16 {
+	return r.Version
+}
+
+func (r *FetchResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_9_0_0
+	case 2:
+		return V0_10_0_0
+	case 3:
+		return V0_10_1_0
+	case 4:
+		return V0_11_0_0
+	default:
+		return MinVersion
+	}
+}
+
+func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
+func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+	}
+	partitions, ok := r.Blocks[topic]
+	if !ok {
+		partitions = make(map[int32]*FetchResponseBlock)
+		r.Blocks[topic] = partitions
+	}
+	frb, ok := partitions[partition]
+	if !ok {
+		frb = new(FetchResponseBlock)
+		partitions[partition] = frb
+	}
+	frb.Err = err
+}
+
+func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+	}
+	partitions, ok := r.Blocks[topic]
+	if !ok {
+		partitions = make(map[int32]*FetchResponseBlock)
+		r.Blocks[topic] = partitions
+	}
+	frb, ok := partitions[partition]
+	if !ok {
+		frb = new(FetchResponseBlock)
+		partitions[partition] = frb
+	}
+
+	return frb
+}
+
+func encodeKV(key, value Encoder) ([]byte, []byte) {
+	var kb []byte
+	var vb []byte
+	if key != nil {
+		kb, _ = key.Encode()
+	}
+	if value != nil {
+		vb, _ = value.Encode()
+	}
+
+	return kb, vb
+}
+
+func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) {
+	frb := r.getOrCreateBlock(topic, partition)
+	kb, vb := encodeKV(key, value)
+	if r.LogAppendTime {
+		timestamp = r.Timestamp
+	}
+	msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: timestamp, Version: version}
+	msgBlock := &MessageBlock{Msg: msg, Offset: offset}
+	if len(frb.RecordsSet) == 0 {
+		records := newLegacyRecords(&MessageSet{})
+		frb.RecordsSet = []*Records{&records}
+	}
+	set := frb.RecordsSet[0].MsgSet
+	set.Messages = append(set.Messages, msgBlock)
+}
+
+func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) {
+	frb := r.getOrCreateBlock(topic, partition)
+	kb, vb := encodeKV(key, value)
+	if len(frb.RecordsSet) == 0 {
+		records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
+		frb.RecordsSet = []*Records{&records}
+	}
+	batch := frb.RecordsSet[0].RecordBatch
+	rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
+	batch.addRecord(rec)
+}
+
+// AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp, but instead of appending
+// one record to an existing batch, it appends a new batch containing one record to the fetch response.
+// Since transactions are handled at the batch level (the whole batch is either committed or aborted),
+// use this helper to test transactions.
+func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) {
+	frb := r.getOrCreateBlock(topic, partition)
+	kb, vb := encodeKV(key, value)
+
+	records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
+	batch := &RecordBatch{
+		Version:         2,
+		LogAppendTime:   r.LogAppendTime,
+		FirstTimestamp:  timestamp,
+		MaxTimestamp:    r.Timestamp,
+		FirstOffset:     offset,
+		LastOffsetDelta: 0,
+		ProducerID:      producerID,
+		IsTransactional: isTransactional,
+	}
+	rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
+	batch.addRecord(rec)
+	records.RecordBatch = batch
+
+	frb.RecordsSet = append(frb.RecordsSet, &records)
+}
+
+func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) {
+	frb := r.getOrCreateBlock(topic, partition)
+
+	// batch
+	batch := &RecordBatch{
+		Version:         2,
+		LogAppendTime:   r.LogAppendTime,
+		FirstTimestamp:  timestamp,
+		MaxTimestamp:    r.Timestamp,
+		FirstOffset:     offset,
+		LastOffsetDelta: 0,
+		ProducerID:      producerID,
+		IsTransactional: true,
+		Control:         true,
+	}
+
+	// records
+	records := newDefaultRecords(nil)
+	records.RecordBatch = batch
+
+	// record
+	crAbort := ControlRecord{
+		Version: 0,
+		Type:    recordType,
+	}
+	crKey := &realEncoder{raw: make([]byte, 4)}
+	crValue := &realEncoder{raw: make([]byte, 6)}
+	crAbort.encode(crKey, crValue)
+	rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
+	batch.addRecord(rec)
+
+	frb.RecordsSet = append(frb.RecordsSet, &records)
+}
+
+func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
+	r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0)
+}
+
+func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
+	r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{})
+}
+
+func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) {
+	r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{})
+}
+
+func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) {
+	// Delegate to the timestamped variant with a zero timestamp.
+	r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{})
+}
+
+func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
+	frb := r.getOrCreateBlock(topic, partition)
+	if len(frb.RecordsSet) == 0 {
+		records := newDefaultRecords(&RecordBatch{Version: 2})
+		frb.RecordsSet = []*Records{&records}
+	}
+	batch := frb.RecordsSet[0].RecordBatch
+	batch.LastOffsetDelta = offset
+}
+
+func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
+	frb := r.getOrCreateBlock(topic, partition)
+	frb.LastStableOffset = offset
+}
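
The Add* helpers above make it easy to fabricate responses, e.g. for mock brokers in tests; a hedged sketch that builds one legacy message and one v2 record batch (StringEncoder is sarama's string implementation of the exported Encoder interface):

    resp := &FetchResponse{Version: 4}
    // Legacy message set (pre-0.11 format).
    resp.AddMessage("my-topic", 0, nil, StringEncoder("hello"), 0)
    // v2 record batch, as used when testing transactions.
    resp.AddRecordBatch("my-topic", 1, nil, StringEncoder("world"), 0,
        1000 /* producerID */, false /* isTransactional */)
    block := resp.GetBlock("my-topic", 0) // inspect what was added
    _ = block
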
diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/Shopify/sarama/find_coordinator_request.go
new file mode 100644
index 0000000..ff2ad20
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/find_coordinator_request.go
@@ -0,0 +1,61 @@
+package sarama
+
+type CoordinatorType int8
+
+const (
+	CoordinatorGroup CoordinatorType = iota
+	CoordinatorTransaction
+)
+
+type FindCoordinatorRequest struct {
+	Version         int16
+	CoordinatorKey  string
+	CoordinatorType CoordinatorType
+}
+
+func (f *FindCoordinatorRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(f.CoordinatorKey); err != nil {
+		return err
+	}
+
+	if f.Version >= 1 {
+		pe.putInt8(int8(f.CoordinatorType))
+	}
+
+	return nil
+}
+
+func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) {
+	if f.CoordinatorKey, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if version >= 1 {
+		f.Version = version
+		coordinatorType, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+
+		f.CoordinatorType = CoordinatorType(coordinatorType)
+	}
+
+	return nil
+}
+
+func (f *FindCoordinatorRequest) key() int16 {
+	return 10
+}
+
+func (f *FindCoordinatorRequest) version() int16 {
+	return f.Version
+}
+
+func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion {
+	switch f.Version {
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_8_2_0
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/Shopify/sarama/find_coordinator_response.go
new file mode 100644
index 0000000..9c900e8
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/find_coordinator_response.go
@@ -0,0 +1,92 @@
+package sarama
+
+import (
+	"time"
+)
+
+var NoNode = &Broker{id: -1, addr: ":-1"}
+
+type FindCoordinatorResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Err          KError
+	ErrMsg       *string
+	Coordinator  *Broker
+}
+
+func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) {
+	if version >= 1 {
+		f.Version = version
+
+		throttleTime, err := pd.getInt32()
+		if err != nil {
+			return err
+		}
+		f.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+	}
+
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	f.Err = KError(tmp)
+
+	if version >= 1 {
+		if f.ErrMsg, err = pd.getNullableString(); err != nil {
+			return err
+		}
+	}
+
+	coordinator := new(Broker)
+	// The broker decode version is hardcoded to 0, because version 1 of the broker
+	// decode includes the rack field, which is not present in the FindCoordinatorResponse.
+	if err := coordinator.decode(pd, 0); err != nil {
+		return err
+	}
+	if coordinator.addr == ":0" {
+		return nil
+	}
+	f.Coordinator = coordinator
+
+	return nil
+}
+
+func (f *FindCoordinatorResponse) encode(pe packetEncoder) error {
+	if f.Version >= 1 {
+		pe.putInt32(int32(f.ThrottleTime / time.Millisecond))
+	}
+
+	pe.putInt16(int16(f.Err))
+
+	if f.Version >= 1 {
+		if err := pe.putNullableString(f.ErrMsg); err != nil {
+			return err
+		}
+	}
+
+	coordinator := f.Coordinator
+	if coordinator == nil {
+		coordinator = NoNode
+	}
+	if err := coordinator.encode(pe, 0); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (f *FindCoordinatorResponse) key() int16 {
+	return 10
+}
+
+func (f *FindCoordinatorResponse) version() int16 {
+	return f.Version
+}
+
+func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion {
+	switch f.Version {
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_8_2_0
+	}
+}
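
The request/response pair above locates either a group or a transaction coordinator; a minimal hedged sketch of forming a v1 request for a consumer group:

    req := &FindCoordinatorRequest{
        Version:         1,          // v1 adds CoordinatorType and requires 0.11+
        CoordinatorKey:  "my-group", // a group ID here; a transactional ID for CoordinatorTransaction
        CoordinatorType: CoordinatorGroup,
    }
    // On encode, a nil response Coordinator is substituted with NoNode,
    // whose addr ":-1" stands in for "no coordinator".
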
diff --git a/vendor/github.com/Shopify/sarama/go.mod b/vendor/github.com/Shopify/sarama/go.mod
new file mode 100644
index 0000000..8ba2c91
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/go.mod
@@ -0,0 +1,29 @@
+module github.com/Shopify/sarama
+
+go 1.13
+
+require (
+	github.com/Shopify/toxiproxy v2.1.4+incompatible
+	github.com/davecgh/go-spew v1.1.1
+	github.com/eapache/go-resiliency v1.1.0
+	github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21
+	github.com/eapache/queue v1.1.0
+	github.com/fortytw2/leaktest v1.3.0
+	github.com/frankban/quicktest v1.4.1 // indirect
+	github.com/golang/snappy v0.0.1 // indirect
+	github.com/hashicorp/go-uuid v1.0.1 // indirect
+	github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 // indirect
+	github.com/klauspost/compress v1.9.7
+	github.com/pierrec/lz4 v2.2.6+incompatible
+	github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a
+	github.com/stretchr/testify v1.3.0
+	github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c
+	github.com/xdg/stringprep v1.0.0 // indirect
+	golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 // indirect
+	golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3
+	gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect
+	gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect
+	gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
+	gopkg.in/jcmturner/gokrb5.v7 v7.2.3
+	gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect
+)
diff --git a/vendor/github.com/Shopify/sarama/go.sum b/vendor/github.com/Shopify/sarama/go.sum
new file mode 100644
index 0000000..7f61258
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/go.sum
@@ -0,0 +1,69 @@
+github.com/DataDog/zstd v1.4.0 h1:vhoV+DUHnRZdKW1i5UMjAk2G4JY8wN4ayRfYDNdEhwo=
+github.com/DataDog/zstd v1.4.0/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/frankban/quicktest v1.4.1 h1:Wv2VwvNn73pAdFIVUQRXYDFp31lXKbqblIXo/Q5GPSg=
+github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ=
+github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM=
+github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/klauspost/compress v1.8.1 h1:oygt2ychZFHOB6M9gUgajzgKrwRgHbGC77NwA4COVgI=
+github.com/klauspost/compress v1.8.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.8.2 h1:Bx0qjetmNjdFXASH02NSAREKpiaDwkO1DRZ3dV2KCcs=
+github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.7 h1:hYW1gP94JUmAhBtJ+LNz5My+gBobDxPR1iVuKug26aA=
+github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw=
+github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
+github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 h1:bselrhR0Or1vomJZC8ZIjWtbDmn9OYFLX5Ik9alpJpE=
+golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
+gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
+gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=
+gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
+gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI=
+gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
+gopkg.in/jcmturner/gokrb5.v7 v7.2.3 h1:hHMV/yKPwMnJhPuPx7pH2Uw/3Qyf+thJYlisUc44010=
+gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
+gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
+gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
diff --git a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go
new file mode 100644
index 0000000..57f3ecb
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go
@@ -0,0 +1,258 @@
+package sarama
+
+import (
+	"encoding/asn1"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"gopkg.in/jcmturner/gokrb5.v7/asn1tools"
+	"gopkg.in/jcmturner/gokrb5.v7/gssapi"
+	"gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype"
+	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
+	"gopkg.in/jcmturner/gokrb5.v7/messages"
+	"gopkg.in/jcmturner/gokrb5.v7/types"
+)
+
+const (
+	TOK_ID_KRB_AP_REQ   = 256
+	GSS_API_GENERIC_TAG = 0x60
+	KRB5_USER_AUTH      = 1
+	KRB5_KEYTAB_AUTH    = 2
+	GSS_API_INITIAL     = 1
+	GSS_API_VERIFY      = 2
+	GSS_API_FINISH      = 3
+)
+
+type GSSAPIConfig struct {
+	AuthType           int
+	KeyTabPath         string
+	KerberosConfigPath string
+	ServiceName        string
+	Username           string
+	Password           string
+	Realm              string
+}
+
+type GSSAPIKerberosAuth struct {
+	Config                *GSSAPIConfig
+	ticket                messages.Ticket
+	encKey                types.EncryptionKey
+	NewKerberosClientFunc func(config *GSSAPIConfig) (KerberosClient, error)
+	step                  int
+}
+
+type KerberosClient interface {
+	Login() error
+	GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error)
+	Domain() string
+	CName() types.PrincipalName
+	Destroy()
+}
+
+// writePackage appends the length in big-endian before the payload and sends it to Kafka.
+
+func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) {
+	length := len(payload)
+	finalPackage := make([]byte, length+4) //4 byte length header + payload
+	copy(finalPackage[4:], payload)
+	binary.BigEndian.PutUint32(finalPackage, uint32(length))
+	return broker.conn.Write(finalPackage)
+}
+
+// readPackage reads a 4-byte big-endian length and then reads that many payload bytes.
+
+func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) {
+	bytesRead := 0
+	lengthInBytes := make([]byte, 4)
+	bytes, err := io.ReadFull(broker.conn, lengthInBytes)
+	if err != nil {
+		return nil, bytesRead, err
+	}
+	bytesRead += bytes
+	payloadLength := binary.BigEndian.Uint32(lengthInBytes)
+	payloadBytes := make([]byte, payloadLength)         // buffer for read..
+	bytes, err = io.ReadFull(broker.conn, payloadBytes) // read bytes
+	if err != nil {
+		return payloadBytes, bytesRead, err
+	}
+	bytesRead += bytes
+	return payloadBytes, bytesRead, nil
+}
+
+func (krbAuth *GSSAPIKerberosAuth) newAuthenticatorChecksum() []byte {
+	a := make([]byte, 24)
+	flags := []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf}
+	binary.LittleEndian.PutUint32(a[:4], 16)
+	for _, i := range flags {
+		f := binary.LittleEndian.Uint32(a[20:24])
+		f |= uint32(i)
+		binary.LittleEndian.PutUint32(a[20:24], f)
+	}
+	return a
+}
+
+// createKrb5Token constructs a Kerberos AP_REQ package, conforming to RFC 4120:
+// https://tools.ietf.org/html/rfc4120#page-84
+func (krbAuth *GSSAPIKerberosAuth) createKrb5Token(
+	domain string, cname types.PrincipalName,
+	ticket messages.Ticket,
+	sessionKey types.EncryptionKey) ([]byte, error) {
+	auth, err := types.NewAuthenticator(domain, cname)
+	if err != nil {
+		return nil, err
+	}
+	auth.Cksum = types.Checksum{
+		CksumType: chksumtype.GSSAPI,
+		Checksum:  krbAuth.newAuthenticatorChecksum(),
+	}
+	APReq, err := messages.NewAPReq(
+		ticket,
+		sessionKey,
+		auth,
+	)
+	if err != nil {
+		return nil, err
+	}
+	aprBytes := make([]byte, 2)
+	binary.BigEndian.PutUint16(aprBytes, TOK_ID_KRB_AP_REQ)
+	tb, err := APReq.Marshal()
+	if err != nil {
+		return nil, err
+	}
+	aprBytes = append(aprBytes, tb...)
+	return aprBytes, nil
+}
+
+// appendGSSAPIHeader appends the GSS-API header to the payload, conforming to RFC 2743
+// Section 3.1, Mechanism-Independent Token Format:
+// https://tools.ietf.org/html/rfc2743#page-81
+//
+//	GSSAPIHeader + <specific mechanism payload>
+func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) {
+	oidBytes, err := asn1.Marshal(gssapi.OID(gssapi.OIDKRB5))
+	if err != nil {
+		return nil, err
+	}
+	tkoLengthBytes := asn1tools.MarshalLengthBytes(len(oidBytes) + len(payload))
+	GSSHeader := append([]byte{GSS_API_GENERIC_TAG}, tkoLengthBytes...)
+	GSSHeader = append(GSSHeader, oidBytes...)
+	GSSPackage := append(GSSHeader, payload...)
+	return GSSPackage, nil
+}
+
+func (krbAuth *GSSAPIKerberosAuth) initSecContext(bytes []byte, kerberosClient KerberosClient) ([]byte, error) {
+	switch krbAuth.step {
+	case GSS_API_INITIAL:
+		aprBytes, err := krbAuth.createKrb5Token(
+			kerberosClient.Domain(),
+			kerberosClient.CName(),
+			krbAuth.ticket,
+			krbAuth.encKey)
+		if err != nil {
+			return nil, err
+		}
+		krbAuth.step = GSS_API_VERIFY
+		return krbAuth.appendGSSAPIHeader(aprBytes)
+	case GSS_API_VERIFY:
+		wrapTokenReq := gssapi.WrapToken{}
+		if err := wrapTokenReq.Unmarshal(bytes, true); err != nil {
+			return nil, err
+		}
+		// Validate the response; surface an explicit error if token verification fails.
+		isValid, err := wrapTokenReq.Verify(krbAuth.encKey, keyusage.GSSAPI_ACCEPTOR_SEAL)
+		if !isValid {
+			if err == nil {
+				err = fmt.Errorf("gssapi: wrap token failed validation")
+			}
+			return nil, err
+		}
+
+		wrapTokenResponse, err := gssapi.NewInitiatorWrapToken(wrapTokenReq.Payload, krbAuth.encKey)
+		if err != nil {
+			return nil, err
+		}
+		krbAuth.step = GSS_API_FINISH
+		return wrapTokenResponse.Marshal()
+	}
+	return nil, nil
+}
+
+// Authorize performs the GSS-API/Kerberos handshake that authorizes the client to the broker.
+func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error {
+	kerberosClient, err := krbAuth.NewKerberosClientFunc(krbAuth.Config)
+	if err != nil {
+		Logger.Printf("Kerberos client error: %s", err)
+		return err
+	}
+
+	err = kerberosClient.Login()
+	if err != nil {
+		Logger.Printf("Kerberos client error: %s", err)
+		return err
+	}
+	// Construct SPN using serviceName and host
+	// SPN format: <SERVICE>/<FQDN>
+
+	host := strings.SplitN(broker.addr, ":", 2)[0] // Strip port part
+	spn := fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host)
+
+	ticket, encKey, err := kerberosClient.GetServiceTicket(spn)
+
+	if err != nil {
+		Logger.Printf("Error getting Kerberos service ticket : %s", err)
+		return err
+	}
+	krbAuth.ticket = ticket
+	krbAuth.encKey = encKey
+	krbAuth.step = GSS_API_INITIAL
+	var receivedBytes []byte
+	defer kerberosClient.Destroy()
+	for {
+		packBytes, err := krbAuth.initSecContext(receivedBytes, kerberosClient)
+		if err != nil {
+			Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
+			return err
+		}
+		requestTime := time.Now()
+		bytesWritten, err := krbAuth.writePackage(broker, packBytes)
+		if err != nil {
+			Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
+			return err
+		}
+		broker.updateOutgoingCommunicationMetrics(bytesWritten)
+		if krbAuth.step == GSS_API_VERIFY {
+			var bytesRead = 0
+			receivedBytes, bytesRead, err = krbAuth.readPackage(broker)
+			requestLatency := time.Since(requestTime)
+			broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
+			if err != nil {
+				Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
+				return err
+			}
+		} else if krbAuth.step == GSS_API_FINISH {
+			return nil
+		}
+	}
+}
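+
+// A minimal configuration sketch for exercising this handshake (the paths,
+// principal, and realm below are illustrative assumptions, not defaults):
+//
+//	conf := NewConfig()
+//	conf.Net.SASL.Enable = true
+//	conf.Net.SASL.Mechanism = SASLTypeGSSAPI
+//	conf.Net.SASL.GSSAPI.ServiceName = "kafka"
+//	conf.Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH
+//	conf.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf"
+//	conf.Net.SASL.GSSAPI.KeyTabPath = "/etc/security/kafka.keytab"
+//	conf.Net.SASL.GSSAPI.Username = "client"
+//	conf.Net.SASL.GSSAPI.Realm = "EXAMPLE.COM"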
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go
new file mode 100644
index 0000000..ce49c47
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go
@@ -0,0 +1,47 @@
+package sarama
+
+type HeartbeatRequest struct {
+	GroupId      string
+	GenerationId int32
+	MemberId     string
+}
+
+func (r *HeartbeatRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.GroupId); err != nil {
+		return err
+	}
+
+	pe.putInt32(r.GenerationId)
+
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
+	if r.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+	if r.GenerationId, err = pd.getInt32(); err != nil {
+		return
+	}
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+
+	return nil
+}
+
+func (r *HeartbeatRequest) key() int16 {
+	return 12
+}
+
+func (r *HeartbeatRequest) version() int16 {
+	return 0
+}
+
+func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go
new file mode 100644
index 0000000..766f5fd
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go
@@ -0,0 +1,32 @@
+package sarama
+
+type HeartbeatResponse struct {
+	Err KError
+}
+
+func (r *HeartbeatResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	return nil
+}
+
+func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.Err = KError(kerr)
+
+	return nil
+}
+
+func (r *HeartbeatResponse) key() int16 {
+	return 12
+}
+
+func (r *HeartbeatResponse) version() int16 {
+	return 0
+}
+
+func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go
new file mode 100644
index 0000000..8ceb6c2
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/init_producer_id_request.go
@@ -0,0 +1,43 @@
+package sarama
+
+import "time"
+
+type InitProducerIDRequest struct {
+	TransactionalID    *string
+	TransactionTimeout time.Duration
+}
+
+func (i *InitProducerIDRequest) encode(pe packetEncoder) error {
+	if err := pe.putNullableString(i.TransactionalID); err != nil {
+		return err
+	}
+	pe.putInt32(int32(i.TransactionTimeout / time.Millisecond))
+
+	return nil
+}
+
+func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) {
+	if i.TransactionalID, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	i.TransactionTimeout = time.Duration(timeout) * time.Millisecond
+
+	return nil
+}
+
+func (i *InitProducerIDRequest) key() int16 {
+	return 22
+}
+
+func (i *InitProducerIDRequest) version() int16 {
+	return 0
+}
+
+func (i *InitProducerIDRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go
new file mode 100644
index 0000000..1b32eb0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/init_producer_id_response.go
@@ -0,0 +1,55 @@
+package sarama
+
+import "time"
+
+type InitProducerIDResponse struct {
+	ThrottleTime  time.Duration
+	Err           KError
+	ProducerID    int64
+	ProducerEpoch int16
+}
+
+func (i *InitProducerIDResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(i.ThrottleTime / time.Millisecond))
+	pe.putInt16(int16(i.Err))
+	pe.putInt64(i.ProducerID)
+	pe.putInt16(i.ProducerEpoch)
+
+	return nil
+}
+
+func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	i.Err = KError(kerr)
+
+	if i.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if i.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (i *InitProducerIDResponse) key() int16 {
+	return 22
+}
+
+func (i *InitProducerIDResponse) version() int16 {
+	return 0
+}
+
+func (i *InitProducerIDResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go
new file mode 100644
index 0000000..97e9299
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/join_group_request.go
@@ -0,0 +1,163 @@
+package sarama
+
+type GroupProtocol struct {
+	Name     string
+	Metadata []byte
+}
+
+func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
+	p.Name, err = pd.getString()
+	if err != nil {
+		return err
+	}
+	p.Metadata, err = pd.getBytes()
+	return err
+}
+
+func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
+	if err := pe.putString(p.Name); err != nil {
+		return err
+	}
+	if err := pe.putBytes(p.Metadata); err != nil {
+		return err
+	}
+	return nil
+}
+
+type JoinGroupRequest struct {
+	Version               int16
+	GroupId               string
+	SessionTimeout        int32
+	RebalanceTimeout      int32
+	MemberId              string
+	ProtocolType          string
+	GroupProtocols        map[string][]byte // deprecated; use OrderedGroupProtocols
+	OrderedGroupProtocols []*GroupProtocol
+}
+
+func (r *JoinGroupRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.GroupId); err != nil {
+		return err
+	}
+	pe.putInt32(r.SessionTimeout)
+	if r.Version >= 1 {
+		pe.putInt32(r.RebalanceTimeout)
+	}
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+	if err := pe.putString(r.ProtocolType); err != nil {
+		return err
+	}
+
+	if len(r.GroupProtocols) > 0 {
+		if len(r.OrderedGroupProtocols) > 0 {
+			return PacketEncodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
+		}
+
+		if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
+			return err
+		}
+		for name, metadata := range r.GroupProtocols {
+			if err := pe.putString(name); err != nil {
+				return err
+			}
+			if err := pe.putBytes(metadata); err != nil {
+				return err
+			}
+		}
+	} else {
+		if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
+			return err
+		}
+		for _, protocol := range r.OrderedGroupProtocols {
+			if err := protocol.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+
+	if r.SessionTimeout, err = pd.getInt32(); err != nil {
+		return
+	}
+
+	if version >= 1 {
+		if r.RebalanceTimeout, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+
+	if r.ProtocolType, err = pd.getString(); err != nil {
+		return
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	r.GroupProtocols = make(map[string][]byte)
+	for i := 0; i < n; i++ {
+		protocol := &GroupProtocol{}
+		if err := protocol.decode(pd); err != nil {
+			return err
+		}
+		r.GroupProtocols[protocol.Name] = protocol.Metadata
+		r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
+	}
+
+	return nil
+}
+
+func (r *JoinGroupRequest) key() int16 {
+	return 11
+}
+
+func (r *JoinGroupRequest) version() int16 {
+	return r.Version
+}
+
+func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V0_11_0_0
+	case 1:
+		return V0_10_1_0
+	default:
+		return V0_9_0_0
+	}
+}
+
+func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
+	r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
+		Name:     name,
+		Metadata: metadata,
+	})
+}
+
+func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
+	bin, err := encode(metadata, nil)
+	if err != nil {
+		return err
+	}
+
+	r.AddGroupProtocol(name, bin)
+	return nil
+}
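+
+// A usage sketch (group, topic, and protocol names are illustrative). Prefer
+// AddGroupProtocol/AddGroupProtocolMetadata over the deprecated GroupProtocols
+// map, since they preserve protocol preference order:
+//
+//	req := &JoinGroupRequest{
+//		Version:        1,
+//		GroupId:        "example-group",
+//		SessionTimeout: 30000,
+//		ProtocolType:   "consumer",
+//	}
+//	_ = req.AddGroupProtocolMetadata("range", &ConsumerGroupMemberMetadata{
+//		Version: 1,
+//		Topics:  []string{"example-topic"},
+//	})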
diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go
new file mode 100644
index 0000000..5752acc
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/join_group_response.go
@@ -0,0 +1,135 @@
+package sarama
+
+type JoinGroupResponse struct {
+	Version       int16
+	ThrottleTime  int32
+	Err           KError
+	GenerationId  int32
+	GroupProtocol string
+	LeaderId      string
+	MemberId      string
+	Members       map[string][]byte
+}
+
+func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
+	members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
+	for id, bin := range r.Members {
+		meta := new(ConsumerGroupMemberMetadata)
+		if err := decode(bin, meta); err != nil {
+			return nil, err
+		}
+		members[id] = *meta
+	}
+	return members, nil
+}
+
+func (r *JoinGroupResponse) encode(pe packetEncoder) error {
+	if r.Version >= 2 {
+		pe.putInt32(r.ThrottleTime)
+	}
+	pe.putInt16(int16(r.Err))
+	pe.putInt32(r.GenerationId)
+
+	if err := pe.putString(r.GroupProtocol); err != nil {
+		return err
+	}
+	if err := pe.putString(r.LeaderId); err != nil {
+		return err
+	}
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Members)); err != nil {
+		return err
+	}
+
+	for memberId, memberMetadata := range r.Members {
+		if err := pe.putString(memberId); err != nil {
+			return err
+		}
+
+		if err := pe.putBytes(memberMetadata); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if version >= 2 {
+		if r.ThrottleTime, err = pd.getInt32(); err != nil {
+			return
+		}
+	}
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	if r.GenerationId, err = pd.getInt32(); err != nil {
+		return
+	}
+
+	if r.GroupProtocol, err = pd.getString(); err != nil {
+		return
+	}
+
+	if r.LeaderId, err = pd.getString(); err != nil {
+		return
+	}
+
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	r.Members = make(map[string][]byte)
+	for i := 0; i < n; i++ {
+		memberId, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		memberMetadata, err := pd.getBytes()
+		if err != nil {
+			return err
+		}
+
+		r.Members[memberId] = memberMetadata
+	}
+
+	return nil
+}
+
+func (r *JoinGroupResponse) key() int16 {
+	return 11
+}
+
+func (r *JoinGroupResponse) version() int16 {
+	return r.Version
+}
+
+func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V0_11_0_0
+	case 1:
+		return V0_10_1_0
+	default:
+		return V0_9_0_0
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/kerberos_client.go b/vendor/github.com/Shopify/sarama/kerberos_client.go
new file mode 100644
index 0000000..91b998f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/kerberos_client.go
@@ -0,0 +1,51 @@
+package sarama
+
+import (
+	krb5client "gopkg.in/jcmturner/gokrb5.v7/client"
+	krb5config "gopkg.in/jcmturner/gokrb5.v7/config"
+	"gopkg.in/jcmturner/gokrb5.v7/keytab"
+	"gopkg.in/jcmturner/gokrb5.v7/types"
+)
+
+type KerberosGoKrb5Client struct {
+	krb5client.Client
+}
+
+func (c *KerberosGoKrb5Client) Domain() string {
+	return c.Credentials.Domain()
+}
+
+func (c *KerberosGoKrb5Client) CName() types.PrincipalName {
+	return c.Credentials.CName()
+}
+
+// NewKerberosClient creates the Kerberos client used to obtain TGT and TGS
+// tokens. It uses the gokrb5 library, a pure Go Kerberos client with some
+// GSS-API capabilities and SPNEGO support. Kafka does not use SPNEGO; it uses
+// a pure Kerberos 5 solution (RFC 4121 and RFC 4120).
+func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) {
+	cfg, err := krb5config.Load(config.KerberosConfigPath)
+	if err != nil {
+		return nil, err
+	}
+	return createClient(config, cfg)
+}
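+
+// A usage sketch (paths, principal, and password are illustrative
+// assumptions); the same GSSAPIConfig carried in Config.Net.SASL.GSSAPI can be
+// passed here directly:
+//
+//	client, err := NewKerberosClient(&GSSAPIConfig{
+//		AuthType:           KRB5_USER_AUTH,
+//		KerberosConfigPath: "/etc/krb5.conf",
+//		Username:           "client",
+//		Password:           "qwerty",
+//		Realm:              "EXAMPLE.COM",
+//	})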
+
+func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) {
+	var client *krb5client.Client
+	if config.AuthType == KRB5_KEYTAB_AUTH {
+		kt, err := keytab.Load(config.KeyTabPath)
+		if err != nil {
+			return nil, err
+		}
+		client = krb5client.NewClientWithKeytab(config.Username, config.Realm, kt, cfg)
+	} else {
+		client = krb5client.NewClientWithPassword(config.Username,
+			config.Realm, config.Password, cfg)
+	}
+	return &KerberosGoKrb5Client{*client}, nil
+}
diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go
new file mode 100644
index 0000000..e177427
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/leave_group_request.go
@@ -0,0 +1,40 @@
+package sarama
+
+type LeaveGroupRequest struct {
+	GroupId  string
+	MemberId string
+}
+
+func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.GroupId); err != nil {
+		return err
+	}
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+	if r.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+
+	return nil
+}
+
+func (r *LeaveGroupRequest) key() int16 {
+	return 13
+}
+
+func (r *LeaveGroupRequest) version() int16 {
+	return 0
+}
+
+func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go
new file mode 100644
index 0000000..d60c626
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/leave_group_response.go
@@ -0,0 +1,32 @@
+package sarama
+
+type LeaveGroupResponse struct {
+	Err KError
+}
+
+func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	return nil
+}
+
+func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.Err = KError(kerr)
+
+	return nil
+}
+
+func (r *LeaveGroupResponse) key() int16 {
+	return 13
+}
+
+func (r *LeaveGroupResponse) version() int16 {
+	return 0
+}
+
+func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go
new file mode 100644
index 0000000..7d864f6
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/length_field.go
@@ -0,0 +1,99 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"sync"
+)
+
+// lengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths.
+type lengthField struct {
+	startOffset int
+	length      int32
+}
+
+var lengthFieldPool = sync.Pool{}
+
+func acquireLengthField() *lengthField {
+	val := lengthFieldPool.Get()
+	if val != nil {
+		return val.(*lengthField)
+	}
+	return &lengthField{}
+}
+
+func releaseLengthField(m *lengthField) {
+	lengthFieldPool.Put(m)
+}
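+
+// The pool avoids allocating a fresh lengthField for every message block on
+// the hot decode path; callers acquire one before pushing it onto the decoder
+// and release it once the decoder has been popped.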
+
+func (l *lengthField) decode(pd packetDecoder) error {
+	var err error
+	l.length, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if l.length > int32(pd.remaining()) {
+		return ErrInsufficientData
+	}
+	return nil
+}
+
+func (l *lengthField) saveOffset(in int) {
+	l.startOffset = in
+}
+
+func (l *lengthField) reserveLength() int {
+	return 4
+}
+
+func (l *lengthField) run(curOffset int, buf []byte) error {
+	binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
+	return nil
+}
+
+func (l *lengthField) check(curOffset int, buf []byte) error {
+	if int32(curOffset-l.startOffset-4) != l.length {
+		return PacketDecodingError{"length field invalid"}
+	}
+
+	return nil
+}
+
+type varintLengthField struct {
+	startOffset int
+	length      int64
+}
+
+func (l *varintLengthField) decode(pd packetDecoder) error {
+	var err error
+	l.length, err = pd.getVarint()
+	return err
+}
+
+func (l *varintLengthField) saveOffset(in int) {
+	l.startOffset = in
+}
+
+func (l *varintLengthField) adjustLength(currOffset int) int {
+	oldFieldSize := l.reserveLength()
+	l.length = int64(currOffset - l.startOffset - oldFieldSize)
+
+	return l.reserveLength() - oldFieldSize
+}
+
+func (l *varintLengthField) reserveLength() int {
+	var tmp [binary.MaxVarintLen64]byte
+	return binary.PutVarint(tmp[:], l.length)
+}
+
+func (l *varintLengthField) run(curOffset int, buf []byte) error {
+	binary.PutVarint(buf[l.startOffset:], l.length)
+	return nil
+}
+
+func (l *varintLengthField) check(curOffset int, buf []byte) error {
+	if int64(curOffset-l.startOffset-l.reserveLength()) != l.length {
+		return PacketDecodingError{"length field invalid"}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go
new file mode 100644
index 0000000..3b16abf
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_groups_request.go
@@ -0,0 +1,24 @@
+package sarama
+
+type ListGroupsRequest struct {
+}
+
+func (r *ListGroupsRequest) encode(pe packetEncoder) error {
+	return nil
+}
+
+func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+	return nil
+}
+
+func (r *ListGroupsRequest) key() int16 {
+	return 16
+}
+
+func (r *ListGroupsRequest) version() int16 {
+	return 0
+}
+
+func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go
new file mode 100644
index 0000000..56115d4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_groups_response.go
@@ -0,0 +1,69 @@
+package sarama
+
+type ListGroupsResponse struct {
+	Err    KError
+	Groups map[string]string
+}
+
+func (r *ListGroupsResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+
+	if err := pe.putArrayLength(len(r.Groups)); err != nil {
+		return err
+	}
+	for groupId, protocolType := range r.Groups {
+		if err := pe.putString(groupId); err != nil {
+			return err
+		}
+		if err := pe.putString(protocolType); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	r.Groups = make(map[string]string)
+	for i := 0; i < n; i++ {
+		groupId, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		protocolType, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		r.Groups[groupId] = protocolType
+	}
+
+	return nil
+}
+
+func (r *ListGroupsResponse) key() int16 {
+	return 16
+}
+
+func (r *ListGroupsResponse) version() int16 {
+	return 0
+}
+
+func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go
new file mode 100644
index 0000000..7c54748
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/message.go
@@ -0,0 +1,175 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+const (
+	//CompressionNone no compression
+	CompressionNone CompressionCodec = iota
+	//CompressionGZIP compression using GZIP
+	CompressionGZIP
+	//CompressionSnappy compression using snappy
+	CompressionSnappy
+	//CompressionLZ4 compression using LZ4
+	CompressionLZ4
+	//CompressionZSTD compression using ZSTD
+	CompressionZSTD
+
+	// The lowest 3 bits contain the compression codec used for the message
+	compressionCodecMask int8 = 0x07
+
+	// Bit 3 set for "LogAppend" timestamps
+	timestampTypeMask = 0x08
+
+	// CompressionLevelDefault is the constant to use in CompressionLevel
+	// to have the default compression level for any codec. The value is
+	// picked so that it does not collide with any existing compression level.
+	CompressionLevelDefault = -1000
+)
+
+// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
+type CompressionCodec int8
+
+func (cc CompressionCodec) String() string {
+	return []string{
+		"none",
+		"gzip",
+		"snappy",
+		"lz4",
+		"zstd",
+	}[int(cc)]
+}
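+
+// A configuration sketch: producers select a codec, and optionally a level,
+// through the public Config:
+//
+//	conf := NewConfig()
+//	conf.Producer.Compression = CompressionSnappy
+//	conf.Producer.CompressionLevel = CompressionLevelDefault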
+
+//Message is a kafka message type
+type Message struct {
+	Codec            CompressionCodec // codec used to compress the message contents
+	CompressionLevel int              // compression level
+	LogAppendTime    bool             // the used timestamp is LogAppendTime
+	Key              []byte           // the message key, may be nil
+	Value            []byte           // the message contents
+	Set              *MessageSet      // the message set a message might wrap
+	Version          int8             // v1 requires Kafka 0.10
+	Timestamp        time.Time        // the timestamp of the message (version 1+ only)
+
+	compressedCache []byte
+	compressedSize  int // used for computing the compression ratio metrics
+}
+
+func (m *Message) encode(pe packetEncoder) error {
+	pe.push(newCRC32Field(crcIEEE))
+
+	pe.putInt8(m.Version)
+
+	attributes := int8(m.Codec) & compressionCodecMask
+	if m.LogAppendTime {
+		attributes |= timestampTypeMask
+	}
+	pe.putInt8(attributes)
+
+	if m.Version >= 1 {
+		if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil {
+			return err
+		}
+	}
+
+	err := pe.putBytes(m.Key)
+	if err != nil {
+		return err
+	}
+
+	var payload []byte
+
+	if m.compressedCache != nil {
+		payload = m.compressedCache
+		m.compressedCache = nil
+	} else if m.Value != nil {
+
+		payload, err = compress(m.Codec, m.CompressionLevel, m.Value)
+		if err != nil {
+			return err
+		}
+		m.compressedCache = payload
+		// Keep in mind the compressed payload size for metric gathering
+		m.compressedSize = len(payload)
+	}
+
+	if err = pe.putBytes(payload); err != nil {
+		return err
+	}
+
+	return pe.pop()
+}
+
+func (m *Message) decode(pd packetDecoder) (err error) {
+	crc32Decoder := acquireCrc32Field(crcIEEE)
+	defer releaseCrc32Field(crc32Decoder)
+
+	err = pd.push(crc32Decoder)
+	if err != nil {
+		return err
+	}
+
+	m.Version, err = pd.getInt8()
+	if err != nil {
+		return err
+	}
+
+	if m.Version > 1 {
+		return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)}
+	}
+
+	attribute, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	m.Codec = CompressionCodec(attribute & compressionCodecMask)
+	m.LogAppendTime = attribute&timestampTypeMask == timestampTypeMask
+
+	if m.Version == 1 {
+		if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil {
+			return err
+		}
+	}
+
+	m.Key, err = pd.getBytes()
+	if err != nil {
+		return err
+	}
+
+	m.Value, err = pd.getBytes()
+	if err != nil {
+		return err
+	}
+
+	// Required for deep equal assertion during tests but might be useful
+	// for future metrics about the compression ratio in fetch requests
+	m.compressedSize = len(m.Value)
+
+	switch m.Codec {
+	case CompressionNone:
+		// nothing to do
+	default:
+		if m.Value == nil {
+			break
+		}
+
+		m.Value, err = decompress(m.Codec, m.Value)
+		if err != nil {
+			return err
+		}
+		if err := m.decodeSet(); err != nil {
+			return err
+		}
+	}
+
+	return pd.pop()
+}
+
+// decodeSet decodes a nested message set from a previously encoded bulk message.
+func (m *Message) decodeSet() (err error) {
+	pd := realDecoder{raw: m.Value}
+	m.Set = &MessageSet{}
+	return m.Set.decode(&pd)
+}
diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go
new file mode 100644
index 0000000..6523ec2
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/message_set.go
@@ -0,0 +1,111 @@
+package sarama
+
+type MessageBlock struct {
+	Offset int64
+	Msg    *Message
+}
+
+// Messages is a convenience helper which returns either all the messages
+// wrapped in this block's nested message set, or the block itself if there is
+// no nested set.
+func (msb *MessageBlock) Messages() []*MessageBlock {
+	if msb.Msg.Set != nil {
+		return msb.Msg.Set.Messages
+	}
+	return []*MessageBlock{msb}
+}
+
+func (msb *MessageBlock) encode(pe packetEncoder) error {
+	pe.putInt64(msb.Offset)
+	pe.push(&lengthField{})
+	err := msb.Msg.encode(pe)
+	if err != nil {
+		return err
+	}
+	return pe.pop()
+}
+
+func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
+	if msb.Offset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	lengthDecoder := acquireLengthField()
+	defer releaseLengthField(lengthDecoder)
+
+	if err = pd.push(lengthDecoder); err != nil {
+		return err
+	}
+
+	msb.Msg = new(Message)
+	if err = msb.Msg.decode(pd); err != nil {
+		return err
+	}
+
+	if err = pd.pop(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+type MessageSet struct {
+	PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
+	OverflowMessage        bool // whether the set on the wire contained an overflow message
+	Messages               []*MessageBlock
+}
+
+func (ms *MessageSet) encode(pe packetEncoder) error {
+	for i := range ms.Messages {
+		err := ms.Messages[i].encode(pe)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (ms *MessageSet) decode(pd packetDecoder) (err error) {
+	ms.Messages = nil
+
+	for pd.remaining() > 0 {
+		magic, err := magicValue(pd)
+		if err != nil {
+			if err == ErrInsufficientData {
+				ms.PartialTrailingMessage = true
+				return nil
+			}
+			return err
+		}
+
+		if magic > 1 {
+			return nil
+		}
+
+		msb := new(MessageBlock)
+		err = msb.decode(pd)
+		switch err {
+		case nil:
+			ms.Messages = append(ms.Messages, msb)
+		case ErrInsufficientData:
+			// As an optimization the server is allowed to return a partial message at the
+			// end of the message set. Clients should handle this case, so we just ignore such messages.
+			if msb.Offset == -1 {
+				// This is an overflow message caused by chunked down conversion
+				ms.OverflowMessage = true
+			} else {
+				ms.PartialTrailingMessage = true
+			}
+			return nil
+		default:
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ms *MessageSet) addMessage(msg *Message) {
+	block := new(MessageBlock)
+	block.Msg = msg
+	ms.Messages = append(ms.Messages, block)
+}
diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go
new file mode 100644
index 0000000..1b590d3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metadata_request.go
@@ -0,0 +1,81 @@
+package sarama
+
+type MetadataRequest struct {
+	Version                int16
+	Topics                 []string
+	AllowAutoTopicCreation bool
+}
+
+func (r *MetadataRequest) encode(pe packetEncoder) error {
+	if r.Version < 0 || r.Version > 5 {
+		return PacketEncodingError{"invalid or unsupported MetadataRequest version field"}
+	}
+	if r.Version == 0 || len(r.Topics) > 0 {
+		err := pe.putArrayLength(len(r.Topics))
+		if err != nil {
+			return err
+		}
+
+		for i := range r.Topics {
+			err = pe.putString(r.Topics[i])
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		pe.putInt32(-1)
+	}
+	if r.Version > 3 {
+		pe.putBool(r.AllowAutoTopicCreation)
+	}
+	return nil
+}
+
+func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
+	r.Version = version
+	size, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if size > 0 {
+		r.Topics = make([]string, size)
+		for i := range r.Topics {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			r.Topics[i] = topic
+		}
+	}
+	if r.Version > 3 {
+		autoCreation, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+		r.AllowAutoTopicCreation = autoCreation
+	}
+	return nil
+}
+
+func (r *MetadataRequest) key() int16 {
+	return 3
+}
+
+func (r *MetadataRequest) version() int16 {
+	return r.Version
+}
+
+func (r *MetadataRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_10_0_0
+	case 2:
+		return V0_10_1_0
+	case 3, 4:
+		return V0_11_0_0
+	case 5:
+		return V1_0_0_0
+	default:
+		return MinVersion
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go
new file mode 100644
index 0000000..b2d532e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metadata_response.go
@@ -0,0 +1,322 @@
+package sarama
+
+type PartitionMetadata struct {
+	Err             KError
+	ID              int32
+	Leader          int32
+	Replicas        []int32
+	Isr             []int32
+	OfflineReplicas []int32
+}
+
+func (pm *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) {
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	pm.Err = KError(tmp)
+
+	pm.ID, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	pm.Leader, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	pm.Replicas, err = pd.getInt32Array()
+	if err != nil {
+		return err
+	}
+
+	pm.Isr, err = pd.getInt32Array()
+	if err != nil {
+		return err
+	}
+
+	if version >= 5 {
+		pm.OfflineReplicas, err = pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (pm *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt16(int16(pm.Err))
+	pe.putInt32(pm.ID)
+	pe.putInt32(pm.Leader)
+
+	err = pe.putInt32Array(pm.Replicas)
+	if err != nil {
+		return err
+	}
+
+	err = pe.putInt32Array(pm.Isr)
+	if err != nil {
+		return err
+	}
+
+	if version >= 5 {
+		err = pe.putInt32Array(pm.OfflineReplicas)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type TopicMetadata struct {
+	Err        KError
+	Name       string
+	IsInternal bool // Only valid for Version >= 1
+	Partitions []*PartitionMetadata
+}
+
+func (tm *TopicMetadata) decode(pd packetDecoder, version int16) (err error) {
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	tm.Err = KError(tmp)
+
+	tm.Name, err = pd.getString()
+	if err != nil {
+		return err
+	}
+
+	if version >= 1 {
+		tm.IsInternal, err = pd.getBool()
+		if err != nil {
+			return err
+		}
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	tm.Partitions = make([]*PartitionMetadata, n)
+	for i := 0; i < n; i++ {
+		tm.Partitions[i] = new(PartitionMetadata)
+		err = tm.Partitions[i].decode(pd, version)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (tm *TopicMetadata) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt16(int16(tm.Err))
+
+	err = pe.putString(tm.Name)
+	if err != nil {
+		return err
+	}
+
+	if version >= 1 {
+		pe.putBool(tm.IsInternal)
+	}
+
+	err = pe.putArrayLength(len(tm.Partitions))
+	if err != nil {
+		return err
+	}
+
+	for _, pm := range tm.Partitions {
+		err = pm.encode(pe, version)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type MetadataResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	Brokers        []*Broker
+	ClusterID      *string
+	ControllerID   int32
+	Topics         []*TopicMetadata
+}
+
+func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if version >= 3 {
+		r.ThrottleTimeMs, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Brokers = make([]*Broker, n)
+	for i := 0; i < n; i++ {
+		r.Brokers[i] = new(Broker)
+		err = r.Brokers[i].decode(pd, version)
+		if err != nil {
+			return err
+		}
+	}
+
+	if version >= 2 {
+		r.ClusterID, err = pd.getNullableString()
+		if err != nil {
+			return err
+		}
+	}
+
+	if version >= 1 {
+		r.ControllerID, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	} else {
+		r.ControllerID = -1
+	}
+
+	n, err = pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Topics = make([]*TopicMetadata, n)
+	for i := 0; i < n; i++ {
+		r.Topics[i] = new(TopicMetadata)
+		err = r.Topics[i].decode(pd, version)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *MetadataResponse) encode(pe packetEncoder) error {
+	if r.Version >= 3 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+
+	err := pe.putArrayLength(len(r.Brokers))
+	if err != nil {
+		return err
+	}
+	for _, broker := range r.Brokers {
+		err = broker.encode(pe, r.Version)
+		if err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 2 {
+		err := pe.putNullableString(r.ClusterID)
+		if err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 1 {
+		pe.putInt32(r.ControllerID)
+	}
+
+	err = pe.putArrayLength(len(r.Topics))
+	if err != nil {
+		return err
+	}
+	for _, tm := range r.Topics {
+		err = tm.encode(pe, r.Version)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *MetadataResponse) key() int16 {
+	return 3
+}
+
+func (r *MetadataResponse) version() int16 {
+	return r.Version
+}
+
+func (r *MetadataResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_10_0_0
+	case 2:
+		return V0_10_1_0
+	case 3, 4:
+		return V0_11_0_0
+	case 5:
+		return V1_0_0_0
+	default:
+		return MinVersion
+	}
+}
+
+// testing API
+
+func (r *MetadataResponse) AddBroker(addr string, id int32) {
+	r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
+}
+
+func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
+	var tmatch *TopicMetadata
+
+	for _, tm := range r.Topics {
+		if tm.Name == topic {
+			tmatch = tm
+			goto foundTopic
+		}
+	}
+
+	tmatch = new(TopicMetadata)
+	tmatch.Name = topic
+	r.Topics = append(r.Topics, tmatch)
+
+foundTopic:
+
+	tmatch.Err = err
+	return tmatch
+}
+
+func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) {
+	tmatch := r.AddTopic(topic, ErrNoError)
+	var pmatch *PartitionMetadata
+
+	for _, pm := range tmatch.Partitions {
+		if pm.ID == partition {
+			pmatch = pm
+			goto foundPartition
+		}
+	}
+
+	pmatch = new(PartitionMetadata)
+	pmatch.ID = partition
+	tmatch.Partitions = append(tmatch.Partitions, pmatch)
+
+foundPartition:
+
+	pmatch.Leader = brokerID
+	pmatch.Replicas = replicas
+	pmatch.Isr = isr
+	pmatch.OfflineReplicas = offline
+	pmatch.Err = err
+
+}
diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go
new file mode 100644
index 0000000..90e5a87
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metrics.go
@@ -0,0 +1,43 @@
+package sarama
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
+// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
+// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
+// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
+const (
+	metricsReservoirSize = 1028
+	metricsAlphaFactor   = 0.015
+)
+
+func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
+	return r.GetOrRegister(name, func() metrics.Histogram {
+		return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
+	}).(metrics.Histogram)
+}
+
+func getMetricNameForBroker(name string, broker *Broker) string {
+	// Use broker id like the Java client as it does not contain '.' or ':' characters that
+	// can be interpreted as special character by monitoring tool (e.g. Graphite)
+	return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
+}
+
+func getMetricNameForTopic(name string, topic string) string {
+	// Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
+	// cf. KAFKA-1902 and KAFKA-2337
+	return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
+}
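+
+// For example, name "request-rate" for the broker with ID 5 becomes
+// "request-rate-for-broker-5", while name "batch-size" for topic "my.topic"
+// becomes "batch-size-for-topic-my_topic".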
+
+func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
+	return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
+}
+
+func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
+	return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
+}
diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go
new file mode 100644
index 0000000..4ed46a6
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockbroker.go
@@ -0,0 +1,403 @@
+package sarama
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"net"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+const (
+	expectationTimeout = 500 * time.Millisecond
+)
+
+type GSSApiHandlerFunc func([]byte) []byte
+
+type requestHandlerFunc func(req *request) (res encoder)
+
+// RequestNotifierFunc is invoked when a mock broker processes a request
+// successfully, providing the number of bytes read and written.
+type RequestNotifierFunc func(bytesRead, bytesWritten int)
+
+// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
+// to facilitate testing of higher level or specialized consumers and producers
+// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
+// but rather provides a facility to do that. It takes care of the TCP
+// transport, request unmarshaling, and response marshaling, and leaves it the
+// test writer's responsibility to program the MockBroker's behaviour correctly
+// according to the Kafka API protocol.
+//
+// MockBroker is implemented as a TCP server listening on a kernel-selected
+// localhost port that can accept many connections. It reads Kafka requests
+// from those connections and returns responses programmed by the SetHandlerByMap
+// function. If a MockBroker receives a request that it has no programmed
+// response for, then it returns nothing and the request times out.
+//
+// A set of MockRequest builders to define mappings used by MockBroker is
+// provided by Sarama. But users can develop MockRequests of their own and use
+// them along with or instead of the standard ones.
+//
+// When running tests with MockBroker it is strongly recommended to specify
+// a timeout to `go test` so that if the broker hangs waiting for a response,
+// the test panics.
+//
+// It is not necessary to prefix message length or correlation ID to your
+// response bytes, the server does that automatically as a convenience.
+type MockBroker struct {
+	brokerID      int32
+	port          int32
+	closing       chan none
+	stopper       chan none
+	expectations  chan encoder
+	listener      net.Listener
+	t             TestReporter
+	latency       time.Duration
+	handler       requestHandlerFunc
+	notifier      RequestNotifierFunc
+	history       []RequestResponse
+	lock          sync.Mutex
+	gssApiHandler GSSApiHandlerFunc
+}
+
+// RequestResponse represents a Request/Response pair processed by MockBroker.
+type RequestResponse struct {
+	Request  protocolBody
+	Response encoder
+}
+
+// SetLatency makes broker pause for the specified period every time before
+// replying.
+func (b *MockBroker) SetLatency(latency time.Duration) {
+	b.latency = latency
+}
+
+// SetHandlerByMap defines mapping of Request types to MockResponses. When a
+// request is received by the broker, it looks up the request type in the map
+// and uses the found MockResponse instance to generate an appropriate reply.
+// If the request type is not found in the map then nothing is sent.
+func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
+	b.setHandler(func(req *request) (res encoder) {
+		reqTypeName := reflect.TypeOf(req.body).Elem().Name()
+		mockResponse := handlerMap[reqTypeName]
+		if mockResponse == nil {
+			return nil
+		}
+		return mockResponse.For(req.body)
+	})
+}
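+
+// A usage sketch (topic name is illustrative; t is the test's TestReporter):
+//
+//	broker := NewMockBroker(t, 1)
+//	defer broker.Close()
+//	broker.SetHandlerByMap(map[string]MockResponse{
+//		"MetadataRequest": NewMockMetadataResponse(t).
+//			SetBroker(broker.Addr(), broker.BrokerID()).
+//			SetLeader("example-topic", 0, broker.BrokerID()),
+//	})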
+
+// SetNotifier sets a function that is invoked whenever a request has been
+// processed successfully, providing the number of bytes read and written.
+func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
+	b.lock.Lock()
+	b.notifier = notifier
+	b.lock.Unlock()
+}
+
+// BrokerID returns broker ID assigned to the broker.
+func (b *MockBroker) BrokerID() int32 {
+	return b.brokerID
+}
+
+// History returns a slice of RequestResponse pairs in the order they were
+// processed by the broker. Note that in case of multiple connections to the
+// broker the order expected by a test can be different from the order recorded
+// in the history, unless some synchronization is implemented in the test.
+func (b *MockBroker) History() []RequestResponse {
+	b.lock.Lock()
+	history := make([]RequestResponse, len(b.history))
+	copy(history, b.history)
+	b.lock.Unlock()
+	return history
+}
+
+// Port returns the TCP port number the broker is listening for requests on.
+func (b *MockBroker) Port() int32 {
+	return b.port
+}
+
+// Addr returns the broker connection string in the form "<address>:<port>".
+func (b *MockBroker) Addr() string {
+	return b.listener.Addr().String()
+}
+
+// Close terminates the broker blocking until it stops internal goroutines and
+// releases all resources.
+func (b *MockBroker) Close() {
+	close(b.expectations)
+	if len(b.expectations) > 0 {
+		buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
+		for e := range b.expectations {
+			_, _ = buf.WriteString(spew.Sdump(e))
+		}
+		b.t.Error(buf.String())
+	}
+	close(b.closing)
+	<-b.stopper
+}
+
+// setHandler sets the specified function as the request handler. Whenever
+// a mock broker reads a request from the wire it passes the request to the
+// function and sends back whatever the handler function returns.
+func (b *MockBroker) setHandler(handler requestHandlerFunc) {
+	b.lock.Lock()
+	b.handler = handler
+	b.lock.Unlock()
+}
+
+func (b *MockBroker) serverLoop() {
+	defer close(b.stopper)
+	var err error
+	var conn net.Conn
+
+	go func() {
+		<-b.closing
+		err := b.listener.Close()
+		if err != nil {
+			b.t.Error(err)
+		}
+	}()
+
+	wg := &sync.WaitGroup{}
+	i := 0
+	for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
+		wg.Add(1)
+		go b.handleRequests(conn, i, wg)
+		i++
+	}
+	wg.Wait()
+	Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
+}
+
+func (b *MockBroker) SetGSSAPIHandler(handler GSSApiHandlerFunc) {
+	b.gssApiHandler = handler
+}
+
+func (b *MockBroker) readToBytes(r io.Reader) ([]byte, error) {
+	var (
+		bytesRead   int
+		lengthBytes = make([]byte, 4)
+	)
+
+	if _, err := io.ReadFull(r, lengthBytes); err != nil {
+		return nil, err
+	}
+
+	bytesRead += len(lengthBytes)
+	length := int32(binary.BigEndian.Uint32(lengthBytes))
+
+	if length <= 4 || length > MaxRequestSize {
+		return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+	}
+
+	encodedReq := make([]byte, length)
+	if _, err := io.ReadFull(r, encodedReq); err != nil {
+		return nil, err
+	}
+
+	bytesRead += len(encodedReq)
+
+	fullBytes := append(lengthBytes, encodedReq...)
+
+	return fullBytes, nil
+}
+
+func (b *MockBroker) isGSSAPI(buffer []byte) bool {
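+	// The first byte after the 4-byte length prefix is either the ASN.1
+	// APPLICATION 0 tag (0x60) that opens an RFC 2743 InitialContextToken,
+	// or the 0x05 0x04 TOK_ID of an RFC 4121 wrap token.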
+	return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04})
+}
+
+func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
+	defer wg.Done()
+	defer func() {
+		_ = conn.Close()
+	}()
+	Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
+	var err error
+
+	abort := make(chan none)
+	defer close(abort)
+	go func() {
+		select {
+		case <-b.closing:
+			_ = conn.Close()
+		case <-abort:
+		}
+	}()
+
+	resHeader := make([]byte, 8)
+	var bytesWritten int
+	var bytesRead int
+	for {
+
+		buffer, err := b.readToBytes(conn)
+		if err != nil {
+			Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer))
+			b.serverError(err)
+			break
+		}
+
+		bytesWritten = 0
+		if !b.isGSSAPI(buffer) {
+
+			req, br, err := decodeRequest(bytes.NewReader(buffer))
+			bytesRead = br
+			if err != nil {
+				Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
+				b.serverError(err)
+				break
+			}
+
+			if b.latency > 0 {
+				time.Sleep(b.latency)
+			}
+
+			b.lock.Lock()
+			res := b.handler(req)
+			b.history = append(b.history, RequestResponse{req.body, res})
+			b.lock.Unlock()
+
+			if res == nil {
+				Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
+				continue
+			}
+			Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
+
+			encodedRes, err := encode(res, nil)
+			if err != nil {
+				b.serverError(err)
+				break
+			}
+			if len(encodedRes) == 0 {
+				b.lock.Lock()
+				if b.notifier != nil {
+					b.notifier(bytesRead, 0)
+				}
+				b.lock.Unlock()
+				continue
+			}
+
+			binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
+			binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
+			if _, err = conn.Write(resHeader); err != nil {
+				b.serverError(err)
+				break
+			}
+			if _, err = conn.Write(encodedRes); err != nil {
+				b.serverError(err)
+				break
+			}
+			bytesWritten = len(resHeader) + len(encodedRes)
+
+		} else {
+			// GSSAPI is not part of the Kafka protocol, but is supported for authentication purposes.
+			// History is not recorded for this kind of request, as it is only used to test the GSSAPI authentication mechanism.
+			b.lock.Lock()
+			res := b.gssApiHandler(buffer)
+			b.lock.Unlock()
+			if res == nil {
+				Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(buffer))
+				continue
+			}
+			if _, err = conn.Write(res); err != nil {
+				b.serverError(err)
+				break
+			}
+			bytesWritten = len(res)
+		}
+
+		b.lock.Lock()
+		if b.notifier != nil {
+			b.notifier(bytesRead, bytesWritten)
+		}
+		b.lock.Unlock()
+
+	}
+	Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
+}
+
+func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) {
+	select {
+	case res, ok := <-b.expectations:
+		if !ok {
+			return nil
+		}
+		return res
+	case <-time.After(expectationTimeout):
+		return nil
+	}
+}
+
+func (b *MockBroker) serverError(err error) {
+	isConnectionClosedError := false
+	if _, ok := err.(*net.OpError); ok {
+		isConnectionClosedError = true
+	} else if err == io.EOF {
+		isConnectionClosedError = true
+	} else if err.Error() == "use of closed network connection" {
+		isConnectionClosedError = true
+	}
+
+	if isConnectionClosedError {
+		return
+	}
+
+	b.t.Errorf(err.Error())
+}
+
+// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as
+// provided by the test framework and serves a channel of queued responses
+// (see Returns). If an error occurs it is simply logged to the TestReporter
+// and the broker exits.
+func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
+	return NewMockBrokerAddr(t, brokerID, "localhost:0")
+}
+
+// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
+// it rather than just some ephemeral port.
+func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
+	listener, err := net.Listen("tcp", addr)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return NewMockBrokerListener(t, brokerID, listener)
+}
+
+// NewMockBrokerListener behaves like NewMockBrokerAddr but accepts connections on the listener specified.
+func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker {
+	var err error
+
+	broker := &MockBroker{
+		closing:      make(chan none),
+		stopper:      make(chan none),
+		t:            t,
+		brokerID:     brokerID,
+		expectations: make(chan encoder, 512),
+		listener:     listener,
+	}
+	broker.handler = broker.defaultRequestHandler
+
+	Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
+	_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
+	if err != nil {
+		t.Fatal(err)
+	}
+	tmp, err := strconv.ParseInt(portStr, 10, 32)
+	if err != nil {
+		t.Fatal(err)
+	}
+	broker.port = int32(tmp)
+
+	go broker.serverLoop()
+
+	return broker
+}
+
+func (b *MockBroker) Returns(e encoder) {
+	b.expectations <- e
+}
diff --git a/vendor/github.com/Shopify/sarama/mockkerberos.go b/vendor/github.com/Shopify/sarama/mockkerberos.go
new file mode 100644
index 0000000..affeb2d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockkerberos.go
@@ -0,0 +1,123 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"gopkg.in/jcmturner/gokrb5.v7/credentials"
+	"gopkg.in/jcmturner/gokrb5.v7/gssapi"
+	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
+	"gopkg.in/jcmturner/gokrb5.v7/messages"
+	"gopkg.in/jcmturner/gokrb5.v7/types"
+)
+
+type KafkaGSSAPIHandler struct {
+	client         *MockKerberosClient
+	badResponse    bool
+	badKeyChecksum bool
+}
+
+func (h *KafkaGSSAPIHandler) MockKafkaGSSAPI(buffer []byte) []byte {
+	// Default payload used for verification
+	err := h.client.Login() // The mock client constructs its keys on login
+	if err != nil {
+		return nil
+	}
+	if h.badResponse { // Returns trash
+		return []byte{0x00, 0x00, 0x00, 0x01, 0xAD}
+	}
+
+	var pack = gssapi.WrapToken{
+		Flags:     KRB5_USER_AUTH,
+		EC:        12,
+		RRC:       0,
+		SndSeqNum: 3398292281,
+		Payload:   []byte{0x11, 0x00}, // 1100
+	}
+	// Compute checksum
+	if h.badKeyChecksum {
+		pack.CheckSum = []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
+	} else {
+		err = pack.SetCheckSum(h.client.ASRep.DecryptedEncPart.Key, keyusage.GSSAPI_ACCEPTOR_SEAL)
+		if err != nil {
+			return nil
+		}
+	}
+
+	packBytes, err := pack.Marshal()
+	if err != nil {
+		return nil
+	}
+	lenBytes := len(packBytes)
+	response := make([]byte, lenBytes+4)
+	copy(response[4:], packBytes)
+	binary.BigEndian.PutUint32(response, uint32(lenBytes))
+	return response
+}
+
+type MockKerberosClient struct {
+	asReqBytes  string
+	asRepBytes  string
+	ASRep       messages.ASRep
+	credentials *credentials.Credentials
+	mockError   error
+	errorStage  string
+}
+
+func (c *MockKerberosClient) Login() error {
+	if c.errorStage == "login" && c.mockError != nil {
+		return c.mockError
+	}
+	c.asRepBytes = "6b8202e9308202e5a003020105a10302010ba22b30293027a103020113a220041e301c301aa003020112a1131b114" +
+		"558414d504c452e434f4d636c69656e74a30d1b0b4558414d504c452e434f4da4133011a003020101a10a30081b06636c69656e7" +
+		"4a5820156618201523082014ea003020105a10d1b0b4558414d504c452e434f4da220301ea003020102a11730151b066b7262746" +
+		"7741b0b4558414d504c452e434f4da382011430820110a003020112a103020101a28201020481ffdb9891175d106818e61008c51" +
+		"d0b3462bca92f3bf9d4cfa82de4c4d7aff9994ec87c573e3a3d54dcb2bb79618c76f2bf4a3d006f90d5bdbd049bc18f48be39203" +
+		"549ca02acaf63f292b12404f9b74c34b83687119d8f56552ccc0c50ebee2a53bb114c1b4619bb1d5d31f0f49b4d40a08a9b4c046" +
+		"2e1398d0b648be1c0e50c552ad16e1d8d8e74263dd0bf0ec591e4797dfd40a9a1be4ae830d03a306e053fd7586fef84ffc5e4a83" +
+		"7c3122bf3e6a40fe87e84019f6283634461b955712b44a5f7386c278bff94ec2c2dc0403247e29c2450e853471ceababf9b8911f" +
+		"997f2e3010b046d2c49eb438afb0f4c210821e80d4ffa4c9521eb895dcd68610b3feaa682012c30820128a003020112a282011f0" +
+		"482011bce73cbce3f1dd17661c412005f0f2257c756fe8e98ff97e6ec24b7bab66e5fd3a3827aeeae4757af0c6e892948122d8b2" +
+		"03c8df48df0ef5d142d0e416d688f11daa0fcd63d96bdd431d02b8e951c664eeff286a2be62383d274a04016d5f0e141da58cb86" +
+		"331de64063062f4f885e8e9ce5b181ca2fdc67897c5995e0ae1ae0c171a64493ff7bd91bc6d89cd4fce1e2b3ea0a10e34b0d5eda" +
+		"aa38ee727b50c5632ed1d2f2b457908e616178d0d80b72af209fb8ac9dbaa1768fa45931392b36b6d8c12400f8ded2efaa0654d0" +
+		"da1db966e8b5aab4706c800f95d559664646041fdb38b411c62fc0fbe0d25083a28562b0e1c8df16e62e9d5626b0addee489835f" +
+		"eedb0f26c05baa596b69b17f47920aa64b29dc77cfcc97ba47885"
+	apRepBytes, err := hex.DecodeString(c.asRepBytes)
+	if err != nil {
+		return err
+	}
+	err = c.ASRep.Unmarshal(apRepBytes)
+	if err != nil {
+		return err
+	}
+	c.credentials = credentials.New("client", "EXAMPLE.COM").WithPassword("qwerty")
+	_, err = c.ASRep.DecryptEncPart(c.credentials)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *MockKerberosClient) GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) {
+	if c.errorStage == "service_ticket" && c.mockError != nil {
+		return messages.Ticket{}, types.EncryptionKey{}, c.mockError
+	}
+	return c.ASRep.Ticket, c.ASRep.DecryptedEncPart.Key, nil
+}
+
+func (c *MockKerberosClient) Domain() string {
+	return "EXAMPLE.COM"
+}
+func (c *MockKerberosClient) CName() types.PrincipalName {
+	var p = types.PrincipalName{
+		NameType: KRB5_USER_AUTH,
+		NameString: []string{
+			"kafka",
+			"kafka",
+		},
+	}
+	return p
+}
+func (c *MockKerberosClient) Destroy() {
+	// Do nothing.
+}
diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go
new file mode 100644
index 0000000..7dcc93e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockresponses.go
@@ -0,0 +1,941 @@
+package sarama
+
+import (
+	"fmt"
+	"strings"
+)
+
+// TestReporter has methods matching go's testing.T to avoid importing
+// `testing` in the main part of the library.
+type TestReporter interface {
+	Error(...interface{})
+	Errorf(string, ...interface{})
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+}
+
+// MockResponse is a response builder interface; it defines one method that
+// allows generating a response based on a request body. MockResponses are used
+// to program behavior of MockBroker in tests.
+type MockResponse interface {
+	For(reqBody versionedDecoder) (res encoder)
+}
+
+// MockWrapper is a mock response builder that returns a particular concrete
+// response regardless of the actual request passed to the `For` method.
+type MockWrapper struct {
+	res encoder
+}
+
+func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
+	return mw.res
+}
+
+func NewMockWrapper(res encoder) *MockWrapper {
+	return &MockWrapper{res: res}
+}
+
+// MockSequence is a mock response builder that is created from a sequence of
+// concrete responses. Every time a `MockBroker` calls its `For` method the
+// next response from the sequence is returned. When the end of the sequence
+// is reached the last element from the sequence is returned.
+type MockSequence struct {
+	responses []MockResponse
+}
+
+func NewMockSequence(responses ...interface{}) *MockSequence {
+	ms := &MockSequence{}
+	ms.responses = make([]MockResponse, len(responses))
+	for i, res := range responses {
+		switch res := res.(type) {
+		case MockResponse:
+			ms.responses[i] = res
+		case encoder:
+			ms.responses[i] = NewMockWrapper(res)
+		default:
+			panic(fmt.Sprintf("Unexpected response type: %T", res))
+		}
+	}
+	return ms
+}
+
+func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
+	res = mc.responses[0].For(reqBody)
+	if len(mc.responses) > 1 {
+		mc.responses = mc.responses[1:]
+	}
+	return res
+}
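+
+// For example, NewMockSequence(res1, res2) serves res1 to the first matching
+// request and res2 to every matching request after that.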
+
+type MockListGroupsResponse struct {
+	groups map[string]string
+	t      TestReporter
+}
+
+func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse {
+	return &MockListGroupsResponse{
+		groups: make(map[string]string),
+		t:      t,
+	}
+}
+
+func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoder {
+	request := reqBody.(*ListGroupsRequest)
+	_ = request
+	response := &ListGroupsResponse{
+		Groups: m.groups,
+	}
+	return response
+}
+
+func (m *MockListGroupsResponse) AddGroup(groupID, protocolType string) *MockListGroupsResponse {
+	m.groups[groupID] = protocolType
+	return m
+}
+
+type MockDescribeGroupsResponse struct {
+	groups map[string]*GroupDescription
+	t      TestReporter
+}
+
+func NewMockDescribeGroupsResponse(t TestReporter) *MockDescribeGroupsResponse {
+	return &MockDescribeGroupsResponse{
+		t:      t,
+		groups: make(map[string]*GroupDescription),
+	}
+}
+
+func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, description *GroupDescription) *MockDescribeGroupsResponse {
+	m.groups[groupID] = description
+	return m
+}
+
+func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoder {
+	request := reqBody.(*DescribeGroupsRequest)
+
+	response := &DescribeGroupsResponse{}
+	for _, requestedGroup := range request.Groups {
+		if group, ok := m.groups[requestedGroup]; ok {
+			response.Groups = append(response.Groups, group)
+		} else {
+			// Mimic real kafka - if a group doesn't exist, return
+			// an entry with state "Dead"
+			response.Groups = append(response.Groups, &GroupDescription{
+				GroupId: requestedGroup,
+				State:   "Dead",
+			})
+		}
+	}
+
+	return response
+}
+
+// MockMetadataResponse is a `MetadataResponse` builder.
+type MockMetadataResponse struct {
+	controllerID int32
+	leaders      map[string]map[int32]int32
+	brokers      map[string]int32
+	t            TestReporter
+}
+
+func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
+	return &MockMetadataResponse{
+		leaders: make(map[string]map[int32]int32),
+		brokers: make(map[string]int32),
+		t:       t,
+	}
+}
+
+func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
+	partitions := mmr.leaders[topic]
+	if partitions == nil {
+		partitions = make(map[int32]int32)
+		mmr.leaders[topic] = partitions
+	}
+	partitions[partition] = brokerID
+	return mmr
+}
+
+func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
+	mmr.brokers[addr] = brokerID
+	return mmr
+}
+
+func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResponse {
+	mmr.controllerID = brokerID
+	return mmr
+}
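+
+// The setters chain, so a test can describe a one-broker cluster in a
+// single expression (illustrative address and IDs):
+//
+//	metadata := NewMockMetadataResponse(t).
+//		SetBroker("localhost:9092", 1).
+//		SetLeader("my-topic", 0, 1).
+//		SetController(1)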
+
+func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
+	metadataRequest := reqBody.(*MetadataRequest)
+	metadataResponse := &MetadataResponse{
+		Version:      metadataRequest.version(),
+		ControllerID: mmr.controllerID,
+	}
+	for addr, brokerID := range mmr.brokers {
+		metadataResponse.AddBroker(addr, brokerID)
+	}
+
+	// Generate set of replicas
+	replicas := []int32{}
+	offlineReplicas := []int32{}
+	for _, brokerID := range mmr.brokers {
+		replicas = append(replicas, brokerID)
+	}
+
+	if len(metadataRequest.Topics) == 0 {
+		for topic, partitions := range mmr.leaders {
+			for partition, brokerID := range partitions {
+				metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
+			}
+		}
+		return metadataResponse
+	}
+	for _, topic := range metadataRequest.Topics {
+		for partition, brokerID := range mmr.leaders[topic] {
+			metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
+		}
+	}
+	return metadataResponse
+}
+
+// MockOffsetResponse is an `OffsetResponse` builder.
+type MockOffsetResponse struct {
+	offsets map[string]map[int32]map[int64]int64
+	t       TestReporter
+	version int16
+}
+
+func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
+	return &MockOffsetResponse{
+		offsets: make(map[string]map[int32]map[int64]int64),
+		t:       t,
+	}
+}
+
+func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse {
+	mor.version = version
+	return mor
+}
+
+func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
+	partitions := mor.offsets[topic]
+	if partitions == nil {
+		partitions = make(map[int32]map[int64]int64)
+		mor.offsets[topic] = partitions
+	}
+	times := partitions[partition]
+	if times == nil {
+		times = make(map[int64]int64)
+		partitions[partition] = times
+	}
+	times[time] = offset
+	return mor
+}
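+
+// A sketch of seeding offsets for the two standard query times
+// (OffsetOldest and OffsetNewest are package constants defined elsewhere):
+//
+//	offsets := NewMockOffsetResponse(t).
+//		SetOffset("my-topic", 0, OffsetOldest, 0).
+//		SetOffset("my-topic", 0, OffsetNewest, 10)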
+
+func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
+	offsetRequest := reqBody.(*OffsetRequest)
+	offsetResponse := &OffsetResponse{Version: mor.version}
+	for topic, partitions := range offsetRequest.blocks {
+		for partition, block := range partitions {
+			offset := mor.getOffset(topic, partition, block.time)
+			offsetResponse.AddTopicPartition(topic, partition, offset)
+		}
+	}
+	return offsetResponse
+}
+
+func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
+	partitions := mor.offsets[topic]
+	if partitions == nil {
+		mor.t.Errorf("missing topic: %s", topic)
+	}
+	times := partitions[partition]
+	if times == nil {
+		mor.t.Errorf("missing partition: %d", partition)
+	}
+	offset, ok := times[time]
+	if !ok {
+		mor.t.Errorf("missing time: %d", time)
+	}
+	return offset
+}
+
+// MockFetchResponse is a `FetchResponse` builder.
+type MockFetchResponse struct {
+	messages       map[string]map[int32]map[int64]Encoder
+	highWaterMarks map[string]map[int32]int64
+	t              TestReporter
+	batchSize      int
+	version        int16
+}
+
+func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
+	return &MockFetchResponse{
+		messages:       make(map[string]map[int32]map[int64]Encoder),
+		highWaterMarks: make(map[string]map[int32]int64),
+		t:              t,
+		batchSize:      batchSize,
+	}
+}
+
+func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse {
+	mfr.version = version
+	return mfr
+}
+
+func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
+	partitions := mfr.messages[topic]
+	if partitions == nil {
+		partitions = make(map[int32]map[int64]Encoder)
+		mfr.messages[topic] = partitions
+	}
+	messages := partitions[partition]
+	if messages == nil {
+		messages = make(map[int64]Encoder)
+		partitions[partition] = messages
+	}
+	messages[offset] = msg
+	return mfr
+}
+
+func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
+	partitions := mfr.highWaterMarks[topic]
+	if partitions == nil {
+		partitions = make(map[int32]int64)
+		mfr.highWaterMarks[topic] = partitions
+	}
+	partitions[partition] = offset
+	return mfr
+}
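+
+// A sketch of a partition holding one message just below a high water mark
+// of 10 (StringEncoder is the package's string payload helper, defined
+// elsewhere):
+//
+//	fetch := NewMockFetchResponse(t, 1).
+//		SetMessage("my-topic", 0, 9, StringEncoder("hello")).
+//		SetHighWaterMark("my-topic", 0, 10)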
+
+func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
+	fetchRequest := reqBody.(*FetchRequest)
+	res := &FetchResponse{
+		Version: mfr.version,
+	}
+	for topic, partitions := range fetchRequest.blocks {
+		for partition, block := range partitions {
+			initialOffset := block.fetchOffset
+			offset := initialOffset
+			maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
+			for i := 0; i < mfr.batchSize && offset < maxOffset; {
+				msg := mfr.getMessage(topic, partition, offset)
+				if msg != nil {
+					res.AddMessage(topic, partition, nil, msg, offset)
+					i++
+				}
+				offset++
+			}
+			fb := res.GetBlock(topic, partition)
+			if fb == nil {
+				res.AddError(topic, partition, ErrNoError)
+				fb = res.GetBlock(topic, partition)
+			}
+			fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
+		}
+	}
+	return res
+}
+
+func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder {
+	partitions := mfr.messages[topic]
+	if partitions == nil {
+		return nil
+	}
+	messages := partitions[partition]
+	if messages == nil {
+		return nil
+	}
+	return messages[offset]
+}
+
+func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
+	partitions := mfr.messages[topic]
+	if partitions == nil {
+		return 0
+	}
+	messages := partitions[partition]
+	if messages == nil {
+		return 0
+	}
+	return len(messages)
+}
+
+func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
+	partitions := mfr.highWaterMarks[topic]
+	if partitions == nil {
+		return 0
+	}
+	return partitions[partition]
+}
+
+// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
+type MockConsumerMetadataResponse struct {
+	coordinators map[string]interface{}
+	t            TestReporter
+}
+
+func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
+	return &MockConsumerMetadataResponse{
+		coordinators: make(map[string]interface{}),
+		t:            t,
+	}
+}
+
+func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
+	mr.coordinators[group] = broker
+	return mr
+}
+
+func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
+	mr.coordinators[group] = kerror
+	return mr
+}
+
+func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*ConsumerMetadataRequest)
+	group := req.ConsumerGroup
+	res := &ConsumerMetadataResponse{}
+	v := mr.coordinators[group]
+	switch v := v.(type) {
+	case *MockBroker:
+		res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
+	case KError:
+		res.Err = v
+	}
+	return res
+}
+
+// MockFindCoordinatorResponse is a `FindCoordinatorResponse` builder.
+type MockFindCoordinatorResponse struct {
+	groupCoordinators map[string]interface{}
+	transCoordinators map[string]interface{}
+	t                 TestReporter
+}
+
+func NewMockFindCoordinatorResponse(t TestReporter) *MockFindCoordinatorResponse {
+	return &MockFindCoordinatorResponse{
+		groupCoordinators: make(map[string]interface{}),
+		transCoordinators: make(map[string]interface{}),
+		t:                 t,
+	}
+}
+
+func (mr *MockFindCoordinatorResponse) SetCoordinator(coordinatorType CoordinatorType, group string, broker *MockBroker) *MockFindCoordinatorResponse {
+	switch coordinatorType {
+	case CoordinatorGroup:
+		mr.groupCoordinators[group] = broker
+	case CoordinatorTransaction:
+		mr.transCoordinators[group] = broker
+	}
+	return mr
+}
+
+func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, group string, kerror KError) *MockFindCoordinatorResponse {
+	switch coordinatorType {
+	case CoordinatorGroup:
+		mr.groupCoordinators[group] = kerror
+	case CoordinatorTransaction:
+		mr.transCoordinators[group] = kerror
+	}
+	return mr
+}
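+
+// For example, pointing group-coordinator lookups at a mock broker (a
+// sketch; mockBroker is assumed to be a *MockBroker created elsewhere):
+//
+//	coordinator := NewMockFindCoordinatorResponse(t).
+//		SetCoordinator(CoordinatorGroup, "my-group", mockBroker)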
+
+func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*FindCoordinatorRequest)
+	res := &FindCoordinatorResponse{}
+	var v interface{}
+	switch req.CoordinatorType {
+	case CoordinatorGroup:
+		v = mr.groupCoordinators[req.CoordinatorKey]
+	case CoordinatorTransaction:
+		v = mr.transCoordinators[req.CoordinatorKey]
+	}
+	switch v := v.(type) {
+	case *MockBroker:
+		res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
+	case KError:
+		res.Err = v
+	}
+	return res
+}
+
+// MockOffsetCommitResponse is an `OffsetCommitResponse` builder.
+type MockOffsetCommitResponse struct {
+	errors map[string]map[string]map[int32]KError
+	t      TestReporter
+}
+
+func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
+	return &MockOffsetCommitResponse{t: t}
+}
+
+func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
+	if mr.errors == nil {
+		mr.errors = make(map[string]map[string]map[int32]KError)
+	}
+	topics := mr.errors[group]
+	if topics == nil {
+		topics = make(map[string]map[int32]KError)
+		mr.errors[group] = topics
+	}
+	partitions := topics[topic]
+	if partitions == nil {
+		partitions = make(map[int32]KError)
+		topics[topic] = partitions
+	}
+	partitions[partition] = kerror
+	return mr
+}
+
+func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*OffsetCommitRequest)
+	group := req.ConsumerGroup
+	res := &OffsetCommitResponse{}
+	for topic, partitions := range req.blocks {
+		for partition := range partitions {
+			res.AddError(topic, partition, mr.getError(group, topic, partition))
+		}
+	}
+	return res
+}
+
+func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
+	topics := mr.errors[group]
+	if topics == nil {
+		return ErrNoError
+	}
+	partitions := topics[topic]
+	if partitions == nil {
+		return ErrNoError
+	}
+	kerror, ok := partitions[partition]
+	if !ok {
+		return ErrNoError
+	}
+	return kerror
+}
+
+// MockProduceResponse is a `ProduceResponse` builder.
+type MockProduceResponse struct {
+	version int16
+	errors  map[string]map[int32]KError
+	t       TestReporter
+}
+
+func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
+	return &MockProduceResponse{t: t}
+}
+
+func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse {
+	mr.version = version
+	return mr
+}
+
+func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
+	if mr.errors == nil {
+		mr.errors = make(map[string]map[int32]KError)
+	}
+	partitions := mr.errors[topic]
+	if partitions == nil {
+		partitions = make(map[int32]KError)
+		mr.errors[topic] = partitions
+	}
+	partitions[partition] = kerror
+	return mr
+}
+
+func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*ProduceRequest)
+	res := &ProduceResponse{
+		Version: mr.version,
+	}
+	for topic, partitions := range req.records {
+		for partition := range partitions {
+			res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
+		}
+	}
+	return res
+}
+
+func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
+	partitions := mr.errors[topic]
+	if partitions == nil {
+		return ErrNoError
+	}
+	kerror, ok := partitions[partition]
+	if !ok {
+		return ErrNoError
+	}
+	return kerror
+}
+
+// MockOffsetFetchResponse is an `OffsetFetchResponse` builder.
+type MockOffsetFetchResponse struct {
+	offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
+	error   KError
+	t       TestReporter
+}
+
+func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
+	return &MockOffsetFetchResponse{t: t}
+}
+
+func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
+	if mr.offsets == nil {
+		mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
+	}
+	topics := mr.offsets[group]
+	if topics == nil {
+		topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
+		mr.offsets[group] = topics
+	}
+	partitions := topics[topic]
+	if partitions == nil {
+		partitions = make(map[int32]*OffsetFetchResponseBlock)
+		topics[topic] = partitions
+	}
+	partitions[partition] = &OffsetFetchResponseBlock{offset, 0, metadata, kerror}
+	return mr
+}
+
+func (mr *MockOffsetFetchResponse) SetError(kerror KError) *MockOffsetFetchResponse {
+	mr.error = kerror
+	return mr
+}
+
+func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*OffsetFetchRequest)
+	group := req.ConsumerGroup
+	res := &OffsetFetchResponse{Version: req.Version}
+
+	for topic, partitions := range mr.offsets[group] {
+		for partition, block := range partitions {
+			res.AddBlock(topic, partition, block)
+		}
+	}
+
+	if res.Version >= 2 {
+		res.Err = mr.error
+	}
+	return res
+}
+
+type MockCreateTopicsResponse struct {
+	t TestReporter
+}
+
+func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse {
+	return &MockCreateTopicsResponse{t: t}
+}
+
+func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*CreateTopicsRequest)
+	res := &CreateTopicsResponse{
+		Version: req.Version,
+	}
+	res.TopicErrors = make(map[string]*TopicError)
+
+	for topic := range req.TopicDetails {
+		if res.Version >= 1 && strings.HasPrefix(topic, "_") {
+			msg := "insufficient permissions to create topic with reserved prefix"
+			res.TopicErrors[topic] = &TopicError{
+				Err:    ErrTopicAuthorizationFailed,
+				ErrMsg: &msg,
+			}
+			continue
+		}
+		res.TopicErrors[topic] = &TopicError{Err: ErrNoError}
+	}
+	return res
+}
+
+type MockDeleteTopicsResponse struct {
+	t TestReporter
+}
+
+func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse {
+	return &MockDeleteTopicsResponse{t: t}
+}
+
+func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*DeleteTopicsRequest)
+	res := &DeleteTopicsResponse{}
+	res.TopicErrorCodes = make(map[string]KError)
+
+	for _, topic := range req.Topics {
+		res.TopicErrorCodes[topic] = ErrNoError
+	}
+	res.Version = int16(req.Version)
+	return res
+}
+
+type MockCreatePartitionsResponse struct {
+	t TestReporter
+}
+
+func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsResponse {
+	return &MockCreatePartitionsResponse{t: t}
+}
+
+func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*CreatePartitionsRequest)
+	res := &CreatePartitionsResponse{}
+	res.TopicPartitionErrors = make(map[string]*TopicPartitionError)
+
+	for topic := range req.TopicPartitions {
+		if strings.HasPrefix(topic, "_") {
+			msg := "insufficient permissions to create partition on topic with reserved prefix"
+			res.TopicPartitionErrors[topic] = &TopicPartitionError{
+				Err:    ErrTopicAuthorizationFailed,
+				ErrMsg: &msg,
+			}
+			continue
+		}
+		res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError}
+	}
+	return res
+}
+
+type MockDeleteRecordsResponse struct {
+	t TestReporter
+}
+
+func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse {
+	return &MockDeleteRecordsResponse{t: t}
+}
+
+func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*DeleteRecordsRequest)
+	res := &DeleteRecordsResponse{}
+	res.Topics = make(map[string]*DeleteRecordsResponseTopic)
+
+	for topic, deleteRecordRequestTopic := range req.Topics {
+		partitions := make(map[int32]*DeleteRecordsResponsePartition)
+		for partition := range deleteRecordRequestTopic.PartitionOffsets {
+			partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError}
+		}
+		res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions}
+	}
+	return res
+}
+
+type MockDescribeConfigsResponse struct {
+	t TestReporter
+}
+
+func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse {
+	return &MockDescribeConfigsResponse{t: t}
+}
+
+func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*DescribeConfigsRequest)
+	res := &DescribeConfigsResponse{}
+
+	for _, r := range req.Resources {
+		var configEntries []*ConfigEntry
+		switch r.Type {
+		case TopicResource:
+			configEntries = append(configEntries,
+				&ConfigEntry{Name: "max.message.bytes",
+					Value:     "1000000",
+					ReadOnly:  false,
+					Default:   true,
+					Sensitive: false,
+				}, &ConfigEntry{Name: "retention.ms",
+					Value:     "5000",
+					ReadOnly:  false,
+					Default:   false,
+					Sensitive: false,
+				}, &ConfigEntry{Name: "password",
+					Value:     "12345",
+					ReadOnly:  false,
+					Default:   false,
+					Sensitive: true,
+				})
+			res.Resources = append(res.Resources, &ResourceResponse{
+				Name:    r.Name,
+				Configs: configEntries,
+			})
+		}
+	}
+	return res
+}
+
+type MockAlterConfigsResponse struct {
+	t TestReporter
+}
+
+func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse {
+	return &MockAlterConfigsResponse{t: t}
+}
+
+func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*AlterConfigsRequest)
+	res := &AlterConfigsResponse{}
+
+	for _, r := range req.Resources {
+		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{Name: r.Name,
+			Type:     TopicResource,
+			ErrorMsg: "",
+		})
+	}
+	return res
+}
+
+type MockCreateAclsResponse struct {
+	t TestReporter
+}
+
+func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse {
+	return &MockCreateAclsResponse{t: t}
+}
+
+func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*CreateAclsRequest)
+	res := &CreateAclsResponse{}
+
+	for range req.AclCreations {
+		res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrNoError})
+	}
+	return res
+}
+
+type MockListAclsResponse struct {
+	t TestReporter
+}
+
+func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse {
+	return &MockListAclsResponse{t: t}
+}
+
+func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*DescribeAclsRequest)
+	res := &DescribeAclsResponse{}
+	res.Err = ErrNoError
+	acl := &ResourceAcls{}
+	if req.ResourceName != nil {
+		acl.Resource.ResourceName = *req.ResourceName
+	}
+	acl.Resource.ResourcePatternType = req.ResourcePatternTypeFilter
+	acl.Resource.ResourceType = req.ResourceType
+
+	host := "*"
+	if req.Host != nil {
+		host = *req.Host
+	}
+
+	principal := "User:test"
+	if req.Principal != nil {
+		principal = *req.Principal
+	}
+
+	permissionType := req.PermissionType
+	if permissionType == AclPermissionAny {
+		permissionType = AclPermissionAllow
+	}
+
+	acl.Acls = append(acl.Acls, &Acl{Operation: req.Operation, PermissionType: permissionType, Host: host, Principal: principal})
+	res.ResourceAcls = append(res.ResourceAcls, acl)
+	res.Version = int16(req.Version)
+	return res
+}
+
+type MockSaslAuthenticateResponse struct {
+	t             TestReporter
+	kerror        KError
+	saslAuthBytes []byte
+}
+
+func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateResponse {
+	return &MockSaslAuthenticateResponse{t: t}
+}
+
+func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoder {
+	res := &SaslAuthenticateResponse{}
+	res.Err = msar.kerror
+	res.SaslAuthBytes = msar.saslAuthBytes
+	return res
+}
+
+func (msar *MockSaslAuthenticateResponse) SetError(kerror KError) *MockSaslAuthenticateResponse {
+	msar.kerror = kerror
+	return msar
+}
+
+func (msar *MockSaslAuthenticateResponse) SetAuthBytes(saslAuthBytes []byte) *MockSaslAuthenticateResponse {
+	msar.saslAuthBytes = saslAuthBytes
+	return msar
+}
+
+type MockSaslHandshakeResponse struct {
+	enabledMechanisms []string
+	kerror            KError
+	t                 TestReporter
+}
+
+func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse {
+	return &MockSaslHandshakeResponse{t: t}
+}
+
+func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoder {
+	res := &SaslHandshakeResponse{}
+	res.Err = mshr.kerror
+	res.EnabledMechanisms = mshr.enabledMechanisms
+	return res
+}
+
+func (mshr *MockSaslHandshakeResponse) SetError(kerror KError) *MockSaslHandshakeResponse {
+	mshr.kerror = kerror
+	return mshr
+}
+
+func (mshr *MockSaslHandshakeResponse) SetEnabledMechanisms(enabledMechanisms []string) *MockSaslHandshakeResponse {
+	mshr.enabledMechanisms = enabledMechanisms
+	return mshr
+}
+
+type MockDeleteAclsResponse struct {
+	t TestReporter
+}
+
+func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse {
+	return &MockDeleteAclsResponse{t: t}
+}
+
+func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*DeleteAclsRequest)
+	res := &DeleteAclsResponse{}
+
+	for range req.Filters {
+		response := &FilterResponse{Err: ErrNoError}
+		response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError})
+		res.FilterResponses = append(res.FilterResponses, response)
+	}
+	res.Version = int16(req.Version)
+	return res
+}
+
+type MockDeleteGroupsResponse struct {
+	deletedGroups []string
+}
+
+func NewMockDeleteGroupsRequest(t TestReporter) *MockDeleteGroupsResponse {
+	return &MockDeleteGroupsResponse{}
+}
+
+func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDeleteGroupsResponse {
+	m.deletedGroups = groups
+	return m
+}
+
+func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoder {
+	resp := &DeleteGroupsResponse{
+		GroupErrorCodes: map[string]KError{},
+	}
+	for _, group := range m.deletedGroups {
+		resp.GroupErrorCodes[group] = ErrNoError
+	}
+	return resp
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go
new file mode 100644
index 0000000..5732ed9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go
@@ -0,0 +1,210 @@
+package sarama
+
+import "errors"
+
+// ReceiveTime is a special value for the timestamp field of Offset Commit Requests,
+// which tells the broker to set the timestamp to the time at which the request was
+// received. The timestamp is only used if message version 1 is used, which requires
+// kafka 0.8.2.
+const ReceiveTime int64 = -1
+
+// GroupGenerationUndefined is a special value for the group generation field of
+// Offset Commit Requests that should be used when a consumer group does not rely
+// on Kafka for partition management.
+const GroupGenerationUndefined = -1
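+
+// A sketch of a v1 commit from a consumer that manages partition assignment
+// itself, letting the broker stamp the commit time (illustrative values;
+// AddBlock is defined later in this file):
+//
+//	req := &OffsetCommitRequest{
+//		Version:                 1,
+//		ConsumerGroup:           "my-group",
+//		ConsumerGroupGeneration: GroupGenerationUndefined,
+//	}
+//	req.AddBlock("my-topic", 0, 42, ReceiveTime, "")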
+
+type offsetCommitRequestBlock struct {
+	offset    int64
+	timestamp int64
+	metadata  string
+}
+
+func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
+	pe.putInt64(b.offset)
+	if version == 1 {
+		pe.putInt64(b.timestamp)
+	} else if b.timestamp != 0 {
+		Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
+	}
+
+	return pe.putString(b.metadata)
+}
+
+func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+	if b.offset, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if version == 1 {
+		if b.timestamp, err = pd.getInt64(); err != nil {
+			return err
+		}
+	}
+	b.metadata, err = pd.getString()
+	return err
+}
+
+type OffsetCommitRequest struct {
+	ConsumerGroup           string
+	ConsumerGroupGeneration int32  // v1 or later
+	ConsumerID              string // v1 or later
+	RetentionTime           int64  // v2 or later
+
+	// Version can be:
+	// - 0 (kafka 0.8.1 and later)
+	// - 1 (kafka 0.8.2 and later)
+	// - 2 (kafka 0.9.0 and later)
+	// - 3 (kafka 0.11.0 and later)
+	// - 4 (kafka 2.0.0 and later)
+	Version int16
+	blocks  map[string]map[int32]*offsetCommitRequestBlock
+}
+
+func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
+	if r.Version < 0 || r.Version > 4 {
+		return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
+	}
+
+	if err := pe.putString(r.ConsumerGroup); err != nil {
+		return err
+	}
+
+	if r.Version >= 1 {
+		pe.putInt32(r.ConsumerGroupGeneration)
+		if err := pe.putString(r.ConsumerID); err != nil {
+			return err
+		}
+	} else {
+		if r.ConsumerGroupGeneration != 0 {
+			Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
+		}
+		if r.ConsumerID != "" {
+			Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
+		}
+	}
+
+	if r.Version >= 2 {
+		pe.putInt64(r.RetentionTime)
+	} else if r.RetentionTime != 0 {
+		Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
+	}
+
+	if err := pe.putArrayLength(len(r.blocks)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.blocks {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err := block.encode(pe, r.Version); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.ConsumerGroup, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if r.Version >= 1 {
+		if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
+			return err
+		}
+		if r.ConsumerID, err = pd.getString(); err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 2 {
+		if r.RetentionTime, err = pd.getInt64(); err != nil {
+			return err
+		}
+	}
+
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount == 0 {
+		return nil
+	}
+	r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+	for i := 0; i < topicCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			block := &offsetCommitRequestBlock{}
+			if err := block.decode(pd, r.Version); err != nil {
+				return err
+			}
+			r.blocks[topic][partition] = block
+		}
+	}
+	return nil
+}
+
+func (r *OffsetCommitRequest) key() int16 {
+	return 8
+}
+
+func (r *OffsetCommitRequest) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_8_2_0
+	case 2:
+		return V0_9_0_0
+	case 3:
+		return V0_11_0_0
+	case 4:
+		return V2_0_0_0
+	default:
+		return MinVersion
+	}
+}
+
+func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
+	if r.blocks == nil {
+		r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+	}
+
+	r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
+}
+
+func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) {
+	partitions := r.blocks[topic]
+	if partitions == nil {
+		return 0, "", errors.New("no such offset")
+	}
+	block := partitions[partitionID]
+	if block == nil {
+		return 0, "", errors.New("no such offset")
+	}
+	return block.offset, block.metadata, nil
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go
new file mode 100644
index 0000000..e842298
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go
@@ -0,0 +1,110 @@
+package sarama
+
+type OffsetCommitResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	Errors         map[string]map[int32]KError
+}
+
+func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
+	if r.Errors == nil {
+		r.Errors = make(map[string]map[int32]KError)
+	}
+	partitions := r.Errors[topic]
+	if partitions == nil {
+		partitions = make(map[int32]KError)
+		r.Errors[topic] = partitions
+	}
+	partitions[partition] = kerror
+}
+
+func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
+	if r.Version >= 3 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+	if err := pe.putArrayLength(len(r.Errors)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.Errors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, kerror := range partitions {
+			pe.putInt32(partition)
+			pe.putInt16(int16(kerror))
+		}
+	}
+	return nil
+}
+
+func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if version >= 3 {
+		r.ThrottleTimeMs, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil || numTopics == 0 {
+		return err
+	}
+
+	r.Errors = make(map[string]map[int32]KError, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numErrors, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Errors[name] = make(map[int32]KError, numErrors)
+
+		for j := 0; j < numErrors; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			tmp, err := pd.getInt16()
+			if err != nil {
+				return err
+			}
+			r.Errors[name][id] = KError(tmp)
+		}
+	}
+
+	return nil
+}
+
+func (r *OffsetCommitResponse) key() int16 {
+	return 8
+}
+
+func (r *OffsetCommitResponse) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_8_2_0
+	case 2:
+		return V0_9_0_0
+	case 3:
+		return V0_11_0_0
+	case 4:
+		return V2_0_0_0
+	default:
+		return MinVersion
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
new file mode 100644
index 0000000..6860824
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
@@ -0,0 +1,100 @@
+package sarama
+
+type OffsetFetchRequest struct {
+	Version       int16
+	ConsumerGroup string
+	partitions    map[string][]int32
+}
+
+func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
+	if r.Version < 0 || r.Version > 5 {
+		return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
+	}
+
+	if err = pe.putString(r.ConsumerGroup); err != nil {
+		return err
+	}
+
+	if r.Version >= 2 && r.partitions == nil {
+		pe.putInt32(-1)
+	} else {
+		if err = pe.putArrayLength(len(r.partitions)); err != nil {
+			return err
+		}
+		for topic, partitions := range r.partitions {
+			if err = pe.putString(topic); err != nil {
+				return err
+			}
+			if err = pe.putInt32Array(partitions); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.ConsumerGroup, err = pd.getString(); err != nil {
+		return err
+	}
+	partitionCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if (partitionCount == 0 && version < 2) || partitionCount < 0 {
+		return nil
+	}
+	r.partitions = make(map[string][]int32)
+	for i := 0; i < partitionCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitions, err := pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+		r.partitions[topic] = partitions
+	}
+	return nil
+}
+
+func (r *OffsetFetchRequest) key() int16 {
+	return 9
+}
+
+func (r *OffsetFetchRequest) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_8_2_0
+	case 2:
+		return V0_10_2_0
+	case 3:
+		return V0_11_0_0
+	case 4:
+		return V2_0_0_0
+	case 5:
+		return V2_1_0_0
+	default:
+		return MinVersion
+	}
+}
+
+func (r *OffsetFetchRequest) ZeroPartitions() {
+	if r.partitions == nil && r.Version >= 2 {
+		r.partitions = make(map[string][]int32)
+	}
+}
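+
+// On version 2+ requests a nil partition set encodes as -1, which the broker
+// treats as a request for all topics committed by the group; ZeroPartitions
+// instead forces an explicitly empty set. A sketch:
+//
+//	req := &OffsetFetchRequest{Version: 2, ConsumerGroup: "my-group"}
+//	// leaving the partition set nil fetches offsets for every topic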
+
+func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
+	if r.partitions == nil {
+		r.partitions = make(map[string][]int32)
+	}
+
+	r.partitions[topic] = append(r.partitions[topic], partitionID)
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
new file mode 100644
index 0000000..9e25702
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
@@ -0,0 +1,197 @@
+package sarama
+
+type OffsetFetchResponseBlock struct {
+	Offset      int64
+	LeaderEpoch int32
+	Metadata    string
+	Err         KError
+}
+
+func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	b.Offset, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	if version >= 5 {
+		b.LeaderEpoch, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	b.Metadata, err = pd.getString()
+	if err != nil {
+		return err
+	}
+
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.Err = KError(tmp)
+
+	return nil
+}
+
+func (b *OffsetFetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt64(b.Offset)
+
+	if version >= 5 {
+		pe.putInt32(b.LeaderEpoch)
+	}
+
+	err = pe.putString(b.Metadata)
+	if err != nil {
+		return err
+	}
+
+	pe.putInt16(int16(b.Err))
+
+	return nil
+}
+
+type OffsetFetchResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	Blocks         map[string]map[int32]*OffsetFetchResponseBlock
+	Err            KError
+}
+
+func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
+	if r.Version >= 3 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+
+	if err := pe.putArrayLength(len(r.Blocks)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.Blocks {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err := block.encode(pe, r.Version); err != nil {
+				return err
+			}
+		}
+	}
+	if r.Version >= 2 {
+		pe.putInt16(int16(r.Err))
+	}
+	return nil
+}
+
+func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if version >= 3 {
+		r.ThrottleTimeMs, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if numTopics > 0 {
+		r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
+		for i := 0; i < numTopics; i++ {
+			name, err := pd.getString()
+			if err != nil {
+				return err
+			}
+
+			numBlocks, err := pd.getArrayLength()
+			if err != nil {
+				return err
+			}
+
+			if numBlocks == 0 {
+				r.Blocks[name] = nil
+				continue
+			}
+			r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
+
+			for j := 0; j < numBlocks; j++ {
+				id, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+
+				block := new(OffsetFetchResponseBlock)
+				err = block.decode(pd, version)
+				if err != nil {
+					return err
+				}
+				r.Blocks[name][id] = block
+			}
+		}
+	}
+
+	if version >= 2 {
+		kerr, err := pd.getInt16()
+		if err != nil {
+			return err
+		}
+		r.Err = KError(kerr)
+	}
+
+	return nil
+}
+
+func (r *OffsetFetchResponse) key() int16 {
+	return 9
+}
+
+func (r *OffsetFetchResponse) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_8_2_0
+	case 2:
+		return V0_10_2_0
+	case 3:
+		return V0_11_0_0
+	case 4:
+		return V2_0_0_0
+	case 5:
+		return V2_1_0_0
+	default:
+		return MinVersion
+	}
+}
+
+func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
+func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
+	}
+	partitions := r.Blocks[topic]
+	if partitions == nil {
+		partitions = make(map[int32]*OffsetFetchResponseBlock)
+		r.Blocks[topic] = partitions
+	}
+	partitions[partition] = block
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go
new file mode 100644
index 0000000..e40f429
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_manager.go
@@ -0,0 +1,585 @@
+package sarama
+
+import (
+	"sync"
+	"time"
+)
+
+// Offset Manager
+
+// OffsetManager uses Kafka to store and fetch consumed partition offsets.
+type OffsetManager interface {
+	// ManagePartition creates a PartitionOffsetManager on the given topic/partition.
+	// It will return an error if this OffsetManager is already managing the given
+	// topic/partition.
+	ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
+
+	// Close stops the OffsetManager from managing offsets. It is required to call
+	// this function before an OffsetManager object passes out of scope, as it
+	// will otherwise leak memory. You must call this after all the
+	// PartitionOffsetManagers are closed.
+	Close() error
+}
+
+type offsetManager struct {
+	client Client
+	conf   *Config
+	group  string
+	ticker *time.Ticker
+
+	memberID   string
+	generation int32
+
+	broker     *Broker
+	brokerLock sync.RWMutex
+
+	poms     map[string]map[int32]*partitionOffsetManager
+	pomsLock sync.RWMutex
+
+	closeOnce sync.Once
+	closing   chan none
+	closed    chan none
+}
+
+// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
+// It is still necessary to call Close() on the underlying client when finished with the offset manager.
+func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
+	return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client)
+}
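+
+// A hedged usage sketch (client is an existing Client; error handling is
+// elided for brevity):
+//
+//	om, _ := NewOffsetManagerFromClient("my-group", client)
+//	defer om.Close()
+//	pom, _ := om.ManagePartition("my-topic", 0)
+//	defer pom.Close()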
+
+func newOffsetManagerFromClient(group, memberID string, generation int32, client Client) (*offsetManager, error) {
+	// Check that we are not dealing with a closed Client before processing any other arguments
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	conf := client.Config()
+	om := &offsetManager{
+		client: client,
+		conf:   conf,
+		group:  group,
+		ticker: time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval),
+		poms:   make(map[string]map[int32]*partitionOffsetManager),
+
+		memberID:   memberID,
+		generation: generation,
+
+		closing: make(chan none),
+		closed:  make(chan none),
+	}
+	go withRecover(om.mainLoop)
+
+	return om, nil
+}
+
+func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
+	pom, err := om.newPartitionOffsetManager(topic, partition)
+	if err != nil {
+		return nil, err
+	}
+
+	om.pomsLock.Lock()
+	defer om.pomsLock.Unlock()
+
+	topicManagers := om.poms[topic]
+	if topicManagers == nil {
+		topicManagers = make(map[int32]*partitionOffsetManager)
+		om.poms[topic] = topicManagers
+	}
+
+	if topicManagers[partition] != nil {
+		return nil, ConfigurationError("That topic/partition is already being managed")
+	}
+
+	topicManagers[partition] = pom
+	return pom, nil
+}
+
+func (om *offsetManager) Close() error {
+	om.closeOnce.Do(func() {
+		// exit the mainLoop
+		close(om.closing)
+		<-om.closed
+
+		// mark all POMs as closed
+		om.asyncClosePOMs()
+
+		// flush one last time
+		for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ {
+			om.flushToBroker()
+			if om.releasePOMs(false) == 0 {
+				break
+			}
+		}
+
+		om.releasePOMs(true)
+		om.brokerLock.Lock()
+		om.broker = nil
+		om.brokerLock.Unlock()
+	})
+	return nil
+}
+
+func (om *offsetManager) computeBackoff(retries int) time.Duration {
+	if om.conf.Metadata.Retry.BackoffFunc != nil {
+		return om.conf.Metadata.Retry.BackoffFunc(retries, om.conf.Metadata.Retry.Max)
+	} else {
+		return om.conf.Metadata.Retry.Backoff
+	}
+}
+
+func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, string, error) {
+	broker, err := om.coordinator()
+	if err != nil {
+		if retries <= 0 {
+			return 0, "", err
+		}
+		return om.fetchInitialOffset(topic, partition, retries-1)
+	}
+
+	req := new(OffsetFetchRequest)
+	req.Version = 1
+	req.ConsumerGroup = om.group
+	req.AddPartition(topic, partition)
+
+	resp, err := broker.FetchOffset(req)
+	if err != nil {
+		if retries <= 0 {
+			return 0, "", err
+		}
+		om.releaseCoordinator(broker)
+		return om.fetchInitialOffset(topic, partition, retries-1)
+	}
+
+	block := resp.GetBlock(topic, partition)
+	if block == nil {
+		return 0, "", ErrIncompleteResponse
+	}
+
+	switch block.Err {
+	case ErrNoError:
+		return block.Offset, block.Metadata, nil
+	case ErrNotCoordinatorForConsumer:
+		if retries <= 0 {
+			return 0, "", block.Err
+		}
+		om.releaseCoordinator(broker)
+		return om.fetchInitialOffset(topic, partition, retries-1)
+	case ErrOffsetsLoadInProgress:
+		if retries <= 0 {
+			return 0, "", block.Err
+		}
+		backoff := om.computeBackoff(retries)
+		select {
+		case <-om.closing:
+			return 0, "", block.Err
+		case <-time.After(backoff):
+		}
+		return om.fetchInitialOffset(topic, partition, retries-1)
+	default:
+		return 0, "", block.Err
+	}
+}
+
+func (om *offsetManager) coordinator() (*Broker, error) {
+	om.brokerLock.RLock()
+	broker := om.broker
+	om.brokerLock.RUnlock()
+
+	if broker != nil {
+		return broker, nil
+	}
+
+	om.brokerLock.Lock()
+	defer om.brokerLock.Unlock()
+
+	if broker := om.broker; broker != nil {
+		return broker, nil
+	}
+
+	if err := om.client.RefreshCoordinator(om.group); err != nil {
+		return nil, err
+	}
+
+	broker, err := om.client.Coordinator(om.group)
+	if err != nil {
+		return nil, err
+	}
+
+	om.broker = broker
+	return broker, nil
+}
+
+func (om *offsetManager) releaseCoordinator(b *Broker) {
+	om.brokerLock.Lock()
+	if om.broker == b {
+		om.broker = nil
+	}
+	om.brokerLock.Unlock()
+}
+
+func (om *offsetManager) mainLoop() {
+	defer om.ticker.Stop()
+	defer close(om.closed)
+
+	for {
+		select {
+		case <-om.ticker.C:
+			om.flushToBroker()
+			om.releasePOMs(false)
+		case <-om.closing:
+			return
+		}
+	}
+}
+
+// flushToBroker is a no-op when offset auto-commit is disabled
+func (om *offsetManager) flushToBroker() {
+	if !om.conf.Consumer.Offsets.AutoCommit.Enable {
+		return
+	}
+
+	req := om.constructRequest()
+	if req == nil {
+		return
+	}
+
+	broker, err := om.coordinator()
+	if err != nil {
+		om.handleError(err)
+		return
+	}
+
+	resp, err := broker.CommitOffset(req)
+	if err != nil {
+		om.handleError(err)
+		om.releaseCoordinator(broker)
+		_ = broker.Close()
+		return
+	}
+
+	om.handleResponse(broker, req, resp)
+}
+
+func (om *offsetManager) constructRequest() *OffsetCommitRequest {
+	var r *OffsetCommitRequest
+	var perPartitionTimestamp int64
+	if om.conf.Consumer.Offsets.Retention == 0 {
+		perPartitionTimestamp = ReceiveTime
+		r = &OffsetCommitRequest{
+			Version:                 1,
+			ConsumerGroup:           om.group,
+			ConsumerID:              om.memberID,
+			ConsumerGroupGeneration: om.generation,
+		}
+	} else {
+		r = &OffsetCommitRequest{
+			Version:                 2,
+			RetentionTime:           int64(om.conf.Consumer.Offsets.Retention / time.Millisecond),
+			ConsumerGroup:           om.group,
+			ConsumerID:              om.memberID,
+			ConsumerGroupGeneration: om.generation,
+		}
+	}
+
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	for _, topicManagers := range om.poms {
+		for _, pom := range topicManagers {
+			pom.lock.Lock()
+			if pom.dirty {
+				r.AddBlock(pom.topic, pom.partition, pom.offset, perPartitionTimestamp, pom.metadata)
+			}
+			pom.lock.Unlock()
+		}
+	}
+
+	if len(r.blocks) > 0 {
+		return r
+	}
+
+	return nil
+}
+
+func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest, resp *OffsetCommitResponse) {
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	for _, topicManagers := range om.poms {
+		for _, pom := range topicManagers {
+			if req.blocks[pom.topic] == nil || req.blocks[pom.topic][pom.partition] == nil {
+				continue
+			}
+
+			var err KError
+			var ok bool
+
+			if resp.Errors[pom.topic] == nil {
+				pom.handleError(ErrIncompleteResponse)
+				continue
+			}
+			if err, ok = resp.Errors[pom.topic][pom.partition]; !ok {
+				pom.handleError(ErrIncompleteResponse)
+				continue
+			}
+
+			switch err {
+			case ErrNoError:
+				block := req.blocks[pom.topic][pom.partition]
+				pom.updateCommitted(block.offset, block.metadata)
+			case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
+				ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
+				// not a critical error, we just need to redispatch
+				om.releaseCoordinator(broker)
+			case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
+				// nothing we can do about this, just tell the user and carry on
+				pom.handleError(err)
+			case ErrOffsetsLoadInProgress:
+				// nothing wrong but we didn't commit, we'll get it next time round
+			case ErrUnknownTopicOrPartition:
+				// let the user know *and* try redispatching - if topic-auto-create is
+				// enabled, redispatching should trigger a metadata req and create the
+			// topic; if not, then redispatching won't help, but we've let the user
+				// know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706)
+				fallthrough
+			default:
+				// dunno, tell the user and try redispatching
+				pom.handleError(err)
+				om.releaseCoordinator(broker)
+			}
+		}
+	}
+}
+
+func (om *offsetManager) handleError(err error) {
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	for _, topicManagers := range om.poms {
+		for _, pom := range topicManagers {
+			pom.handleError(err)
+		}
+	}
+}
+
+func (om *offsetManager) asyncClosePOMs() {
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	for _, topicManagers := range om.poms {
+		for _, pom := range topicManagers {
+			pom.AsyncClose()
+		}
+	}
+}
+
+// Releases/removes closed POMs once they are clean (or when forced)
+func (om *offsetManager) releasePOMs(force bool) (remaining int) {
+	om.pomsLock.Lock()
+	defer om.pomsLock.Unlock()
+
+	for topic, topicManagers := range om.poms {
+		for partition, pom := range topicManagers {
+			pom.lock.Lock()
+			releaseDue := pom.done && (force || !pom.dirty)
+			pom.lock.Unlock()
+
+			if releaseDue {
+				pom.release()
+
+				delete(om.poms[topic], partition)
+				if len(om.poms[topic]) == 0 {
+					delete(om.poms, topic)
+				}
+			}
+		}
+		remaining += len(om.poms[topic])
+	}
+	return
+}
+
+func (om *offsetManager) findPOM(topic string, partition int32) *partitionOffsetManager {
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	if partitions, ok := om.poms[topic]; ok {
+		if pom, ok := partitions[partition]; ok {
+			return pom
+		}
+	}
+	return nil
+}
+
+// Partition Offset Manager
+
+// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
+// on a partition offset manager to avoid leaks; it will not be garbage-collected automatically when it passes
+// out of scope.
+type PartitionOffsetManager interface {
+	// NextOffset returns the next offset that should be consumed for the managed
+	// partition, accompanied by metadata which can be used to reconstruct the state
+	// of the partition consumer when it resumes. NextOffset() will return
+	// `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
+	// was committed for this partition yet.
+	NextOffset() (int64, string)
+
+	// MarkOffset marks the provided offset, alongside a metadata string
+	// that represents the state of the partition consumer at that point in time. The
+	// metadata string can be used by another consumer to restore that state, so it
+	// can resume consumption.
+	//
+	// To follow upstream conventions, you are expected to mark the offset of the
+	// next message to read, not the last message read. Thus, when calling `MarkOffset`
+	// you should typically add one to the offset of the last consumed message.
+	//
+	// Note: calling MarkOffset does not necessarily commit the offset to the backend
+	// store immediately for efficiency reasons, and it may never be committed if
+	// your application crashes. This means that you may end up processing the same
+	// message twice, and your processing should ideally be idempotent.
+	MarkOffset(offset int64, metadata string)
+
+	// ResetOffset resets to the provided offset, alongside a metadata string that
+	// represents the state of the partition consumer at that point in time. Reset
+	// acts as a counterpart to MarkOffset, the difference being that it allows
+	// resetting an offset to an earlier or smaller value, whereas MarkOffset only
+	// allows incrementing the offset. See MarkOffset for more details.
+	ResetOffset(offset int64, metadata string)
+
+	// Errors returns a read channel of errors that occur during offset management, if
+	// enabled. By default, errors are logged and not returned over this channel. If
+	// you want to implement any custom error handling, set your config's
+	// Consumer.Return.Errors setting to true, and read from this channel.
+	Errors() <-chan *ConsumerError
+
+	// AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
+	// return immediately, after which you should wait until the 'errors' channel has
+	// been drained and closed. It is required to call this function (or Close) before
+	// a PartitionOffsetManager object passes out of scope, as it will otherwise leak
+	// memory. You must call this before calling Close on the underlying client.
+	AsyncClose()
+
+	// Close stops the PartitionOffsetManager from managing offsets. It is required to
+	// call this function (or AsyncClose) before a PartitionOffsetManager object
+	// passes out of scope, as it will otherwise leak memory. You must call this
+	// before calling Close on the underlying client.
+	Close() error
+}
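+
+// A sketch of the mark-the-next-offset convention described above (msg is
+// assumed to be a *ConsumerMessage from a partition consumer):
+//
+//	offset, metadata := pom.NextOffset()
+//	// ... resume consuming from offset ...
+//	pom.MarkOffset(msg.Offset+1, metadata)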
+
+type partitionOffsetManager struct {
+	parent    *offsetManager
+	topic     string
+	partition int32
+
+	lock     sync.Mutex
+	offset   int64
+	metadata string
+	dirty    bool
+	done     bool
+
+	releaseOnce sync.Once
+	errors      chan *ConsumerError
+}
+
+func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
+	offset, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max)
+	if err != nil {
+		return nil, err
+	}
+
+	return &partitionOffsetManager{
+		parent:    om,
+		topic:     topic,
+		partition: partition,
+		errors:    make(chan *ConsumerError, om.conf.ChannelBufferSize),
+		offset:    offset,
+		metadata:  metadata,
+	}, nil
+}
+
+func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
+	return pom.errors
+}
+
+func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if offset > pom.offset {
+		pom.offset = offset
+		pom.metadata = metadata
+		pom.dirty = true
+	}
+}
+
+func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if offset <= pom.offset {
+		pom.offset = offset
+		pom.metadata = metadata
+		pom.dirty = true
+	}
+}
+
+func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if pom.offset == offset && pom.metadata == metadata {
+		pom.dirty = false
+	}
+}
+
+func (pom *partitionOffsetManager) NextOffset() (int64, string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if pom.offset >= 0 {
+		return pom.offset, pom.metadata
+	}
+
+	return pom.parent.conf.Consumer.Offsets.Initial, ""
+}
+
+func (pom *partitionOffsetManager) AsyncClose() {
+	pom.lock.Lock()
+	pom.done = true
+	pom.lock.Unlock()
+}
+
+func (pom *partitionOffsetManager) Close() error {
+	pom.AsyncClose()
+
+	var errors ConsumerErrors
+	for err := range pom.errors {
+		errors = append(errors, err)
+	}
+
+	if len(errors) > 0 {
+		return errors
+	}
+	return nil
+}
+
+func (pom *partitionOffsetManager) handleError(err error) {
+	cErr := &ConsumerError{
+		Topic:     pom.topic,
+		Partition: pom.partition,
+		Err:       err,
+	}
+
+	if pom.parent.conf.Consumer.Return.Errors {
+		pom.errors <- cErr
+	} else {
+		Logger.Println(cErr)
+	}
+}
+
+func (pom *partitionOffsetManager) release() {
+	pom.releaseOnce.Do(func() {
+		close(pom.errors)
+	})
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go
new file mode 100644
index 0000000..326c372
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_request.go
@@ -0,0 +1,156 @@
+package sarama
+
+type offsetRequestBlock struct {
+	time       int64
+	maxOffsets int32 // Only used in version 0
+}
+
+func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
+	pe.putInt64(int64(b.time))
+	if version == 0 {
+		pe.putInt32(b.maxOffsets)
+	}
+
+	return nil
+}
+
+func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+	if b.time, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if version == 0 {
+		if b.maxOffsets, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+type OffsetRequest struct {
+	Version        int16
+	replicaID      int32
+	isReplicaIDSet bool
+	blocks         map[string]map[int32]*offsetRequestBlock
+}
+
+func (r *OffsetRequest) encode(pe packetEncoder) error {
+	if r.isReplicaIDSet {
+		pe.putInt32(r.replicaID)
+	} else {
+		// default replica ID is always -1 for clients
+		pe.putInt32(-1)
+	}
+
+	err := pe.putArrayLength(len(r.blocks))
+	if err != nil {
+		return err
+	}
+	for topic, partitions := range r.blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err = block.encode(pe, r.Version); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
+	r.Version = version
+
+	replicaID, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if replicaID >= 0 {
+		r.SetReplicaID(replicaID)
+	}
+
+	blockCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if blockCount == 0 {
+		return nil
+	}
+	r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+	for i := 0; i < blockCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			block := &offsetRequestBlock{}
+			if err := block.decode(pd, version); err != nil {
+				return err
+			}
+			r.blocks[topic][partition] = block
+		}
+	}
+	return nil
+}
+
+func (r *OffsetRequest) key() int16 {
+	return 2
+}
+
+func (r *OffsetRequest) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_10_1_0
+	default:
+		return MinVersion
+	}
+}
+
+func (r *OffsetRequest) SetReplicaID(id int32) {
+	r.replicaID = id
+	r.isReplicaIDSet = true
+}
+
+func (r *OffsetRequest) ReplicaID() int32 {
+	if r.isReplicaIDSet {
+		return r.replicaID
+	}
+	return -1
+}
+
+func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
+	if r.blocks == nil {
+		r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+	}
+
+	tmp := new(offsetRequestBlock)
+	tmp.time = time
+	if r.Version == 0 {
+		tmp.maxOffsets = maxOffsets
+	}
+
+	r.blocks[topic][partitionID] = tmp
+}
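
OffsetRequest is a wire-protocol type; application code usually reaches it indirectly through Client.GetOffset, which builds the request and unpacks the response. A usage sketch, with broker and topic names as placeholders:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// OffsetNewest (-1) asks for the offset of the next message that will
	// be produced; OffsetOldest (-2) asks for the oldest retained offset.
	newest, err := client.GetOffset("example-topic", 0, sarama.OffsetNewest)
	if err != nil {
		log.Fatal(err)
	}
	oldest, err := client.GetOffset("example-topic", 0, sarama.OffsetOldest)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("partition 0 spans offsets [%d, %d)", oldest, newest)
}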
diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go
new file mode 100644
index 0000000..8b2193f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_response.go
@@ -0,0 +1,174 @@
+package sarama
+
+type OffsetResponseBlock struct {
+	Err       KError
+	Offsets   []int64 // Version 0
+	Offset    int64   // Version 1
+	Timestamp int64   // Version 1
+}
+
+func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.Err = KError(tmp)
+
+	if version == 0 {
+		b.Offsets, err = pd.getInt64Array()
+
+		return err
+	}
+
+	b.Timestamp, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	b.Offset, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	// For backwards compatibility put the offset in the offsets array too
+	b.Offsets = []int64{b.Offset}
+
+	return nil
+}
+
+func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt16(int16(b.Err))
+
+	if version == 0 {
+		return pe.putInt64Array(b.Offsets)
+	}
+
+	pe.putInt64(b.Timestamp)
+	pe.putInt64(b.Offset)
+
+	return nil
+}
+
+type OffsetResponse struct {
+	Version int16
+	Blocks  map[string]map[int32]*OffsetResponseBlock
+}
+
+func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numBlocks, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
+
+		for j := 0; j < numBlocks; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := new(OffsetResponseBlock)
+			err = block.decode(pd, version)
+			if err != nil {
+				return err
+			}
+			r.Blocks[name][id] = block
+		}
+	}
+
+	return nil
+}
+
+func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
+func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
+	if err = pe.putArrayLength(len(r.Blocks)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.Blocks {
+		if err = pe.putString(topic); err != nil {
+			return err
+		}
+		if err = pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err = block.encode(pe, r.version()); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (r *OffsetResponse) key() int16 {
+	return 2
+}
+
+func (r *OffsetResponse) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_10_1_0
+	default:
+		return MinVersion
+	}
+}
+
+// testing API
+
+func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
+	}
+	byTopic, ok := r.Blocks[topic]
+	if !ok {
+		byTopic = make(map[int32]*OffsetResponseBlock)
+		r.Blocks[topic] = byTopic
+	}
+	byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
+}
diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go
new file mode 100644
index 0000000..9be854c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/packet_decoder.go
@@ -0,0 +1,61 @@
+package sarama
+
+// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
+// Types implementing Decoder only need to worry about calling methods like GetString,
+// not about how a string is represented in Kafka.
+type packetDecoder interface {
+	// Primitives
+	getInt8() (int8, error)
+	getInt16() (int16, error)
+	getInt32() (int32, error)
+	getInt64() (int64, error)
+	getVarint() (int64, error)
+	getArrayLength() (int, error)
+	getBool() (bool, error)
+
+	// Collections
+	getBytes() ([]byte, error)
+	getVarintBytes() ([]byte, error)
+	getRawBytes(length int) ([]byte, error)
+	getString() (string, error)
+	getNullableString() (*string, error)
+	getInt32Array() ([]int32, error)
+	getInt64Array() ([]int64, error)
+	getStringArray() ([]string, error)
+
+	// Subsets
+	remaining() int
+	getSubset(length int) (packetDecoder, error)
+	peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset
+	peekInt8(offset int) (int8, error)              // similar to peek, but just one byte
+
+	// Stacks, see PushDecoder
+	push(in pushDecoder) error
+	pop() error
+}
+
+// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
+// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
+// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
+// depend upon have been decoded.
+type pushDecoder interface {
+	// Saves the offset into the input buffer as the location to actually read the calculated value when able.
+	saveOffset(in int)
+
+	// Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32).
+	reserveLength() int
+
+	// Indicates that all required data is now available to calculate and check the field.
+	// SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
+	// of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
+	check(curOffset int, buf []byte) error
+}
+
+// dynamicPushDecoder extends the interface of pushDecoder for use cases where the length of
+// the field itself is unknown until its value has been decoded (for instance varint-encoded
+// length fields).
+// During push, the dynamicPushDecoder.decode() method will be called instead of reserveLength().
+type dynamicPushDecoder interface {
+	pushDecoder
+	decoder
+}
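
To make the push/pop contract concrete, here is an illustrative pushDecoder in the shape the comment above describes: it reserves four bytes for a big-endian int32 length and, on pop, checks the declared length against the bytes actually decoded after the field. The type is a sketch for this document, not part of the package:

package sarama

import (
	"encoding/binary"
	"fmt"
)

// lengthCheckDecoder is an illustrative pushDecoder: it reserves 4 bytes
// for a big-endian int32 length and, on pop, verifies that length against
// the bytes decoded after the field.
type lengthCheckDecoder struct {
	startOffset int
}

func (d *lengthCheckDecoder) saveOffset(in int) { d.startOffset = in }

func (d *lengthCheckDecoder) reserveLength() int { return 4 }

func (d *lengthCheckDecoder) check(curOffset int, buf []byte) error {
	declared := int(int32(binary.BigEndian.Uint32(buf[d.startOffset:])))
	actual := curOffset - d.startOffset - 4 // bytes decoded since the field
	if declared != actual {
		return PacketDecodingError{fmt.Sprintf("length mismatch: declared %d, decoded %d", declared, actual)}
	}
	return nil
}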
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go
new file mode 100644
index 0000000..67b8dae
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/packet_encoder.go
@@ -0,0 +1,65 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
+// Types implementing Encoder only need to worry about calling methods like PutString,
+// not about how a string is represented in Kafka.
+type packetEncoder interface {
+	// Primitives
+	putInt8(in int8)
+	putInt16(in int16)
+	putInt32(in int32)
+	putInt64(in int64)
+	putVarint(in int64)
+	putArrayLength(in int) error
+	putBool(in bool)
+
+	// Collections
+	putBytes(in []byte) error
+	putVarintBytes(in []byte) error
+	putRawBytes(in []byte) error
+	putString(in string) error
+	putNullableString(in *string) error
+	putStringArray(in []string) error
+	putInt32Array(in []int32) error
+	putInt64Array(in []int64) error
+
+	// Provide the current offset to record the batch size metric
+	offset() int
+
+	// Stacks, see PushEncoder
+	push(in pushEncoder)
+	pop() error
+
+	// To record metrics when provided
+	metricRegistry() metrics.Registry
+}
+
+// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
+// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
+// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
+// depend upon have been written.
+type pushEncoder interface {
+	// Saves the offset into the input buffer as the location to actually write the calculated value when able.
+	saveOffset(in int)
+
+	// Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32).
+	reserveLength() int
+
+	// Indicates that all required data is now available to calculate and write the field.
+	// SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
+	// of data to the saved offset, based on the data between the saved offset and curOffset.
+	run(curOffset int, buf []byte) error
+}
+
+// dynamicPushEncoder extends the interface of pushEncoder for use cases where the length of
+// the field itself is unknown until its value has been computed (for instance varint-encoded
+// length fields).
+type dynamicPushEncoder interface {
+	pushEncoder
+
+	// Called during pop() to adjust the length of the field.
+	// It should return the difference in bytes between the last computed length and current length.
+	adjustLength(currOffset int) int
+}
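
The mirror image on the encoding side, sketched the same way and again not part of the package: push() reserves four bytes and run() back-fills them with the number of bytes written between the reserved field and the pop():

package sarama

import "encoding/binary"

// lengthWriteEncoder is an illustrative pushEncoder: push() reserves
// 4 bytes, and run() back-fills them with the number of bytes written
// after the field.
type lengthWriteEncoder struct {
	startOffset int
}

func (e *lengthWriteEncoder) saveOffset(in int) { e.startOffset = in }

func (e *lengthWriteEncoder) reserveLength() int { return 4 }

func (e *lengthWriteEncoder) run(curOffset int, buf []byte) error {
	payload := curOffset - e.startOffset - 4 // bytes written after the field
	binary.BigEndian.PutUint32(buf[e.startOffset:], uint32(payload))
	return nil
}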
diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go
new file mode 100644
index 0000000..6a708e7
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/partitioner.go
@@ -0,0 +1,217 @@
+package sarama
+
+import (
+	"hash"
+	"hash/fnv"
+	"math/rand"
+	"time"
+)
+
+// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
+// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
+// as simple default implementations.
+type Partitioner interface {
+	// Partition takes a message and partition count and chooses a partition
+	Partition(message *ProducerMessage, numPartitions int32) (int32, error)
+
+	// RequiresConsistency indicates to the user of the partitioner whether the
+	// mapping of key->partition is consistent or not. Specifically, if a
+	// partitioner requires consistency then it must be allowed to choose from all
+	// partitions (even ones known to be unavailable), and its choice must be
+	// respected by the caller. The obvious example is the HashPartitioner.
+	RequiresConsistency() bool
+}
+
+// DynamicConsistencyPartitioner can optionally be implemented by Partitioners
+// in order to allow more flexibility than is originally allowed by the
+// RequiresConsistency method in the Partitioner interface. This allows
+// partitioners to require consistency sometimes, but not all times. It's useful
+// for, e.g., the HashPartitioner, which does not require consistency if the
+// message key is nil.
+type DynamicConsistencyPartitioner interface {
+	Partitioner
+
+	// MessageRequiresConsistency is similar to Partitioner.RequiresConsistency,
+	// but takes in the message being partitioned so that the partitioner can
+	// make a per-message determination.
+	MessageRequiresConsistency(message *ProducerMessage) bool
+}
+
+// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
+type PartitionerConstructor func(topic string) Partitioner
+
+type manualPartitioner struct{}
+
+// HashPartitionerOption lets you modify default values of the partitioner
+type HashPartitionerOption func(*hashPartitioner)
+
+// WithAbsFirst means that the partitioner handles absolute values
+// in the same way as the reference Java implementation
+func WithAbsFirst() HashPartitionerOption {
+	return func(hp *hashPartitioner) {
+		hp.referenceAbs = true
+	}
+}
+
+// WithCustomHashFunction lets you specify what hash function to use for the partitioning
+func WithCustomHashFunction(hasher func() hash.Hash32) HashPartitionerOption {
+	return func(hp *hashPartitioner) {
+		hp.hasher = hasher()
+	}
+}
+
+// WithCustomFallbackPartitioner lets you specify the fallback Partitioner to use
+// when a message has no distribution key
+func WithCustomFallbackPartitioner(randomHP *hashPartitioner) HashPartitionerOption {
+	return func(hp *hashPartitioner) {
+		hp.random = randomHP
+	}
+}
+
+// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
+// ProducerMessage's Partition field as the partition to produce to.
+func NewManualPartitioner(topic string) Partitioner {
+	return new(manualPartitioner)
+}
+
+func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	return message.Partition, nil
+}
+
+func (p *manualPartitioner) RequiresConsistency() bool {
+	return true
+}
+
+type randomPartitioner struct {
+	generator *rand.Rand
+}
+
+// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
+func NewRandomPartitioner(topic string) Partitioner {
+	p := new(randomPartitioner)
+	p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
+	return p
+}
+
+func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	return int32(p.generator.Intn(int(numPartitions))), nil
+}
+
+func (p *randomPartitioner) RequiresConsistency() bool {
+	return false
+}
+
+type roundRobinPartitioner struct {
+	partition int32
+}
+
+// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
+func NewRoundRobinPartitioner(topic string) Partitioner {
+	return &roundRobinPartitioner{}
+}
+
+func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	if p.partition >= numPartitions {
+		p.partition = 0
+	}
+	ret := p.partition
+	p.partition++
+	return ret, nil
+}
+
+func (p *roundRobinPartitioner) RequiresConsistency() bool {
+	return false
+}
+
+type hashPartitioner struct {
+	random       Partitioner
+	hasher       hash.Hash32
+	referenceAbs bool
+}
+
+// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of a custom hasher.
+// The argument is a function providing a hash.Hash32 instance. This ensures that each partition
+// dispatcher gets its own hasher, avoiding the concurrency issues of a shared instance.
+func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
+	return func(topic string) Partitioner {
+		p := new(hashPartitioner)
+		p.random = NewRandomPartitioner(topic)
+		p.hasher = hasher()
+		p.referenceAbs = false
+		return p
+	}
+}
+
+// NewCustomPartitioner creates a default Partitioner but lets you specify the behavior of each component via options
+func NewCustomPartitioner(options ...HashPartitionerOption) PartitionerConstructor {
+	return func(topic string) Partitioner {
+		p := new(hashPartitioner)
+		p.random = NewRandomPartitioner(topic)
+		p.hasher = fnv.New32a()
+		p.referenceAbs = false
+		for _, option := range options {
+			option(p)
+		}
+		return p
+	}
+}
+
+// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
+// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
+// modulus the number of partitions. This ensures that messages with the same key always end up on the
+// same partition.
+func NewHashPartitioner(topic string) Partitioner {
+	p := new(hashPartitioner)
+	p.random = NewRandomPartitioner(topic)
+	p.hasher = fnv.New32a()
+	p.referenceAbs = false
+	return p
+}
+
+// NewReferenceHashPartitioner is like NewHashPartitioner except that it handles absolute values
+// in the same way as the reference Java implementation. NewHashPartitioner was supposed to do
+// that but it had a mistake and now there are people depending on both behaviours. This will
+// all go away on the next major version bump.
+func NewReferenceHashPartitioner(topic string) Partitioner {
+	p := new(hashPartitioner)
+	p.random = NewRandomPartitioner(topic)
+	p.hasher = fnv.New32a()
+	p.referenceAbs = true
+	return p
+}
+
+func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	if message.Key == nil {
+		return p.random.Partition(message, numPartitions)
+	}
+	bytes, err := message.Key.Encode()
+	if err != nil {
+		return -1, err
+	}
+	p.hasher.Reset()
+	_, err = p.hasher.Write(bytes)
+	if err != nil {
+		return -1, err
+	}
+	var partition int32
+	// Turns out we were computing the absolute value in a subtly different way from the
+	// upstream implementation, but now we need to maintain backwards compatibility for
+	// people who started using the old behaviour; if referenceAbs is set we are compatible
+	// with the reference Java client but not with earlier Sarama versions
+	if p.referenceAbs {
+		partition = (int32(p.hasher.Sum32()) & 0x7fffffff) % numPartitions
+	} else {
+		partition = int32(p.hasher.Sum32()) % numPartitions
+		if partition < 0 {
+			partition = -partition
+		}
+	}
+	return partition, nil
+}
+
+func (p *hashPartitioner) RequiresConsistency() bool {
+	return true
+}
+
+func (p *hashPartitioner) MessageRequiresConsistency(message *ProducerMessage) bool {
+	return message.Key != nil
+}
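
As a usage sketch, implementing the Partitioner interface above takes just two methods. This hypothetical partitioner pins every message to partition 0 and is wired in through the producer configuration:

package main

import "github.com/Shopify/sarama"

// singlePartitioner is a hypothetical Partitioner that routes every message
// to partition 0. Its choice must be respected even when partition 0 is
// unavailable, so it reports RequiresConsistency() == true.
type singlePartitioner struct{}

func (singlePartitioner) Partition(msg *sarama.ProducerMessage, numPartitions int32) (int32, error) {
	return 0, nil
}

func (singlePartitioner) RequiresConsistency() bool { return true }

func main() {
	config := sarama.NewConfig()
	// Producer.Partitioner expects a PartitionerConstructor.
	config.Producer.Partitioner = func(topic string) sarama.Partitioner {
		return singlePartitioner{}
	}
	_ = config // pass to sarama.NewAsyncProducer(...) as usual
}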
diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go
new file mode 100644
index 0000000..b633cd1
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/prep_encoder.go
@@ -0,0 +1,153 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"fmt"
+	"math"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+type prepEncoder struct {
+	stack  []pushEncoder
+	length int
+}
+
+// primitives
+
+func (pe *prepEncoder) putInt8(in int8) {
+	pe.length++
+}
+
+func (pe *prepEncoder) putInt16(in int16) {
+	pe.length += 2
+}
+
+func (pe *prepEncoder) putInt32(in int32) {
+	pe.length += 4
+}
+
+func (pe *prepEncoder) putInt64(in int64) {
+	pe.length += 8
+}
+
+func (pe *prepEncoder) putVarint(in int64) {
+	var buf [binary.MaxVarintLen64]byte
+	pe.length += binary.PutVarint(buf[:], in)
+}
+
+func (pe *prepEncoder) putArrayLength(in int) error {
+	if in > math.MaxInt32 {
+		return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
+	}
+	pe.length += 4
+	return nil
+}
+
+func (pe *prepEncoder) putBool(in bool) {
+	pe.length++
+}
+
+// arrays
+
+func (pe *prepEncoder) putBytes(in []byte) error {
+	pe.length += 4
+	if in == nil {
+		return nil
+	}
+	return pe.putRawBytes(in)
+}
+
+func (pe *prepEncoder) putVarintBytes(in []byte) error {
+	if in == nil {
+		pe.putVarint(-1)
+		return nil
+	}
+	pe.putVarint(int64(len(in)))
+	return pe.putRawBytes(in)
+}
+
+func (pe *prepEncoder) putRawBytes(in []byte) error {
+	if len(in) > math.MaxInt32 {
+		return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
+	}
+	pe.length += len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putNullableString(in *string) error {
+	if in == nil {
+		pe.length += 2
+		return nil
+	}
+	return pe.putString(*in)
+}
+
+func (pe *prepEncoder) putString(in string) error {
+	pe.length += 2
+	if len(in) > math.MaxInt16 {
+		return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
+	}
+	pe.length += len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putStringArray(in []string) error {
+	err := pe.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+
+	for _, str := range in {
+		if err := pe.putString(str); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (pe *prepEncoder) putInt32Array(in []int32) error {
+	err := pe.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	pe.length += 4 * len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putInt64Array(in []int64) error {
+	err := pe.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	pe.length += 8 * len(in)
+	return nil
+}
+
+func (pe *prepEncoder) offset() int {
+	return pe.length
+}
+
+// stackable
+
+func (pe *prepEncoder) push(in pushEncoder) {
+	in.saveOffset(pe.length)
+	pe.length += in.reserveLength()
+	pe.stack = append(pe.stack, in)
+}
+
+func (pe *prepEncoder) pop() error {
+	in := pe.stack[len(pe.stack)-1]
+	pe.stack = pe.stack[:len(pe.stack)-1]
+	if dpe, ok := in.(dynamicPushEncoder); ok {
+		pe.length += dpe.adjustLength(pe.length)
+	}
+
+	return nil
+}
+
+// we do not record metrics during the prep encoder pass
+func (pe *prepEncoder) metricRegistry() metrics.Registry {
+	return nil
+}
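
prepEncoder is the sizing half of a two-pass encoding scheme: a first pass only accumulates lengths, then a realEncoder pass (later in this diff) writes into a buffer allocated at exactly that size. A simplified sketch of how the two passes fit together; the encoder interface and the helper's name are assumed here for illustration:

package sarama

import "github.com/rcrowley/go-metrics"

// twoPassEncode sketches the pattern (names are illustrative): size the
// payload with a prepEncoder, allocate once, then write with a realEncoder.
func twoPassEncode(e encoder, registry metrics.Registry) ([]byte, error) {
	var prep prepEncoder
	if err := e.encode(&prep); err != nil {
		return nil, err
	}

	re := realEncoder{
		raw:      make([]byte, prep.length),
		registry: registry,
	}
	if err := e.encode(&re); err != nil {
		return nil, err
	}
	return re.raw, nil
}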
diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go
new file mode 100644
index 0000000..0c755d0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_request.go
@@ -0,0 +1,252 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
+// it must see before responding. Any of the constants defined here are valid. On broker versions
+// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
+// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced
+// by setting the `min.insync.replicas` value in the broker's configuration).
+type RequiredAcks int16
+
+const (
+	// NoResponse doesn't send any response, the TCP ACK is all you get.
+	NoResponse RequiredAcks = 0
+	// WaitForLocal waits for only the local commit to succeed before responding.
+	WaitForLocal RequiredAcks = 1
+	// WaitForAll waits for all in-sync replicas to commit before responding.
+	// The minimum number of in-sync replicas is configured on the broker via
+	// the `min.insync.replicas` configuration key.
+	WaitForAll RequiredAcks = -1
+)
+
+type ProduceRequest struct {
+	TransactionalID *string
+	RequiredAcks    RequiredAcks
+	Timeout         int32
+	Version         int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11
+	records         map[string]map[int32]Records
+}
+
+func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram,
+	topicCompressionRatioMetric metrics.Histogram) int64 {
+	var topicRecordCount int64
+	for _, messageBlock := range msgSet.Messages {
+		// Is this a fake "message" wrapping real messages?
+		if messageBlock.Msg.Set != nil {
+			topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
+		} else {
+			// A single uncompressed message
+			topicRecordCount++
+		}
+		// Better be safe than sorry when computing the compression ratio
+		if messageBlock.Msg.compressedSize != 0 {
+			compressionRatio := float64(len(messageBlock.Msg.Value)) /
+				float64(messageBlock.Msg.compressedSize)
+			// Histograms do not support decimal values, so multiply the ratio by 100 for better precision
+			intCompressionRatio := int64(100 * compressionRatio)
+			compressionRatioMetric.Update(intCompressionRatio)
+			topicCompressionRatioMetric.Update(intCompressionRatio)
+		}
+	}
+	return topicRecordCount
+}
+
+func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram,
+	topicCompressionRatioMetric metrics.Histogram) int64 {
+	if recordBatch.compressedRecords != nil {
+		compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100)
+		compressionRatioMetric.Update(compressionRatio)
+		topicCompressionRatioMetric.Update(compressionRatio)
+	}
+
+	return int64(len(recordBatch.Records))
+}
+
+func (r *ProduceRequest) encode(pe packetEncoder) error {
+	if r.Version >= 3 {
+		if err := pe.putNullableString(r.TransactionalID); err != nil {
+			return err
+		}
+	}
+	pe.putInt16(int16(r.RequiredAcks))
+	pe.putInt32(r.Timeout)
+	metricRegistry := pe.metricRegistry()
+	var batchSizeMetric metrics.Histogram
+	var compressionRatioMetric metrics.Histogram
+	if metricRegistry != nil {
+		batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
+		compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
+	}
+	totalRecordCount := int64(0)
+
+	err := pe.putArrayLength(len(r.records))
+	if err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.records {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+		topicRecordCount := int64(0)
+		var topicCompressionRatioMetric metrics.Histogram
+		if metricRegistry != nil {
+			topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
+		}
+		for id, records := range partitions {
+			startOffset := pe.offset()
+			pe.putInt32(id)
+			pe.push(&lengthField{})
+			err = records.encode(pe)
+			if err != nil {
+				return err
+			}
+			err = pe.pop()
+			if err != nil {
+				return err
+			}
+			if metricRegistry != nil {
+				if r.Version >= 3 {
+					topicRecordCount += updateBatchMetrics(records.RecordBatch, compressionRatioMetric, topicCompressionRatioMetric)
+				} else {
+					topicRecordCount += updateMsgSetMetrics(records.MsgSet, compressionRatioMetric, topicCompressionRatioMetric)
+				}
+				batchSize := int64(pe.offset() - startOffset)
+				batchSizeMetric.Update(batchSize)
+				getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
+			}
+		}
+		if topicRecordCount > 0 {
+			getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
+			getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
+			totalRecordCount += topicRecordCount
+		}
+	}
+	if totalRecordCount > 0 {
+		metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
+		getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
+	}
+
+	return nil
+}
+
+func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
+	r.Version = version
+
+	if version >= 3 {
+		id, err := pd.getNullableString()
+		if err != nil {
+			return err
+		}
+		r.TransactionalID = id
+	}
+	requiredAcks, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.RequiredAcks = RequiredAcks(requiredAcks)
+	if r.Timeout, err = pd.getInt32(); err != nil {
+		return err
+	}
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount == 0 {
+		return nil
+	}
+
+	r.records = make(map[string]map[int32]Records)
+	for i := 0; i < topicCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.records[topic] = make(map[int32]Records)
+
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			size, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			recordsDecoder, err := pd.getSubset(int(size))
+			if err != nil {
+				return err
+			}
+			var records Records
+			if err := records.decode(recordsDecoder); err != nil {
+				return err
+			}
+			r.records[topic][partition] = records
+		}
+	}
+
+	return nil
+}
+
+func (r *ProduceRequest) key() int16 {
+	return 0
+}
+
+func (r *ProduceRequest) version() int16 {
+	return r.Version
+}
+
+func (r *ProduceRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_9_0_0
+	case 2:
+		return V0_10_0_0
+	case 3:
+		return V0_11_0_0
+	default:
+		return MinVersion
+	}
+}
+
+func (r *ProduceRequest) ensureRecords(topic string, partition int32) {
+	if r.records == nil {
+		r.records = make(map[string]map[int32]Records)
+	}
+
+	if r.records[topic] == nil {
+		r.records[topic] = make(map[int32]Records)
+	}
+}
+
+func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
+	r.ensureRecords(topic, partition)
+	set := r.records[topic][partition].MsgSet
+
+	if set == nil {
+		set = new(MessageSet)
+		r.records[topic][partition] = newLegacyRecords(set)
+	}
+
+	set.addMessage(msg)
+}
+
+func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
+	r.ensureRecords(topic, partition)
+	r.records[topic][partition] = newLegacyRecords(set)
+}
+
+func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) {
+	r.ensureRecords(topic, partition)
+	r.records[topic][partition] = newDefaultRecords(batch)
+}
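
The acks level above is normally set through the producer configuration rather than on the request directly. A minimal sketch, assuming a broker at localhost:9092 and a placeholder topic:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	// Wait for the full in-sync replica set (subject to the broker's
	// min.insync.replicas) before a produce request is acknowledged.
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Return.Successes = true // required by SyncProducer

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored at partition %d, offset %d", partition, offset)
}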
diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go
new file mode 100644
index 0000000..4c5cd35
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_response.go
@@ -0,0 +1,189 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+type ProduceResponseBlock struct {
+	Err    KError
+	Offset int64
+	// only provided if Version >= 2 and the broker is configured with `LogAppendTime`
+	Timestamp time.Time
+}
+
+func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.Err = KError(tmp)
+
+	b.Offset, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	if version >= 2 {
+		if millis, err := pd.getInt64(); err != nil {
+			return err
+		} else if millis != -1 {
+			b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+		}
+	}
+
+	return nil
+}
+
+func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt16(int16(b.Err))
+	pe.putInt64(b.Offset)
+
+	if version >= 2 {
+		timestamp := int64(-1)
+		if !b.Timestamp.Before(time.Unix(0, 0)) {
+			timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond)
+		} else if !b.Timestamp.IsZero() {
+			return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)}
+		}
+		pe.putInt64(timestamp)
+	}
+
+	return nil
+}
+
+type ProduceResponse struct {
+	Blocks       map[string]map[int32]*ProduceResponseBlock
+	Version      int16
+	ThrottleTime time.Duration // only provided if Version >= 1
+}
+
+func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numBlocks, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
+
+		for j := 0; j < numBlocks; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := new(ProduceResponseBlock)
+			err = block.decode(pd, version)
+			if err != nil {
+				return err
+			}
+			r.Blocks[name][id] = block
+		}
+	}
+
+	if r.Version >= 1 {
+		millis, err := pd.getInt32()
+		if err != nil {
+			return err
+		}
+
+		r.ThrottleTime = time.Duration(millis) * time.Millisecond
+	}
+
+	return nil
+}
+
+func (r *ProduceResponse) encode(pe packetEncoder) error {
+	err := pe.putArrayLength(len(r.Blocks))
+	if err != nil {
+		return err
+	}
+	for topic, partitions := range r.Blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+		for id, prb := range partitions {
+			pe.putInt32(id)
+			err = prb.encode(pe, r.Version)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	if r.Version >= 1 {
+		pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+	}
+	return nil
+}
+
+func (r *ProduceResponse) key() int16 {
+	return 0
+}
+
+func (r *ProduceResponse) version() int16 {
+	return r.Version
+}
+
+func (r *ProduceResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_9_0_0
+	case 2:
+		return V0_10_0_0
+	case 3:
+		return V0_11_0_0
+	default:
+		return MinVersion
+	}
+}
+
+func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
+// Testing API
+
+func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
+	}
+	byTopic, ok := r.Blocks[topic]
+	if !ok {
+		byTopic = make(map[int32]*ProduceResponseBlock)
+		r.Blocks[topic] = byTopic
+	}
+	block := &ProduceResponseBlock{
+		Err: err,
+	}
+	if r.Version >= 2 {
+		block.Timestamp = time.Now()
+	}
+	byTopic[partition] = block
+}
diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go
new file mode 100644
index 0000000..b684aa4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_set.go
@@ -0,0 +1,259 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"errors"
+	"time"
+)
+
+type partitionSet struct {
+	msgs          []*ProducerMessage
+	recordsToSend Records
+	bufferBytes   int
+}
+
+type produceSet struct {
+	parent *asyncProducer
+	msgs   map[string]map[int32]*partitionSet
+
+	bufferBytes int
+	bufferCount int
+}
+
+func newProduceSet(parent *asyncProducer) *produceSet {
+	return &produceSet{
+		msgs:   make(map[string]map[int32]*partitionSet),
+		parent: parent,
+	}
+}
+
+func (ps *produceSet) add(msg *ProducerMessage) error {
+	var err error
+	var key, val []byte
+
+	if msg.Key != nil {
+		if key, err = msg.Key.Encode(); err != nil {
+			return err
+		}
+	}
+
+	if msg.Value != nil {
+		if val, err = msg.Value.Encode(); err != nil {
+			return err
+		}
+	}
+
+	timestamp := msg.Timestamp
+	if timestamp.IsZero() {
+		timestamp = time.Now()
+	}
+	timestamp = timestamp.Truncate(time.Millisecond)
+
+	partitions := ps.msgs[msg.Topic]
+	if partitions == nil {
+		partitions = make(map[int32]*partitionSet)
+		ps.msgs[msg.Topic] = partitions
+	}
+
+	var size int
+
+	set := partitions[msg.Partition]
+	if set == nil {
+		if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+			batch := &RecordBatch{
+				FirstTimestamp:   timestamp,
+				Version:          2,
+				Codec:            ps.parent.conf.Producer.Compression,
+				CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
+				ProducerID:       ps.parent.txnmgr.producerID,
+				ProducerEpoch:    ps.parent.txnmgr.producerEpoch,
+			}
+			if ps.parent.conf.Producer.Idempotent {
+				batch.FirstSequence = msg.sequenceNumber
+			}
+			set = &partitionSet{recordsToSend: newDefaultRecords(batch)}
+			size = recordBatchOverhead
+		} else {
+			set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))}
+		}
+		partitions[msg.Partition] = set
+	}
+	set.msgs = append(set.msgs, msg)
+
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+		if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence {
+			return errors.New("assertion failed: message out of sequence added to a batch")
+		}
+		// We are being conservative here to avoid having to prep encode the record
+		size += maximumRecordOverhead
+		rec := &Record{
+			Key:            key,
+			Value:          val,
+			TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp),
+		}
+		size += len(key) + len(val)
+		if len(msg.Headers) > 0 {
+			rec.Headers = make([]*RecordHeader, len(msg.Headers))
+			for i := range msg.Headers {
+				rec.Headers[i] = &msg.Headers[i]
+				size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32
+			}
+		}
+		set.recordsToSend.RecordBatch.addRecord(rec)
+	} else {
+		msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
+		if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+			msgToSend.Timestamp = timestamp
+			msgToSend.Version = 1
+		}
+		set.recordsToSend.MsgSet.addMessage(msgToSend)
+		size = producerMessageOverhead + len(key) + len(val)
+	}
+
+	set.bufferBytes += size
+	ps.bufferBytes += size
+	ps.bufferCount++
+
+	return nil
+}
+
+func (ps *produceSet) buildRequest() *ProduceRequest {
+	req := &ProduceRequest{
+		RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
+		Timeout:      int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
+	}
+	if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+		req.Version = 2
+	}
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+		req.Version = 3
+	}
+
+	for topic, partitionSets := range ps.msgs {
+		for partition, set := range partitionSets {
+			if req.Version >= 3 {
+				// If the API version we're hitting is 3 or greater, we need to calculate
+				// offsets for each record in the batch relative to FirstOffset.
+				// Additionally, we must set LastOffsetDelta to the value of the last offset
+				// in the batch. Since the OffsetDelta of the first record is 0, we know that the
+				// final record of any batch will have an offset of (# of records in batch) - 1.
+				// (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets
+				//  under the RecordBatch section for details.)
+				rb := set.recordsToSend.RecordBatch
+				if len(rb.Records) > 0 {
+					rb.LastOffsetDelta = int32(len(rb.Records) - 1)
+					for i, record := range rb.Records {
+						record.OffsetDelta = int64(i)
+					}
+				}
+				req.AddBatch(topic, partition, rb)
+				continue
+			}
+			if ps.parent.conf.Producer.Compression == CompressionNone {
+				req.AddSet(topic, partition, set.recordsToSend.MsgSet)
+			} else {
+				// When compression is enabled, the entire set for each partition is compressed
+				// and sent as the payload of a single fake "message" with the appropriate codec
+				// set and no key. When the server sees a message with a compression codec, it
+				// decompresses the payload and treats the result as its message set.
+
+				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+					// If our version is 0.10 or later, assign relative offsets
+					// to the inner messages. This lets the broker avoid
+					// recompressing the message set.
+					// (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets
+					// for details on relative offsets.)
+					for i, msg := range set.recordsToSend.MsgSet.Messages {
+						msg.Offset = int64(i)
+					}
+				}
+				payload, err := encode(set.recordsToSend.MsgSet, ps.parent.conf.MetricRegistry)
+				if err != nil {
+					Logger.Println(err) // if this happens, it's basically our fault.
+					panic(err)
+				}
+				compMsg := &Message{
+					Codec:            ps.parent.conf.Producer.Compression,
+					CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
+					Key:              nil,
+					Value:            payload,
+					Set:              set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics
+				}
+				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+					compMsg.Version = 1
+					compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp
+				}
+				req.AddMessage(topic, partition, compMsg)
+			}
+		}
+	}
+
+	return req
+}
+
+func (ps *produceSet) eachPartition(cb func(topic string, partition int32, pSet *partitionSet)) {
+	for topic, partitionSet := range ps.msgs {
+		for partition, set := range partitionSet {
+			cb(topic, partition, set)
+		}
+	}
+}
+
+func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
+	if ps.msgs[topic] == nil {
+		return nil
+	}
+	set := ps.msgs[topic][partition]
+	if set == nil {
+		return nil
+	}
+	ps.bufferBytes -= set.bufferBytes
+	ps.bufferCount -= len(set.msgs)
+	delete(ps.msgs[topic], partition)
+	return set.msgs
+}
+
+func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
+	version := 1
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+		version = 2
+	}
+
+	switch {
+	// Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
+	case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)):
+		return true
+	// Would we overflow the size-limit of a message-batch for this partition?
+	case ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
+		ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes:
+		return true
+	// Would we overflow simply in number of messages?
+	case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
+		return true
+	default:
+		return false
+	}
+}
+
+func (ps *produceSet) readyToFlush() bool {
+	switch {
+	// If we don't have any messages, nothing else matters
+	case ps.empty():
+		return false
+	// If all three config values are 0, we always flush as-fast-as-possible
+	case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
+		return true
+	// If we've passed the message trigger-point
+	case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
+		return true
+	// If we've passed the byte trigger-point
+	case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
+		return true
+	default:
+		return false
+	}
+}
+
+func (ps *produceSet) empty() bool {
+	return ps.bufferCount == 0
+}
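
readyToFlush above is driven by the three Producer.Flush settings; a hedged example of tuning them (the specific values are illustrative):

package main

import (
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	// A batch is flushed when any one trigger fires: 500 buffered
	// messages, 64 KiB buffered, or 100ms since the last flush.
	// With all three left at zero, batches flush as fast as possible.
	config.Producer.Flush.Messages = 500
	config.Producer.Flush.Bytes = 64 * 1024
	config.Producer.Flush.Frequency = 100 * time.Millisecond

	_ = config // pass to sarama.NewAsyncProducer([]string{"localhost:9092"}, config)
}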
diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go
new file mode 100644
index 0000000..085cbb3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/real_decoder.go
@@ -0,0 +1,332 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"math"
+)
+
+var errInvalidArrayLength = PacketDecodingError{"invalid array length"}
+var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
+var errInvalidByteSliceLengthType = PacketDecodingError{"invalid byteslice length type"}
+var errInvalidStringLength = PacketDecodingError{"invalid string length"}
+var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"}
+var errVarintOverflow = PacketDecodingError{"varint overflow"}
+var errInvalidBool = PacketDecodingError{"invalid bool"}
+
+type realDecoder struct {
+	raw   []byte
+	off   int
+	stack []pushDecoder
+}
+
+// primitives
+
+func (rd *realDecoder) getInt8() (int8, error) {
+	if rd.remaining() < 1 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int8(rd.raw[rd.off])
+	rd.off++
+	return tmp, nil
+}
+
+func (rd *realDecoder) getInt16() (int16, error) {
+	if rd.remaining() < 2 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
+	rd.off += 2
+	return tmp, nil
+}
+
+func (rd *realDecoder) getInt32() (int32, error) {
+	if rd.remaining() < 4 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+	rd.off += 4
+	return tmp, nil
+}
+
+func (rd *realDecoder) getInt64() (int64, error) {
+	if rd.remaining() < 8 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+	rd.off += 8
+	return tmp, nil
+}
+
+func (rd *realDecoder) getVarint() (int64, error) {
+	tmp, n := binary.Varint(rd.raw[rd.off:])
+	if n == 0 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	if n < 0 {
+		rd.off -= n
+		return -1, errVarintOverflow
+	}
+	rd.off += n
+	return tmp, nil
+}
+
+func (rd *realDecoder) getArrayLength() (int, error) {
+	if rd.remaining() < 4 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:])))
+	rd.off += 4
+	if tmp > rd.remaining() {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	} else if tmp > 2*math.MaxUint16 {
+		return -1, errInvalidArrayLength
+	}
+	return tmp, nil
+}
+
+func (rd *realDecoder) getBool() (bool, error) {
+	b, err := rd.getInt8()
+	if err != nil || b == 0 {
+		return false, err
+	}
+	if b != 1 {
+		return false, errInvalidBool
+	}
+	return true, nil
+}
+
+// collections
+
+func (rd *realDecoder) getBytes() ([]byte, error) {
+	tmp, err := rd.getInt32()
+	if err != nil {
+		return nil, err
+	}
+	if tmp == -1 {
+		return nil, nil
+	}
+
+	return rd.getRawBytes(int(tmp))
+}
+
+func (rd *realDecoder) getVarintBytes() ([]byte, error) {
+	tmp, err := rd.getVarint()
+	if err != nil {
+		return nil, err
+	}
+	if tmp == -1 {
+		return nil, nil
+	}
+
+	return rd.getRawBytes(int(tmp))
+}
+
+func (rd *realDecoder) getStringLength() (int, error) {
+	length, err := rd.getInt16()
+	if err != nil {
+		return 0, err
+	}
+
+	n := int(length)
+
+	switch {
+	case n < -1:
+		return 0, errInvalidStringLength
+	case n > rd.remaining():
+		rd.off = len(rd.raw)
+		return 0, ErrInsufficientData
+	}
+
+	return n, nil
+}
+
+func (rd *realDecoder) getString() (string, error) {
+	n, err := rd.getStringLength()
+	if err != nil || n == -1 {
+		return "", err
+	}
+
+	tmpStr := string(rd.raw[rd.off : rd.off+n])
+	rd.off += n
+	return tmpStr, nil
+}
+
+func (rd *realDecoder) getNullableString() (*string, error) {
+	n, err := rd.getStringLength()
+	if err != nil || n == -1 {
+		return nil, err
+	}
+
+	tmpStr := string(rd.raw[rd.off : rd.off+n])
+	rd.off += n
+	return &tmpStr, err
+}
+
+func (rd *realDecoder) getInt32Array() ([]int32, error) {
+	if rd.remaining() < 4 {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+	rd.off += 4
+
+	if rd.remaining() < 4*n {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+
+	if n == 0 {
+		return nil, nil
+	}
+
+	if n < 0 {
+		return nil, errInvalidArrayLength
+	}
+
+	ret := make([]int32, n)
+	for i := range ret {
+		ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+		rd.off += 4
+	}
+	return ret, nil
+}
+
+func (rd *realDecoder) getInt64Array() ([]int64, error) {
+	if rd.remaining() < 4 {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+	rd.off += 4
+
+	if rd.remaining() < 8*n {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+
+	if n == 0 {
+		return nil, nil
+	}
+
+	if n < 0 {
+		return nil, errInvalidArrayLength
+	}
+
+	ret := make([]int64, n)
+	for i := range ret {
+		ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+		rd.off += 8
+	}
+	return ret, nil
+}
+
+func (rd *realDecoder) getStringArray() ([]string, error) {
+	if rd.remaining() < 4 {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+	rd.off += 4
+
+	if n == 0 {
+		return nil, nil
+	}
+
+	if n < 0 {
+		return nil, errInvalidArrayLength
+	}
+
+	ret := make([]string, n)
+	for i := range ret {
+		str, err := rd.getString()
+		if err != nil {
+			return nil, err
+		}
+
+		ret[i] = str
+	}
+	return ret, nil
+}
+
+// subsets
+
+func (rd *realDecoder) remaining() int {
+	return len(rd.raw) - rd.off
+}
+
+func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
+	buf, err := rd.getRawBytes(length)
+	if err != nil {
+		return nil, err
+	}
+	return &realDecoder{raw: buf}, nil
+}
+
+func (rd *realDecoder) getRawBytes(length int) ([]byte, error) {
+	if length < 0 {
+		return nil, errInvalidByteSliceLength
+	} else if length > rd.remaining() {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+
+	start := rd.off
+	rd.off += length
+	return rd.raw[start:rd.off], nil
+}
+
+func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) {
+	if rd.remaining() < offset+length {
+		return nil, ErrInsufficientData
+	}
+	off := rd.off + offset
+	return &realDecoder{raw: rd.raw[off : off+length]}, nil
+}
+
+func (rd *realDecoder) peekInt8(offset int) (int8, error) {
+	const byteLen = 1
+	if rd.remaining() < offset+byteLen {
+		return -1, ErrInsufficientData
+	}
+	return int8(rd.raw[rd.off+offset]), nil
+}
+
+// stacks
+
+func (rd *realDecoder) push(in pushDecoder) error {
+	in.saveOffset(rd.off)
+
+	var reserve int
+	if dpd, ok := in.(dynamicPushDecoder); ok {
+		if err := dpd.decode(rd); err != nil {
+			return err
+		}
+	} else {
+		reserve = in.reserveLength()
+		if rd.remaining() < reserve {
+			rd.off = len(rd.raw)
+			return ErrInsufficientData
+		}
+	}
+
+	rd.stack = append(rd.stack, in)
+
+	rd.off += reserve
+
+	return nil
+}
+
+func (rd *realDecoder) pop() error {
+	// this is go's ugly pop pattern (the inverse of append)
+	in := rd.stack[len(rd.stack)-1]
+	rd.stack = rd.stack[:len(rd.stack)-1]
+
+	return in.check(rd.off, rd.raw)
+}
diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go
new file mode 100644
index 0000000..3c75387
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/real_encoder.go
@@ -0,0 +1,156 @@
+package sarama
+
+import (
+	"encoding/binary"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+type realEncoder struct {
+	raw      []byte
+	off      int
+	stack    []pushEncoder
+	registry metrics.Registry
+}
+
+// primitives
+
+func (re *realEncoder) putInt8(in int8) {
+	re.raw[re.off] = byte(in)
+	re.off++
+}
+
+func (re *realEncoder) putInt16(in int16) {
+	binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
+	re.off += 2
+}
+
+func (re *realEncoder) putInt32(in int32) {
+	binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
+	re.off += 4
+}
+
+func (re *realEncoder) putInt64(in int64) {
+	binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
+	re.off += 8
+}
+
+func (re *realEncoder) putVarint(in int64) {
+	re.off += binary.PutVarint(re.raw[re.off:], in)
+}
+
+func (re *realEncoder) putArrayLength(in int) error {
+	re.putInt32(int32(in))
+	return nil
+}
+
+func (re *realEncoder) putBool(in bool) {
+	if in {
+		re.putInt8(1)
+		return
+	}
+	re.putInt8(0)
+}
+
+// collection
+
+func (re *realEncoder) putRawBytes(in []byte) error {
+	copy(re.raw[re.off:], in)
+	re.off += len(in)
+	return nil
+}
+
+func (re *realEncoder) putBytes(in []byte) error {
+	if in == nil {
+		re.putInt32(-1)
+		return nil
+	}
+	re.putInt32(int32(len(in)))
+	return re.putRawBytes(in)
+}
+
+func (re *realEncoder) putVarintBytes(in []byte) error {
+	if in == nil {
+		re.putVarint(-1)
+		return nil
+	}
+	re.putVarint(int64(len(in)))
+	return re.putRawBytes(in)
+}
+
+func (re *realEncoder) putString(in string) error {
+	re.putInt16(int16(len(in)))
+	copy(re.raw[re.off:], in)
+	re.off += len(in)
+	return nil
+}
+
+func (re *realEncoder) putNullableString(in *string) error {
+	if in == nil {
+		re.putInt16(-1)
+		return nil
+	}
+	return re.putString(*in)
+}
+
+func (re *realEncoder) putStringArray(in []string) error {
+	err := re.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+
+	for _, val := range in {
+		if err := re.putString(val); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (re *realEncoder) putInt32Array(in []int32) error {
+	err := re.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	for _, val := range in {
+		re.putInt32(val)
+	}
+	return nil
+}
+
+func (re *realEncoder) putInt64Array(in []int64) error {
+	err := re.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	for _, val := range in {
+		re.putInt64(val)
+	}
+	return nil
+}
+
+func (re *realEncoder) offset() int {
+	return re.off
+}
+
+// stacks
+
+func (re *realEncoder) push(in pushEncoder) {
+	in.saveOffset(re.off)
+	re.off += in.reserveLength()
+	re.stack = append(re.stack, in)
+}
+
+func (re *realEncoder) pop() error {
+	// this is go's ugly pop pattern (the inverse of append)
+	in := re.stack[len(re.stack)-1]
+	re.stack = re.stack[:len(re.stack)-1]
+
+	return in.run(re.off, re.raw)
+}
+
+// we do record metrics during the real encoder pass
+func (re *realEncoder) metricRegistry() metrics.Registry {
+	return re.registry
+}
diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go
new file mode 100644
index 0000000..cdccfe3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/record.go
@@ -0,0 +1,116 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"time"
+)
+
+const (
+	isTransactionalMask   = 0x10
+	controlMask           = 0x20
+	maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1
+)
+
+// RecordHeader stores the key and value of a record header
+type RecordHeader struct {
+	Key   []byte
+	Value []byte
+}
+
+func (h *RecordHeader) encode(pe packetEncoder) error {
+	if err := pe.putVarintBytes(h.Key); err != nil {
+		return err
+	}
+	return pe.putVarintBytes(h.Value)
+}
+
+func (h *RecordHeader) decode(pd packetDecoder) (err error) {
+	if h.Key, err = pd.getVarintBytes(); err != nil {
+		return err
+	}
+
+	if h.Value, err = pd.getVarintBytes(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Record is a Kafka record type
+type Record struct {
+	Headers []*RecordHeader
+
+	Attributes     int8
+	TimestampDelta time.Duration
+	OffsetDelta    int64
+	Key            []byte
+	Value          []byte
+	length         varintLengthField
+}
+
+func (r *Record) encode(pe packetEncoder) error {
+	pe.push(&r.length)
+	pe.putInt8(r.Attributes)
+	pe.putVarint(int64(r.TimestampDelta / time.Millisecond))
+	pe.putVarint(r.OffsetDelta)
+	if err := pe.putVarintBytes(r.Key); err != nil {
+		return err
+	}
+	if err := pe.putVarintBytes(r.Value); err != nil {
+		return err
+	}
+	pe.putVarint(int64(len(r.Headers)))
+
+	for _, h := range r.Headers {
+		if err := h.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return pe.pop()
+}
+
+func (r *Record) decode(pd packetDecoder) (err error) {
+	if err = pd.push(&r.length); err != nil {
+		return err
+	}
+
+	if r.Attributes, err = pd.getInt8(); err != nil {
+		return err
+	}
+
+	timestamp, err := pd.getVarint()
+	if err != nil {
+		return err
+	}
+	r.TimestampDelta = time.Duration(timestamp) * time.Millisecond
+
+	if r.OffsetDelta, err = pd.getVarint(); err != nil {
+		return err
+	}
+
+	if r.Key, err = pd.getVarintBytes(); err != nil {
+		return err
+	}
+
+	if r.Value, err = pd.getVarintBytes(); err != nil {
+		return err
+	}
+
+	numHeaders, err := pd.getVarint()
+	if err != nil {
+		return err
+	}
+
+	if numHeaders >= 0 {
+		r.Headers = make([]*RecordHeader, numHeaders)
+	}
+	for i := int64(0); i < numHeaders; i++ {
+		hdr := new(RecordHeader)
+		if err := hdr.decode(pd); err != nil {
+			return err
+		}
+		r.Headers[i] = hdr
+	}
+
+	return pd.pop()
+}
diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go
new file mode 100644
index 0000000..c653763
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/record_batch.go
@@ -0,0 +1,225 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+const recordBatchOverhead = 49
+
+type recordsArray []*Record
+
+func (e recordsArray) encode(pe packetEncoder) error {
+	for _, r := range e {
+		if err := r.encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (e recordsArray) decode(pd packetDecoder) error {
+	for i := range e {
+		rec := &Record{}
+		if err := rec.decode(pd); err != nil {
+			return err
+		}
+		e[i] = rec
+	}
+	return nil
+}
+
+type RecordBatch struct {
+	FirstOffset           int64
+	PartitionLeaderEpoch  int32
+	Version               int8
+	Codec                 CompressionCodec
+	CompressionLevel      int
+	Control               bool
+	LogAppendTime         bool
+	LastOffsetDelta       int32
+	FirstTimestamp        time.Time
+	MaxTimestamp          time.Time
+	ProducerID            int64
+	ProducerEpoch         int16
+	FirstSequence         int32
+	Records               []*Record
+	PartialTrailingRecord bool
+	IsTransactional       bool
+
+	compressedRecords []byte
+	recordsLen        int // uncompressed records size
+}
+
+func (b *RecordBatch) LastOffset() int64 {
+	return b.FirstOffset + int64(b.LastOffsetDelta)
+}
+
+func (b *RecordBatch) encode(pe packetEncoder) error {
+	if b.Version != 2 {
+		return PacketEncodingError{fmt.Sprintf("unsupported record batch version (%d)", b.Version)}
+	}
+	pe.putInt64(b.FirstOffset)
+	pe.push(&lengthField{})
+	pe.putInt32(b.PartitionLeaderEpoch)
+	pe.putInt8(b.Version)
+	pe.push(newCRC32Field(crcCastagnoli))
+	pe.putInt16(b.computeAttributes())
+	pe.putInt32(b.LastOffsetDelta)
+
+	if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil {
+		return err
+	}
+
+	if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil {
+		return err
+	}
+
+	pe.putInt64(b.ProducerID)
+	pe.putInt16(b.ProducerEpoch)
+	pe.putInt32(b.FirstSequence)
+
+	if err := pe.putArrayLength(len(b.Records)); err != nil {
+		return err
+	}
+
+	if b.compressedRecords == nil {
+		if err := b.encodeRecords(pe); err != nil {
+			return err
+		}
+	}
+	if err := pe.putRawBytes(b.compressedRecords); err != nil {
+		return err
+	}
+
+	if err := pe.pop(); err != nil {
+		return err
+	}
+	return pe.pop()
+}
+
+func (b *RecordBatch) decode(pd packetDecoder) (err error) {
+	if b.FirstOffset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	batchLen, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if b.Version, err = pd.getInt8(); err != nil {
+		return err
+	}
+
+	crc32Decoder := acquireCrc32Field(crcCastagnoli)
+	defer releaseCrc32Field(crc32Decoder)
+
+	if err = pd.push(crc32Decoder); err != nil {
+		return err
+	}
+
+	attributes, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask)
+	b.Control = attributes&controlMask == controlMask
+	b.LogAppendTime = attributes&timestampTypeMask == timestampTypeMask
+	b.IsTransactional = attributes&isTransactionalMask == isTransactionalMask
+
+	if b.LastOffsetDelta, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil {
+		return err
+	}
+
+	if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil {
+		return err
+	}
+
+	if b.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if b.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	if b.FirstSequence, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	numRecs, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if numRecs >= 0 {
+		b.Records = make([]*Record, numRecs)
+	}
+
+	bufSize := int(batchLen) - recordBatchOverhead
+	recBuffer, err := pd.getRawBytes(bufSize)
+	if err != nil {
+		if err == ErrInsufficientData {
+			b.PartialTrailingRecord = true
+			b.Records = nil
+			return nil
+		}
+		return err
+	}
+
+	if err = pd.pop(); err != nil {
+		return err
+	}
+
+	recBuffer, err = decompress(b.Codec, recBuffer)
+	if err != nil {
+		return err
+	}
+
+	b.recordsLen = len(recBuffer)
+	err = decode(recBuffer, recordsArray(b.Records))
+	if err == ErrInsufficientData {
+		b.PartialTrailingRecord = true
+		b.Records = nil
+		return nil
+	}
+	return err
+}
+
+func (b *RecordBatch) encodeRecords(pe packetEncoder) error {
+	var raw []byte
+	var err error
+	if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil {
+		return err
+	}
+	b.recordsLen = len(raw)
+
+	b.compressedRecords, err = compress(b.Codec, b.CompressionLevel, raw)
+	return err
+}
+
+func (b *RecordBatch) computeAttributes() int16 {
+	attr := int16(b.Codec) & int16(compressionCodecMask)
+	if b.Control {
+		attr |= controlMask
+	}
+	if b.LogAppendTime {
+		attr |= timestampTypeMask
+	}
+	if b.IsTransactional {
+		attr |= isTransactionalMask
+	}
+	return attr
+}
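+
+// Worked example (editor's note; assumes compressionCodecMask = 0x07 and
+// timestampTypeMask = 0x08, as defined elsewhere in this package): a
+// gzip-compressed (codec 1) transactional batch encodes attributes
+// 0x01|0x10 = 0x11, while an uncompressed log-append-time control batch
+// encodes 0x08|0x20 = 0x28.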
+
+func (b *RecordBatch) addRecord(r *Record) {
+	b.Records = append(b.Records, r)
+}
diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go
new file mode 100644
index 0000000..98160c7
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/records.go
@@ -0,0 +1,204 @@
+package sarama
+
+import "fmt"
+
+const (
+	unknownRecords = iota
+	legacyRecords
+	defaultRecords
+
+	magicOffset = 16
+	magicLength = 1
+)
+
+// Records implements a union type containing either a RecordBatch or a legacy MessageSet.
+type Records struct {
+	recordsType int
+	MsgSet      *MessageSet
+	RecordBatch *RecordBatch
+}
+
+func newLegacyRecords(msgSet *MessageSet) Records {
+	return Records{recordsType: legacyRecords, MsgSet: msgSet}
+}
+
+func newDefaultRecords(batch *RecordBatch) Records {
+	return Records{recordsType: defaultRecords, RecordBatch: batch}
+}
+
+// setTypeFromFields sets type of Records depending on which of MsgSet or RecordBatch is not nil.
+// The first return value indicates whether both fields are nil (and the type is not set).
+// If both fields are not nil, it returns an error.
+func (r *Records) setTypeFromFields() (bool, error) {
+	if r.MsgSet == nil && r.RecordBatch == nil {
+		return true, nil
+	}
+	if r.MsgSet != nil && r.RecordBatch != nil {
+		return false, fmt.Errorf("both MsgSet and RecordBatch are set, but record type is unknown")
+	}
+	r.recordsType = defaultRecords
+	if r.MsgSet != nil {
+		r.recordsType = legacyRecords
+	}
+	return false, nil
+}
+
+func (r *Records) encode(pe packetEncoder) error {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		if r.MsgSet == nil {
+			return nil
+		}
+		return r.MsgSet.encode(pe)
+	case defaultRecords:
+		if r.RecordBatch == nil {
+			return nil
+		}
+		return r.RecordBatch.encode(pe)
+	}
+
+	return fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) setTypeFromMagic(pd packetDecoder) error {
+	magic, err := magicValue(pd)
+	if err != nil {
+		return err
+	}
+
+	r.recordsType = defaultRecords
+	if magic < 2 {
+		r.recordsType = legacyRecords
+	}
+
+	return nil
+}
+
+func (r *Records) decode(pd packetDecoder) error {
+	if r.recordsType == unknownRecords {
+		if err := r.setTypeFromMagic(pd); err != nil {
+			return err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		r.MsgSet = &MessageSet{}
+		return r.MsgSet.decode(pd)
+	case defaultRecords:
+		r.RecordBatch = &RecordBatch{}
+		return r.RecordBatch.decode(pd)
+	}
+	return fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) numRecords() (int, error) {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return 0, err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		if r.MsgSet == nil {
+			return 0, nil
+		}
+		return len(r.MsgSet.Messages), nil
+	case defaultRecords:
+		if r.RecordBatch == nil {
+			return 0, nil
+		}
+		return len(r.RecordBatch.Records), nil
+	}
+	return 0, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isPartial() (bool, error) {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return false, err
+		}
+	}
+
+	switch r.recordsType {
+	case unknownRecords:
+		return false, nil
+	case legacyRecords:
+		if r.MsgSet == nil {
+			return false, nil
+		}
+		return r.MsgSet.PartialTrailingMessage, nil
+	case defaultRecords:
+		if r.RecordBatch == nil {
+			return false, nil
+		}
+		return r.RecordBatch.PartialTrailingRecord, nil
+	}
+	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isControl() (bool, error) {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return false, err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		return false, nil
+	case defaultRecords:
+		if r.RecordBatch == nil {
+			return false, nil
+		}
+		return r.RecordBatch.Control, nil
+	}
+	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isOverflow() (bool, error) {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return false, err
+		}
+	}
+
+	switch r.recordsType {
+	case unknownRecords:
+		return false, nil
+	case legacyRecords:
+		if r.MsgSet == nil {
+			return false, nil
+		}
+		return r.MsgSet.OverflowMessage, nil
+	case defaultRecords:
+		return false, nil
+	}
+	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func magicValue(pd packetDecoder) (int8, error) {
+	return pd.peekInt8(magicOffset)
+}
+
+func (r *Records) getControlRecord() (ControlRecord, error) {
+	if r.RecordBatch == nil || len(r.RecordBatch.Records) == 0 {
+		return ControlRecord{}, fmt.Errorf("cannot get control record, record batch is empty")
+	}
+
+	firstRecord := r.RecordBatch.Records[0]
+	controlRecord := ControlRecord{}
+	err := controlRecord.decode(&realDecoder{raw: firstRecord.Key}, &realDecoder{raw: firstRecord.Value})
+	if err != nil {
+		return ControlRecord{}, err
+	}
+
+	return controlRecord, nil
+}
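+
+// Usage sketch (editor's note, not upstream code): because decode dispatches
+// on the magic byte peeked at offset 16, callers can decode either wire
+// format without knowing it in advance:
+//
+//	var recs Records
+//	if err := recs.decode(pd); err != nil {
+//		return err
+//	}
+//	n, err := recs.numRecords() // works for both MsgSet and RecordBatch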
diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go
new file mode 100644
index 0000000..97437d6
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/request.go
@@ -0,0 +1,171 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+)
+
+type protocolBody interface {
+	encoder
+	versionedDecoder
+	key() int16
+	version() int16
+	requiredVersion() KafkaVersion
+}
+
+type request struct {
+	correlationID int32
+	clientID      string
+	body          protocolBody
+}
+
+func (r *request) encode(pe packetEncoder) error {
+	pe.push(&lengthField{})
+	pe.putInt16(r.body.key())
+	pe.putInt16(r.body.version())
+	pe.putInt32(r.correlationID)
+
+	err := pe.putString(r.clientID)
+	if err != nil {
+		return err
+	}
+
+	err = r.body.encode(pe)
+	if err != nil {
+		return err
+	}
+
+	return pe.pop()
+}
+
+func (r *request) decode(pd packetDecoder) (err error) {
+	key, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	version, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.correlationID, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	r.clientID, err = pd.getString()
+	if err != nil {
+		return err
+	}
+
+	r.body = allocateBody(key, version)
+	if r.body == nil {
+		return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
+	}
+
+	return r.body.decode(pd, version)
+}
+
+func decodeRequest(r io.Reader) (*request, int, error) {
+	var (
+		bytesRead   int
+		lengthBytes = make([]byte, 4)
+	)
+
+	if _, err := io.ReadFull(r, lengthBytes); err != nil {
+		return nil, bytesRead, err
+	}
+
+	bytesRead += len(lengthBytes)
+	length := int32(binary.BigEndian.Uint32(lengthBytes))
+
+	if length <= 4 || length > MaxRequestSize {
+		return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+	}
+
+	encodedReq := make([]byte, length)
+	if _, err := io.ReadFull(r, encodedReq); err != nil {
+		return nil, bytesRead, err
+	}
+
+	bytesRead += len(encodedReq)
+
+	req := &request{}
+	if err := decode(encodedReq, req); err != nil {
+		return nil, bytesRead, err
+	}
+
+	return req, bytesRead, nil
+}
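+
+// Framing note (editor's): every request on the wire is a 4-byte big-endian
+// length followed by that many payload bytes; the payload opens with the
+// int16 api key, int16 api version, int32 correlation id and the client id
+// string, which are exactly the fields (*request).encode writes and
+// (*request).decode reads back.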
+
+func allocateBody(key, version int16) protocolBody {
+	switch key {
+	case 0:
+		return &ProduceRequest{}
+	case 1:
+		return &FetchRequest{}
+	case 2:
+		return &OffsetRequest{Version: version}
+	case 3:
+		return &MetadataRequest{}
+	case 8:
+		return &OffsetCommitRequest{Version: version}
+	case 9:
+		return &OffsetFetchRequest{}
+	case 10:
+		return &FindCoordinatorRequest{}
+	case 11:
+		return &JoinGroupRequest{}
+	case 12:
+		return &HeartbeatRequest{}
+	case 13:
+		return &LeaveGroupRequest{}
+	case 14:
+		return &SyncGroupRequest{}
+	case 15:
+		return &DescribeGroupsRequest{}
+	case 16:
+		return &ListGroupsRequest{}
+	case 17:
+		return &SaslHandshakeRequest{}
+	case 18:
+		return &ApiVersionsRequest{}
+	case 19:
+		return &CreateTopicsRequest{}
+	case 20:
+		return &DeleteTopicsRequest{}
+	case 21:
+		return &DeleteRecordsRequest{}
+	case 22:
+		return &InitProducerIDRequest{}
+	case 24:
+		return &AddPartitionsToTxnRequest{}
+	case 25:
+		return &AddOffsetsToTxnRequest{}
+	case 26:
+		return &EndTxnRequest{}
+	case 28:
+		return &TxnOffsetCommitRequest{}
+	case 29:
+		return &DescribeAclsRequest{}
+	case 30:
+		return &CreateAclsRequest{}
+	case 31:
+		return &DeleteAclsRequest{}
+	case 32:
+		return &DescribeConfigsRequest{}
+	case 33:
+		return &AlterConfigsRequest{}
+	case 35:
+		return &DescribeLogDirsRequest{}
+	case 36:
+		return &SaslAuthenticateRequest{}
+	case 37:
+		return &CreatePartitionsRequest{}
+	case 42:
+		return &DeleteGroupsRequest{}
+	}
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go
new file mode 100644
index 0000000..7a75918
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/response_header.go
@@ -0,0 +1,24 @@
+package sarama
+
+import "fmt"
+
+const responseLengthSize = 4
+const correlationIDSize = 4
+
+type responseHeader struct {
+	length        int32
+	correlationID int32
+}
+
+func (r *responseHeader) decode(pd packetDecoder) (err error) {
+	r.length, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if r.length <= 4 || r.length > MaxResponseSize {
+		return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
+	}
+
+	r.correlationID, err = pd.getInt32()
+	return err
+}
diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go
new file mode 100644
index 0000000..1e0277a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sarama.go
@@ -0,0 +1,106 @@
+/*
+Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
+API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
+API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.
+
+To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
+and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
+The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
+useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
+depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
+SyncProducer can still sometimes be lost.
+
+To consume messages, use the Consumer or the ConsumerGroup API.
+
+For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
+and message sent on the wire; the Client provides higher-level metadata management that is shared between
+the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
+exactly with the protocol fields documented by Kafka at
+https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
+
+Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry.
+
+Broker related metrics:
+
+	+----------------------------------------------+------------+---------------------------------------------------------------+
+	| Name                                         | Type       | Description                                                   |
+	+----------------------------------------------+------------+---------------------------------------------------------------+
+	| incoming-byte-rate                           | meter      | Bytes/second read off all brokers                             |
+	| incoming-byte-rate-for-broker-<broker-id>    | meter      | Bytes/second read off a given broker                          |
+	| outgoing-byte-rate                           | meter      | Bytes/second written off all brokers                          |
+	| outgoing-byte-rate-for-broker-<broker-id>    | meter      | Bytes/second written off a given broker                       |
+	| request-rate                                 | meter      | Requests/second sent to all brokers                           |
+	| request-rate-for-broker-<broker-id>          | meter      | Requests/second sent to a given broker                        |
+	| request-size                                 | histogram  | Distribution of the request size in bytes for all brokers     |
+	| request-size-for-broker-<broker-id>          | histogram  | Distribution of the request size in bytes for a given broker  |
+	| request-latency-in-ms                        | histogram  | Distribution of the request latency in ms for all brokers     |
+	| request-latency-in-ms-for-broker-<broker-id> | histogram  | Distribution of the request latency in ms for a given broker  |
+	| response-rate                                | meter      | Responses/second received from all brokers                    |
+	| response-rate-for-broker-<broker-id>         | meter      | Responses/second received from a given broker                 |
+	| response-size                                | histogram  | Distribution of the response size in bytes for all brokers    |
+	| response-size-for-broker-<broker-id>         | histogram  | Distribution of the response size in bytes for a given broker |
+	+----------------------------------------------+------------+---------------------------------------------------------------+
+
+Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics.
+
+Producer related metrics:
+
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| Name                                      | Type       | Description                                                                          |
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| batch-size                                | histogram  | Distribution of the number of bytes sent per partition per request for all topics    |
+	| batch-size-for-topic-<topic>              | histogram  | Distribution of the number of bytes sent per partition per request for a given topic |
+	| record-send-rate                          | meter      | Records/second sent to all topics                                                    |
+	| record-send-rate-for-topic-<topic>        | meter      | Records/second sent to a given topic                                                 |
+	| records-per-request                       | histogram  | Distribution of the number of records sent per request for all topics                |
+	| records-per-request-for-topic-<topic>     | histogram  | Distribution of the number of records sent per request for a given topic             |
+	| compression-ratio                         | histogram  | Distribution of the compression ratio times 100 of record batches for all topics     |
+	| compression-ratio-for-topic-<topic>       | histogram  | Distribution of the compression ratio times 100 of record batches for a given topic  |
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+
+Consumer related metrics:
+
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| Name                                      | Type       | Description                                                                          |
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| consumer-batch-size                       | histogram  | Distribution of the number of messages in a batch                                    |
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+
+*/
+package sarama
+
+import (
+	"io/ioutil"
+	"log"
+)
+
+var (
+	// Logger is the instance of a StdLogger interface that Sarama writes connection
+	// management events to. By default it is set to discard all log messages via ioutil.Discard,
+	// but you can set it to redirect wherever you want.
+	Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
+
+	// PanicHandler is called for recovering from panics spawned internally to the library (and thus
+	// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
+	PanicHandler func(interface{})
+
+	// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
+	// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
+	// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
+	// to process.
+	MaxRequestSize int32 = 100 * 1024 * 1024
+
+	// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
+	// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
+	// protect the client from running out of memory. Please note that brokers do not have any natural limit on
+	// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
+	// (see https://issues.apache.org/jira/browse/KAFKA-2063).
+	MaxResponseSize int32 = 100 * 1024 * 1024
+)
+
+// StdLogger is used to log error messages.
+type StdLogger interface {
+	Print(v ...interface{})
+	Printf(format string, v ...interface{})
+	Println(v ...interface{})
+}
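+
+// For example (editor's sketch): to surface Sarama's connection-management
+// logs instead of discarding them, replace Logger before creating any
+// clients:
+//
+//	sarama.Logger = log.New(os.Stdout, "[Sarama] ", log.LstdFlags)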
diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go
new file mode 100644
index 0000000..54c8b09
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go
@@ -0,0 +1,29 @@
+package sarama
+
+type SaslAuthenticateRequest struct {
+	SaslAuthBytes []byte
+}
+
+// APIKeySASLAuth is the API key for the SaslAuthenticate Kafka API
+const APIKeySASLAuth = 36
+
+func (r *SaslAuthenticateRequest) encode(pe packetEncoder) error {
+	return pe.putBytes(r.SaslAuthBytes)
+}
+
+func (r *SaslAuthenticateRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.SaslAuthBytes, err = pd.getBytes()
+	return err
+}
+
+func (r *SaslAuthenticateRequest) key() int16 {
+	return APIKeySASLAuth
+}
+
+func (r *SaslAuthenticateRequest) version() int16 {
+	return 0
+}
+
+func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion {
+	return V1_0_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go
new file mode 100644
index 0000000..0038c3f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go
@@ -0,0 +1,44 @@
+package sarama
+
+type SaslAuthenticateResponse struct {
+	Err           KError
+	ErrorMessage  *string
+	SaslAuthBytes []byte
+}
+
+func (r *SaslAuthenticateResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	if err := pe.putNullableString(r.ErrorMessage); err != nil {
+		return err
+	}
+	return pe.putBytes(r.SaslAuthBytes)
+}
+
+func (r *SaslAuthenticateResponse) decode(pd packetDecoder, version int16) error {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	if r.ErrorMessage, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	r.SaslAuthBytes, err = pd.getBytes()
+
+	return err
+}
+
+func (r *SaslAuthenticateResponse) key() int16 {
+	return APIKeySASLAuth
+}
+
+func (r *SaslAuthenticateResponse) version() int16 {
+	return 0
+}
+
+func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion {
+	return V1_0_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
new file mode 100644
index 0000000..fe5ba05
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
@@ -0,0 +1,34 @@
+package sarama
+
+type SaslHandshakeRequest struct {
+	Mechanism string
+	Version   int16
+}
+
+func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.Mechanism); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
+	if r.Mechanism, err = pd.getString(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeRequest) key() int16 {
+	return 17
+}
+
+func (r *SaslHandshakeRequest) version() int16 {
+	return r.Version
+}
+
+func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
+	return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
new file mode 100644
index 0000000..ef290d4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
@@ -0,0 +1,38 @@
+package sarama
+
+type SaslHandshakeResponse struct {
+	Err               KError
+	EnabledMechanisms []string
+}
+
+func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	return pe.putStringArray(r.EnabledMechanisms)
+}
+
+func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeResponse) key() int16 {
+	return 17
+}
+
+func (r *SaslHandshakeResponse) version() int16 {
+	return 0
+}
+
+func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
+	return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go b/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go
new file mode 100644
index 0000000..bb0c82c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go
@@ -0,0 +1,124 @@
+package sarama
+
+type topicPartitionAssignment struct {
+	Topic     string
+	Partition int32
+}
+
+type StickyAssignorUserData interface {
+	partitions() []topicPartitionAssignment
+	hasGeneration() bool
+	generation() int
+}
+
+// StickyAssignorUserDataV0 holds topic partition information for an assignment.
+type StickyAssignorUserDataV0 struct {
+	Topics map[string][]int32
+
+	topicPartitions []topicPartitionAssignment
+}
+
+func (m *StickyAssignorUserDataV0) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(m.Topics)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range m.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (m *StickyAssignorUserDataV0) decode(pd packetDecoder) (err error) {
+	var topicLen int
+	if topicLen, err = pd.getArrayLength(); err != nil {
+		return
+	}
+
+	m.Topics = make(map[string][]int32, topicLen)
+	for i := 0; i < topicLen; i++ {
+		var topic string
+		if topic, err = pd.getString(); err != nil {
+			return
+		}
+		if m.Topics[topic], err = pd.getInt32Array(); err != nil {
+			return
+		}
+	}
+	m.topicPartitions = populateTopicPartitions(m.Topics)
+	return nil
+}
+
+func (m *StickyAssignorUserDataV0) partitions() []topicPartitionAssignment { return m.topicPartitions }
+func (m *StickyAssignorUserDataV0) hasGeneration() bool                    { return false }
+func (m *StickyAssignorUserDataV0) generation() int                        { return defaultGeneration }
+
+// StickyAssignorUserDataV1 holds topic partition information for an assignment, along with the group generation.
+type StickyAssignorUserDataV1 struct {
+	Topics     map[string][]int32
+	Generation int32
+
+	topicPartitions []topicPartitionAssignment
+}
+
+func (m *StickyAssignorUserDataV1) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(m.Topics)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range m.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+
+	pe.putInt32(m.Generation)
+	return nil
+}
+
+func (m *StickyAssignorUserDataV1) decode(pd packetDecoder) (err error) {
+	var topicLen int
+	if topicLen, err = pd.getArrayLength(); err != nil {
+		return
+	}
+
+	m.Topics = make(map[string][]int32, topicLen)
+	for i := 0; i < topicLen; i++ {
+		var topic string
+		if topic, err = pd.getString(); err != nil {
+			return
+		}
+		if m.Topics[topic], err = pd.getInt32Array(); err != nil {
+			return
+		}
+	}
+
+	m.Generation, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+	m.topicPartitions = populateTopicPartitions(m.Topics)
+	return nil
+}
+
+func (m *StickyAssignorUserDataV1) partitions() []topicPartitionAssignment { return m.topicPartitions }
+func (m *StickyAssignorUserDataV1) hasGeneration() bool                    { return true }
+func (m *StickyAssignorUserDataV1) generation() int                        { return int(m.Generation) }
+
+func populateTopicPartitions(topics map[string][]int32) []topicPartitionAssignment {
+	topicPartitions := make([]topicPartitionAssignment, 0)
+	for topic, partitions := range topics {
+		for _, partition := range partitions {
+			topicPartitions = append(topicPartitions, topicPartitionAssignment{Topic: topic, Partition: partition})
+		}
+	}
+	return topicPartitions
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go
new file mode 100644
index 0000000..fe20708
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_group_request.go
@@ -0,0 +1,100 @@
+package sarama
+
+type SyncGroupRequest struct {
+	GroupId          string
+	GenerationId     int32
+	MemberId         string
+	GroupAssignments map[string][]byte
+}
+
+func (r *SyncGroupRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.GroupId); err != nil {
+		return err
+	}
+
+	pe.putInt32(r.GenerationId)
+
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil {
+		return err
+	}
+	for memberId, memberAssignment := range r.GroupAssignments {
+		if err := pe.putString(memberId); err != nil {
+			return err
+		}
+		if err := pe.putBytes(memberAssignment); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+	if r.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+	if r.GenerationId, err = pd.getInt32(); err != nil {
+		return
+	}
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	r.GroupAssignments = make(map[string][]byte)
+	for i := 0; i < n; i++ {
+		memberId, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		memberAssignment, err := pd.getBytes()
+		if err != nil {
+			return err
+		}
+
+		r.GroupAssignments[memberId] = memberAssignment
+	}
+
+	return nil
+}
+
+func (r *SyncGroupRequest) key() int16 {
+	return 14
+}
+
+func (r *SyncGroupRequest) version() int16 {
+	return 0
+}
+
+func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
+
+func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
+	if r.GroupAssignments == nil {
+		r.GroupAssignments = make(map[string][]byte)
+	}
+
+	r.GroupAssignments[memberId] = memberAssignment
+}
+
+func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error {
+	bin, err := encode(memberAssignment, nil)
+	if err != nil {
+		return err
+	}
+
+	r.AddGroupAssignment(memberId, bin)
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go
new file mode 100644
index 0000000..194b382
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_group_response.go
@@ -0,0 +1,41 @@
+package sarama
+
+type SyncGroupResponse struct {
+	Err              KError
+	MemberAssignment []byte
+}
+
+func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+	assignment := new(ConsumerGroupMemberAssignment)
+	err := decode(r.MemberAssignment, assignment)
+	return assignment, err
+}
+
+func (r *SyncGroupResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	return pe.putBytes(r.MemberAssignment)
+}
+
+func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	r.MemberAssignment, err = pd.getBytes()
+	return
+}
+
+func (r *SyncGroupResponse) key() int16 {
+	return 14
+}
+
+func (r *SyncGroupResponse) version() int16 {
+	return 0
+}
+
+func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go
new file mode 100644
index 0000000..021c5a0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_producer.go
@@ -0,0 +1,149 @@
+package sarama
+
+import "sync"
+
+// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
+// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
+// to avoid leaks: it may not be garbage-collected automatically when it passes out of scope.
+//
+// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
+// durability guarantee provided when a message is acknowledged depend on the configured value of `Producer.RequiredAcks`.
+// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
+//
+// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
+// be set to true in its configuration.
+type SyncProducer interface {
+
+	// SendMessage produces a given message, and returns only when it either has
+	// succeeded or failed to produce. It will return the partition and the offset
+	// of the produced message, or an error if the message failed to produce.
+	SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
+
+	// SendMessages produces a given set of messages, and returns only when all
+	// messages in the set have either succeeded or failed. Note that messages
+	// can succeed and fail individually; if some succeed and some fail,
+	// SendMessages will return an error.
+	SendMessages(msgs []*ProducerMessage) error
+
+	// Close shuts down the producer and waits for any buffered messages to be
+	// flushed. You must call this function before a producer object passes out of
+	// scope, as it may otherwise leak memory. You must call this before calling
+	// Close on the underlying client.
+	Close() error
+}
+
+type syncProducer struct {
+	producer *asyncProducer
+	wg       sync.WaitGroup
+}
+
+// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
+func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
+	if config == nil {
+		config = NewConfig()
+		config.Producer.Return.Successes = true
+	}
+
+	if err := verifyProducerConfig(config); err != nil {
+		return nil, err
+	}
+
+	p, err := NewAsyncProducer(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
+	if err := verifyProducerConfig(client.Config()); err != nil {
+		return nil, err
+	}
+
+	p, err := NewAsyncProducerFromClient(client)
+	if err != nil {
+		return nil, err
+	}
+	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
+	sp := &syncProducer{producer: p}
+
+	sp.wg.Add(2)
+	go withRecover(sp.handleSuccesses)
+	go withRecover(sp.handleErrors)
+
+	return sp
+}
+
+func verifyProducerConfig(config *Config) error {
+	if !config.Producer.Return.Errors {
+		return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
+	}
+	if !config.Producer.Return.Successes {
+		return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
+	}
+	return nil
+}
+
+func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
+	expectation := make(chan *ProducerError, 1)
+	msg.expectation = expectation
+	sp.producer.Input() <- msg
+
+	if err := <-expectation; err != nil {
+		return -1, -1, err.Err
+	}
+
+	return msg.Partition, msg.Offset, nil
+}
+
+func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
+	expectations := make(chan chan *ProducerError, len(msgs))
+	go func() {
+		for _, msg := range msgs {
+			expectation := make(chan *ProducerError, 1)
+			msg.expectation = expectation
+			sp.producer.Input() <- msg
+			expectations <- expectation
+		}
+		close(expectations)
+	}()
+
+	var errors ProducerErrors
+	for expectation := range expectations {
+		if err := <-expectation; err != nil {
+			errors = append(errors, err)
+		}
+	}
+
+	if len(errors) > 0 {
+		return errors
+	}
+	return nil
+}
+
+func (sp *syncProducer) handleSuccesses() {
+	defer sp.wg.Done()
+	for msg := range sp.producer.Successes() {
+		expectation := msg.expectation
+		expectation <- nil
+	}
+}
+
+func (sp *syncProducer) handleErrors() {
+	defer sp.wg.Done()
+	for err := range sp.producer.Errors() {
+		expectation := err.Msg.expectation
+		expectation <- err
+	}
+}
+
+func (sp *syncProducer) Close() error {
+	sp.producer.AsyncClose()
+	sp.wg.Wait()
+	return nil
+}
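+
+// Usage sketch (editor's note, not upstream code; assumes a broker at
+// localhost:9092 and a topic named "events"):
+//
+//	config := NewConfig()
+//	config.Producer.Return.Successes = true // required by SyncProducer
+//	producer, err := NewSyncProducer([]string{"localhost:9092"}, config)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer producer.Close()
+//	partition, offset, err := producer.SendMessage(&ProducerMessage{
+//		Topic: "events",
+//		Value: StringEncoder("hello"),
+//	})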
diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/Shopify/sarama/timestamp.go
new file mode 100644
index 0000000..372278d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/timestamp.go
@@ -0,0 +1,40 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
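+// Timestamp wraps a *time.Time and encodes it as int64 milliseconds since the
+// Unix epoch, with -1 standing in for "no timestamp" (the zero time).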
+type Timestamp struct {
+	*time.Time
+}
+
+func (t Timestamp) encode(pe packetEncoder) error {
+	timestamp := int64(-1)
+
+	if !t.Before(time.Unix(0, 0)) {
+		timestamp = t.UnixNano() / int64(time.Millisecond)
+	} else if !t.IsZero() {
+		return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", t)}
+	}
+
+	pe.putInt64(timestamp)
+	return nil
+}
+
+func (t Timestamp) decode(pd packetDecoder) error {
+	millis, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	// negative timestamps are invalid, in these cases we should return
+	// a zero time
+	timestamp := time.Time{}
+	if millis >= 0 {
+		timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+	}
+
+	*t.Time = timestamp
+	return nil
+}
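+
+// For example (editor's note): 2020-01-13T00:00:00Z encodes as 1578873600000
+// milliseconds, and a broker value of -1 decodes back to the zero time.Time.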
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
new file mode 100644
index 0000000..71e95b8
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
@@ -0,0 +1,126 @@
+package sarama
+
+type TxnOffsetCommitRequest struct {
+	TransactionalID string
+	GroupID         string
+	ProducerID      int64
+	ProducerEpoch   int16
+	Topics          map[string][]*PartitionOffsetMetadata
+}
+
+func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(t.TransactionalID); err != nil {
+		return err
+	}
+	if err := pe.putString(t.GroupID); err != nil {
+		return err
+	}
+	pe.putInt64(t.ProducerID)
+	pe.putInt16(t.ProducerEpoch)
+
+	if err := pe.putArrayLength(len(t.Topics)); err != nil {
+		return err
+	}
+	for topic, partitions := range t.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for _, partition := range partitions {
+			if err := partition.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
+	if t.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if t.GroupID, err = pd.getString(); err != nil {
+		return err
+	}
+	if t.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if t.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	t.Topics = make(map[string][]*PartitionOffsetMetadata)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		m, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		t.Topics[topic] = make([]*PartitionOffsetMetadata, m)
+
+		for j := 0; j < m; j++ {
+			partitionOffsetMetadata := new(PartitionOffsetMetadata)
+			if err := partitionOffsetMetadata.decode(pd, version); err != nil {
+				return err
+			}
+			t.Topics[topic][j] = partitionOffsetMetadata
+		}
+	}
+
+	return nil
+}
+
+func (a *TxnOffsetCommitRequest) key() int16 {
+	return 28
+}
+
+func (a *TxnOffsetCommitRequest) version() int16 {
+	return 0
+}
+
+func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type PartitionOffsetMetadata struct {
+	Partition int32
+	Offset    int64
+	Metadata  *string
+}
+
+func (p *PartitionOffsetMetadata) encode(pe packetEncoder) error {
+	pe.putInt32(p.Partition)
+	pe.putInt64(p.Offset)
+	if err := pe.putNullableString(p.Metadata); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) {
+	if p.Partition, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if p.Offset, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if p.Metadata, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
new file mode 100644
index 0000000..6c980f4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
@@ -0,0 +1,83 @@
+package sarama
+
+import (
+	"time"
+)
+
+type TxnOffsetCommitResponse struct {
+	ThrottleTime time.Duration
+	Topics       map[string][]*PartitionError
+}
+
+func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(t.ThrottleTime / time.Millisecond))
+	if err := pe.putArrayLength(len(t.Topics)); err != nil {
+		return err
+	}
+
+	for topic, e := range t.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(e)); err != nil {
+			return err
+		}
+		for _, partitionError := range e {
+			if err := partitionError.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	t.Topics = make(map[string][]*PartitionError)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		m, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		t.Topics[topic] = make([]*PartitionError, m)
+
+		for j := 0; j < m; j++ {
+			t.Topics[topic][j] = new(PartitionError)
+			if err := t.Topics[topic][j].decode(pd, version); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (a *TxnOffsetCommitResponse) key() int16 {
+	return 28
+}
+
+func (a *TxnOffsetCommitResponse) version() int16 {
+	return 0
+}
+
+func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go
new file mode 100644
index 0000000..9392793
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/utils.go
@@ -0,0 +1,227 @@
+package sarama
+
+import (
+	"bufio"
+	"fmt"
+	"net"
+	"regexp"
+)
+
+type none struct{}
+
+// make []int32 sortable so we can sort partition numbers
+type int32Slice []int32
+
+func (slice int32Slice) Len() int {
+	return len(slice)
+}
+
+func (slice int32Slice) Less(i, j int) bool {
+	return slice[i] < slice[j]
+}
+
+func (slice int32Slice) Swap(i, j int) {
+	slice[i], slice[j] = slice[j], slice[i]
+}
+
+func dupInt32Slice(input []int32) []int32 {
+	ret := make([]int32, 0, len(input))
+	for _, val := range input {
+		ret = append(ret, val)
+	}
+	return ret
+}
+
+func withRecover(fn func()) {
+	defer func() {
+		handler := PanicHandler
+		if handler != nil {
+			if err := recover(); err != nil {
+				handler(err)
+			}
+		}
+	}()
+
+	fn()
+}
+
+func safeAsyncClose(b *Broker) {
+	tmp := b // local var prevents clobbering in goroutine
+	go withRecover(func() {
+		if connected, _ := tmp.Connected(); connected {
+			if err := tmp.Close(); err != nil {
+				Logger.Println("Error closing broker", tmp.ID(), ":", err)
+			}
+		}
+	})
+}
+
+// Encoder is a simple interface for any type that can be encoded as an array of bytes
+// in order to be sent as the key or value of a Kafka message. Length() is provided as an
+// optimization, and must return the same as len() on the result of Encode().
+type Encoder interface {
+	Encode() ([]byte, error)
+	Length() int
+}
+
+// make strings and byte slices encodable for convenience so they can be used as keys
+// and/or values in kafka messages
+
+// StringEncoder implements the Encoder interface for Go strings so that they can be used
+// as the Key or Value in a ProducerMessage.
+type StringEncoder string
+
+func (s StringEncoder) Encode() ([]byte, error) {
+	return []byte(s), nil
+}
+
+func (s StringEncoder) Length() int {
+	return len(s)
+}
+
+// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
+// as the Key or Value in a ProducerMessage.
+type ByteEncoder []byte
+
+func (b ByteEncoder) Encode() ([]byte, error) {
+	return b, nil
+}
+
+func (b ByteEncoder) Length() int {
+	return len(b)
+}
+
+// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
+// reads that trigger syscalls.
+type bufConn struct {
+	net.Conn
+	buf *bufio.Reader
+}
+
+func newBufConn(conn net.Conn) *bufConn {
+	return &bufConn{
+		Conn: conn,
+		buf:  bufio.NewReader(conn),
+	}
+}
+
+func (bc *bufConn) Read(b []byte) (n int, err error) {
+	return bc.buf.Read(b)
+}
+
+// KafkaVersion instances represent versions of the upstream Kafka broker.
+type KafkaVersion struct {
+	// it's a struct rather than just typing the array directly to make it opaque and stop people
+	// generating their own arbitrary versions
+	version [4]uint
+}
+
+func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
+	return KafkaVersion{
+		version: [4]uint{major, minor, veryMinor, patch},
+	}
+}
+
+// IsAtLeast returns true if and only if the version it is called on is
+// greater than or equal to the version passed in:
+//    V1.IsAtLeast(V2) // false
+//    V2.IsAtLeast(V1) // true
+func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
+	for i := range v.version {
+		if v.version[i] > other.version[i] {
+			return true
+		} else if v.version[i] < other.version[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// Effective constants defining the supported Kafka versions.
+var (
+	V0_8_2_0  = newKafkaVersion(0, 8, 2, 0)
+	V0_8_2_1  = newKafkaVersion(0, 8, 2, 1)
+	V0_8_2_2  = newKafkaVersion(0, 8, 2, 2)
+	V0_9_0_0  = newKafkaVersion(0, 9, 0, 0)
+	V0_9_0_1  = newKafkaVersion(0, 9, 0, 1)
+	V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
+	V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
+	V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
+	V0_10_1_1 = newKafkaVersion(0, 10, 1, 1)
+	V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
+	V0_10_2_1 = newKafkaVersion(0, 10, 2, 1)
+	V0_11_0_0 = newKafkaVersion(0, 11, 0, 0)
+	V0_11_0_1 = newKafkaVersion(0, 11, 0, 1)
+	V0_11_0_2 = newKafkaVersion(0, 11, 0, 2)
+	V1_0_0_0  = newKafkaVersion(1, 0, 0, 0)
+	V1_1_0_0  = newKafkaVersion(1, 1, 0, 0)
+	V1_1_1_0  = newKafkaVersion(1, 1, 1, 0)
+	V2_0_0_0  = newKafkaVersion(2, 0, 0, 0)
+	V2_0_1_0  = newKafkaVersion(2, 0, 1, 0)
+	V2_1_0_0  = newKafkaVersion(2, 1, 0, 0)
+	V2_2_0_0  = newKafkaVersion(2, 2, 0, 0)
+	V2_3_0_0  = newKafkaVersion(2, 3, 0, 0)
+	V2_4_0_0  = newKafkaVersion(2, 4, 0, 0)
+
+	SupportedVersions = []KafkaVersion{
+		V0_8_2_0,
+		V0_8_2_1,
+		V0_8_2_2,
+		V0_9_0_0,
+		V0_9_0_1,
+		V0_10_0_0,
+		V0_10_0_1,
+		V0_10_1_0,
+		V0_10_1_1,
+		V0_10_2_0,
+		V0_10_2_1,
+		V0_11_0_0,
+		V0_11_0_1,
+		V0_11_0_2,
+		V1_0_0_0,
+		V1_1_0_0,
+		V1_1_1_0,
+		V2_0_0_0,
+		V2_0_1_0,
+		V2_1_0_0,
+		V2_2_0_0,
+		V2_3_0_0,
+		V2_4_0_0,
+	}
+	MinVersion = V0_8_2_0
+	MaxVersion = V2_4_0_0
+)
+
+// ParseKafkaVersion parses a version string and returns the matching KafkaVersion, or an error if the string is malformed.
+func ParseKafkaVersion(s string) (KafkaVersion, error) {
+	if len(s) < 5 {
+		return MinVersion, fmt.Errorf("invalid version `%s`", s)
+	}
+	var major, minor, veryMinor, patch uint
+	var err error
+	if s[0] == '0' {
+		err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
+	} else {
+		err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
+	}
+	if err != nil {
+		return MinVersion, err
+	}
+	return newKafkaVersion(major, minor, veryMinor, patch), nil
+}
+
+func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error {
+	if !regexp.MustCompile(pattern).MatchString(s) {
+		return fmt.Errorf("invalid version `%s`", s)
+	}
+	_, err := fmt.Sscanf(s, format, v[0], v[1], v[2])
+	return err
+}
+
+func (v KafkaVersion) String() string {
+	if v.version[0] == 0 {
+		return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3])
+	}
+
+	return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2])
+}
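+
+// For example (editor's sketch): both the pre-1.0 four-part and the post-1.0
+// three-part version strings parse:
+//
+//	v, _ := ParseKafkaVersion("0.11.0.2") // == V0_11_0_2
+//	v, _ = ParseKafkaVersion("2.3.0")     // == V2_3_0_0
+//	v.IsAtLeast(V0_10_2_0)                // true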
diff --git a/vendor/github.com/Shopify/sarama/zstd.go b/vendor/github.com/Shopify/sarama/zstd.go
new file mode 100644
index 0000000..58880e2
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/zstd.go
@@ -0,0 +1,27 @@
+package sarama
+
+import (
+	"github.com/klauspost/compress/zstd"
+	"sync"
+)
+
+var (
+	zstdDec *zstd.Decoder
+	zstdEnc *zstd.Encoder
+
+	zstdEncOnce, zstdDecOnce sync.Once
+)
+
+func zstdDecompress(dst, src []byte) ([]byte, error) {
+	zstdDecOnce.Do(func() {
+		zstdDec, _ = zstd.NewReader(nil)
+	})
+	return zstdDec.DecodeAll(src, dst)
+}
+
+func zstdCompress(dst, src []byte) ([]byte, error) {
+	zstdEncOnce.Do(func() {
+		zstdEnc, _ = zstd.NewWriter(nil, zstd.WithZeroFrames(true))
+	})
+	return zstdEnc.EncodeAll(src, dst), nil
+}
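+
+// Design note (editor's): the encoder and decoder are created lazily, once,
+// and then shared; zstd.NewReader(nil) and zstd.NewWriter(nil, ...) return
+// instances whose DecodeAll and EncodeAll methods are documented as safe for
+// concurrent use, so no per-call allocation is required.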
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 0000000..bc52e96
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 0000000..7929947
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,145 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, is not being compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line.  The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces, which makes the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
+
+package spew
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = false
+
+	// ptrSize is the size of a pointer on the current arch.
+	ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+type flag uintptr
+
+var (
+	// flagRO indicates whether the value field of a reflect.Value
+	// is read-only.
+	flagRO flag
+
+	// flagAddr indicates whether the address of the reflect.Value's
+	// value may be taken.
+	flagAddr flag
+)
+
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
+
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+	ro, addr flag
+}{{
+	// From Go 1.4 to 1.5
+	ro:   1 << 5,
+	addr: 1 << 7,
+}, {
+	// Up to Go tip.
+	ro:   1<<5 | 1<<6,
+	addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+	if !ok {
+		panic("reflect.Value has no flag field")
+	}
+	return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+	return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data.  It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+	if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+		return v
+	}
+	flagFieldPtr := flagField(&v)
+	*flagFieldPtr &^= flagRO
+	*flagFieldPtr |= flagAddr
+	return v
+}
+
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+	if !ok {
+		panic("reflect.Value has no flag field")
+	}
+	if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+		panic("reflect.Value flag field has changed kind")
+	}
+	type t0 int
+	var t struct {
+		A t0
+		// t0 will have flagEmbedRO set.
+		t0
+		// a will have flagStickyRO set
+		a t0
+	}
+	vA := reflect.ValueOf(t).FieldByName("A")
+	va := reflect.ValueOf(t).FieldByName("a")
+	vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+	// Infer flagRO from the difference between the flags
+	// for the (otherwise identical) fields in t.
+	flagPublic := *flagField(&vA)
+	flagWithRO := *flagField(&va) | *flagField(&vt0)
+	flagRO = flagPublic ^ flagWithRO
+
+	// Infer flagAddr from the difference between a value obtained through
+	// a pointer and one obtained directly.
+	vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+	flagNoPtr := *flagField(&vA)
+	flagPtr := *flagField(&vPtrA)
+	flagAddr = flagNoPtr ^ flagPtr
+
+	// Check that the inferred flags tally with one of the known versions.
+	for _, f := range okFlags {
+		if flagRO == f.ro && flagAddr == f.addr {
+			return
+		}
+	}
+	panic("reflect.Value read-only flag has changed semantics")
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 0000000..205c28d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, is compiled by GopherJS, or
+// "-tags safe" is added to the go build command line.  The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe !go1.4
+
+package spew
+
+import "reflect"
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = true
+)
+
+// unsafeReflectValue normally converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data.  However, doing this relies on access to
+// the unsafe package.  This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+	return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000..1be8ce9
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead.  This mirrors
+// the technique used in the fmt package.
+var (
+	panicBytes            = []byte("(PANIC=")
+	plusBytes             = []byte("+")
+	iBytes                = []byte("i")
+	trueBytes             = []byte("true")
+	falseBytes            = []byte("false")
+	interfaceBytes        = []byte("(interface {})")
+	commaNewlineBytes     = []byte(",\n")
+	newlineBytes          = []byte("\n")
+	openBraceBytes        = []byte("{")
+	openBraceNewlineBytes = []byte("{\n")
+	closeBraceBytes       = []byte("}")
+	asteriskBytes         = []byte("*")
+	colonBytes            = []byte(":")
+	colonSpaceBytes       = []byte(": ")
+	openParenBytes        = []byte("(")
+	closeParenBytes       = []byte(")")
+	spaceBytes            = []byte(" ")
+	pointerChainBytes     = []byte("->")
+	nilAngleBytes         = []byte("<nil>")
+	maxNewlineBytes       = []byte("<max depth reached>\n")
+	maxShortBytes         = []byte("<max>")
+	circularBytes         = []byte("<already shown>")
+	circularShortBytes    = []byte("<shown>")
+	invalidAngleBytes     = []byte("<invalid>")
+	openBracketBytes      = []byte("[")
+	closeBracketBytes     = []byte("]")
+	percentBytes          = []byte("%")
+	precisionBytes        = []byte(".")
+	openAngleBytes        = []byte("<")
+	closeAngleBytes       = []byte(">")
+	openMapBytes          = []byte("map[")
+	closeMapBytes         = []byte("]")
+	lenEqualsBytes        = []byte("len=")
+	capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits is used to map a value in the range 0-15 to its hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+	if err := recover(); err != nil {
+		w.Write(panicBytes)
+		fmt.Fprintf(w, "%v", err)
+		w.Write(closeParenBytes)
+	}
+}
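+
+// Illustrative note (a sketch, not upstream documentation): given a
+// hypothetical Stringer that panics,
+//
+//	type boom struct{}
+//	func (boom) String() string { panic("kaboom") }
+//
+// catchPanic recovers inside handleMethods and the output contains
+// "(PANIC=kaboom)" instead of the process crashing.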
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+	// We need an interface to check if the type implements the error or
+	// Stringer interface.  However, the reflect package won't give us an
+	// interface on certain things like unexported struct fields in order
+	// to enforce visibility rules.  We use unsafe, when it's available,
+	// to bypass these restrictions since this package does not mutate the
+	// values.
+	if !v.CanInterface() {
+		if UnsafeDisabled {
+			return false
+		}
+
+		v = unsafeReflectValue(v)
+	}
+
+	// Choose whether or not to do error and Stringer interface lookups against
+	// the base type or a pointer to the base type depending on settings.
+	// Technically, calling one of these methods with a pointer receiver can
+	// mutate the value; however, types which choose to satisfy an error or
+	// Stringer interface with a pointer receiver should not be mutating their
+	// state inside these interface methods.
+	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+		v = unsafeReflectValue(v)
+	}
+	if v.CanAddr() {
+		v = v.Addr()
+	}
+
+	// Is it an error or Stringer?
+	switch iface := v.Interface().(type) {
+	case error:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.Error()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+
+		w.Write([]byte(iface.Error()))
+		return true
+
+	case fmt.Stringer:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.String()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+		w.Write([]byte(iface.String()))
+		return true
+	}
+	return false
+}
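+
+// exampleHandleMethods is an illustrative sketch (not upstream code).  The
+// exampleID type is hypothetical; it satisfies fmt.Stringer only with a
+// pointer receiver, yet a plain exampleID value is still rendered through
+// String() because handleMethods makes it addressable via
+// unsafeReflectValue (when the unsafe bypass is compiled in).
+type exampleID int
+
+func (i *exampleID) String() string { return "id#" + strconv.Itoa(int(*i)) }
+
+func exampleHandleMethods() string {
+	var b bytes.Buffer
+	handleMethods(&Config, &b, reflect.ValueOf(exampleID(7)))
+	return b.String() // "id#7" on builds where UnsafeDisabled is false
+}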
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+	if val {
+		w.Write(trueBytes)
+	} else {
+		w.Write(falseBytes)
+	}
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+	w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+	w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64 bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+	r := real(c)
+	w.Write(openParenBytes)
+	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+	i := imag(c)
+	if i >= 0 {
+		w.Write(plusBytes)
+	}
+	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+	w.Write(iBytes)
+	w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+	// Null pointer.
+	num := uint64(p)
+	if num == 0 {
+		w.Write(nilAngleBytes)
+		return
+	}
+
+	// Max uint64 is 16 hex digits + 2 bytes for the '0x' prefix.
+	buf := make([]byte, 18)
+
+	// It's simpler to construct the hex string right to left.
+	base := uint64(16)
+	i := len(buf) - 1
+	for num >= base {
+		buf[i] = hexDigits[num%base]
+		num /= base
+		i--
+	}
+	buf[i] = hexDigits[num]
+
+	// Add '0x' prefix.
+	i--
+	buf[i] = 'x'
+	i--
+	buf[i] = '0'
+
+	// Strip unused leading bytes.
+	buf = buf[i:]
+	w.Write(buf)
+}
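+
+// For example (illustrative): printHexPtr(w, 0xcafe) writes "0xcafe" to w,
+// while printHexPtr(w, 0) writes "<nil>".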
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+	values  []reflect.Value
+	strings []string // either nil or same len as values
+	cs      *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted.  It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+	vs := &valuesSorter{values: values, cs: cs}
+	if canSortSimply(vs.values[0].Kind()) {
+		return vs
+	}
+	if !cs.DisableMethods {
+		vs.strings = make([]string, len(values))
+		for i := range vs.values {
+			b := bytes.Buffer{}
+			if !handleMethods(cs, &b, vs.values[i]) {
+				vs.strings = nil
+				break
+			}
+			vs.strings[i] = b.String()
+		}
+	}
+	if vs.strings == nil && cs.SpewKeys {
+		vs.strings = make([]string, len(values))
+		for i := range vs.values {
+			vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+		}
+	}
+	return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a kind that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+	// This switch parallels valueSortLess, except for the default case.
+	switch kind {
+	case reflect.Bool:
+		return true
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return true
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return true
+	case reflect.Float32, reflect.Float64:
+		return true
+	case reflect.String:
+		return true
+	case reflect.Uintptr:
+		return true
+	case reflect.Array:
+		return true
+	}
+	return false
+}
+
+// Len returns the number of values in the slice.  It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+	return len(s.values)
+}
+
+// Swap swaps the values at the passed indices.  It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+	s.values[i], s.values[j] = s.values[j], s.values[i]
+	if s.strings != nil {
+		s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+	}
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value.  It is used by valuesSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Bool:
+		return !a.Bool() && b.Bool()
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return a.Int() < b.Int()
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return a.Uint() < b.Uint()
+	case reflect.Float32, reflect.Float64:
+		return a.Float() < b.Float()
+	case reflect.String:
+		return a.String() < b.String()
+	case reflect.Uintptr:
+		return a.Uint() < b.Uint()
+	case reflect.Array:
+		// Compare the contents of both arrays.
+		l := a.Len()
+		for i := 0; i < l; i++ {
+			av := a.Index(i)
+			bv := b.Index(i)
+			if av.Interface() == bv.Interface() {
+				continue
+			}
+			return valueSortLess(av, bv)
+		}
+	}
+	return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j.  It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+	if s.strings == nil {
+		return valueSortLess(s.values[i], s.values[j])
+	}
+	return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer.  Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+	if len(values) == 0 {
+		return
+	}
+	sort.Sort(newValuesSorter(values, cs))
+}
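+
+// exampleSortValues is an illustrative sketch (not upstream code) of the
+// deterministic ordering behind the SortKeys option.
+func exampleSortValues() []reflect.Value {
+	m := map[int]string{3: "c", 1: "a", 2: "b"}
+	keys := reflect.ValueOf(m).MapKeys()
+	// canSortSimply(reflect.Int) is true, so valueSortLess compares the
+	// keys with Int(); afterwards they are ordered 1, 2, 3.
+	sortValues(keys, &Config)
+	return keys
+}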
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 0000000..2e3d22f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values.  There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality.  Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation.  You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings.  See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+	// Indent specifies the string to use for each indentation level.  The
+	// global config instance that all top-level functions use sets this to a
+	// single space by default.  If you would like more indentation, you might
+	// set this to a tab with "\t" or perhaps two spaces with "  ".
+	Indent string
+
+	// MaxDepth controls the maximum number of levels to descend into nested
+	// data structures.  The default, 0, means there is no limit.
+	//
+	// NOTE: Circular data structures are properly detected, so it is not
+	// necessary to set this value unless you specifically want to limit deeply
+	// nested data structures.
+	MaxDepth int
+
+	// DisableMethods specifies whether or not error and Stringer interfaces are
+	// invoked for types that implement them.
+	DisableMethods bool
+
+	// DisablePointerMethods specifies whether or not to check for and invoke
+	// error and Stringer interfaces on types which only accept a pointer
+	// receiver when the current type is not a pointer.
+	//
+	// NOTE: This might be an unsafe action since calling one of these methods
+	// with a pointer receiver could technically mutate the value; however,
+	// in practice, types which choose to satisfy an error or Stringer
+	// interface with a pointer receiver should not be mutating their state
+	// inside these interface methods.  As a result, this option relies on
+	// access to the unsafe package, so it will not have any effect when
+	// running in environments without access to the unsafe package such as
+	// Google App Engine or with the "safe" build tag specified.
+	DisablePointerMethods bool
+
+	// DisablePointerAddresses specifies whether to disable the printing of
+	// pointer addresses. This is useful when diffing data structures in tests.
+	DisablePointerAddresses bool
+
+	// DisableCapacities specifies whether to disable the printing of capacities
+	// for arrays, slices, maps and channels. This is useful when diffing
+	// data structures in tests.
+	DisableCapacities bool
+
+	// ContinueOnMethod specifies whether or not recursion should continue once
+	// a custom error or Stringer interface is invoked.  The default, false,
+	// means it will print the results of invoking the custom error or Stringer
+	// interface and return immediately instead of continuing to recurse into
+	// the internals of the data type.
+	//
+	// NOTE: This flag does not have any effect if method invocation is disabled
+	// via the DisableMethods or DisablePointerMethods options.
+	ContinueOnMethod bool
+
+	// SortKeys specifies map keys should be sorted before being printed. Use
+	// this to have a more deterministic, diffable output.  Note that only
+	// native types (bool, int, uint, floats, uintptr and string) and types
+	// that support the error or Stringer interfaces (if methods are
+	// enabled) are supported, with other types sorted according to the
+	// reflect.Value.String() output which guarantees display stability.
+	SortKeys bool
+
+	// SpewKeys specifies that, as a last resort attempt, map keys should
+	// be spewed to strings and sorted by those strings.  This is only
+	// considered if SortKeys is true.
+	SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the formatted string as a value that satisfies error.  See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+	return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+	return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+	return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+	return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface.  As a result, it integrates cleanly with standard fmt package
+printing functions.  The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly.  It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Print, c.Println, or c.Printf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w.  It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+	fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value.  It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c.  See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+	fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(c, &buf, a...)
+	return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with c.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = newFormatter(c, arg)
+	}
+	return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// 	Indent: " "
+// 	MaxDepth: 0
+// 	DisableMethods: false
+// 	DisablePointerMethods: false
+// 	DisablePointerAddresses: false
+// 	DisableCapacities: false
+// 	ContinueOnMethod: false
+// 	SortKeys: false
+// 	SpewKeys: false
+func NewDefaultConfig() *ConfigState {
+	return &ConfigState{Indent: " "}
+}
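+
+// exampleTestConfig is an illustrative sketch (not part of the upstream
+// API): a local ConfigState tuned for deterministic, diff-friendly output
+// in tests.  Calling exampleTestConfig().Dump(v) prints v with sorted map
+// keys and without pointer addresses or capacities.
+func exampleTestConfig() *ConfigState {
+	return &ConfigState{
+		Indent:                  "\t",
+		SortKeys:                true,
+		DisablePointerAddresses: true,
+		DisableCapacities:       true,
+	}
+}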
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000..aacaac6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output (only when using
+	  Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+	* Dump style which prints with newlines, customizable indentation,
+	  and additional debug information such as types and all pointer addresses
+	  used to indirect to the final value
+	* A custom Formatter interface that integrates cleanly with the standard fmt
+	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+	  similar to the default %v while providing the additional functionality
+	  outlined above and passing unsupported format verbs such as %x and %q
+	  along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew.  See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+	spew.Dump(myVar1, myVar2, ...)
+	spew.Fdump(someWriter, myVar1, myVar2, ...)
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type.  For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions.  This allows multiple configurations
+to be used concurrently, as sketched below.  See the ConfigState
+documentation for more details.
+
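+For example (a minimal sketch):
+
+	scs := spew.ConfigState{Indent: "\t", SortKeys: true}
+	scs.Dump(myVar1)
+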
+The following configuration options are available:
+	* Indent
+		String to use for each indentation level for Dump functions.
+		It is a single space by default.  A popular alternative is "\t".
+
+	* MaxDepth
+		Maximum number of levels to descend into nested data structures.
+		There is no limit by default.
+
+	* DisableMethods
+		Disables invocation of error and Stringer interface methods.
+		Method invocation is enabled by default.
+
+	* DisablePointerMethods
+		Disables invocation of error and Stringer interface methods on types
+		which only accept pointer receivers from non-pointer variables.
+		Pointer method invocation is enabled by default.
+
+	* DisablePointerAddresses
+		Disables the printing of pointer addresses.  This is useful when
+		diffing data structures in tests.  Pointer address printing is
+		enabled by default.
+
+	* DisableCapacities
+		Disables the printing of capacities for arrays, slices, maps and
+		channels.  This is useful when diffing data structures in tests.
+		Capacity printing is enabled by default.
+
+	* ContinueOnMethod
+		Enables recursion into types after invoking error and Stringer interface
+		methods. Recursion after method invocation is disabled by default.
+
+	* SortKeys
+		Specifies map keys should be sorted before being printed. Use
+		this to have a more deterministic, diffable output.  Note that
+		only native types (bool, int, uint, floats, uintptr and string)
+		and types which implement error or Stringer interfaces are
+		supported with other types sorted according to the
+		reflect.Value.String() output which guarantees display
+		stability.  Natural map order is used by default.
+
+	* SpewKeys
+		Specifies that, as a last resort attempt, map keys should be
+		spewed to strings and sorted by those strings.  This is only
+		considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+	spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer.  For example, to dump to standard error:
+
+	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+	(main.Foo) {
+	 unexportedField: (*main.Bar)(0xf84002e210)({
+	  flag: (main.Flag) flagTwo,
+	  data: (uintptr) <nil>
+	 }),
+	 ExportedField: (map[interface {}]interface {}) (len=1) {
+	  (string) (len=3) "one": (bool) true
+	 }
+	}
+
+Byte (and uint8) arrays and slices are displayed specially, in the style of
+the hexdump -C command, as shown below.
+	([]uint8) (len=32 cap=32) {
+	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+	 00000020  31 32                                             |12|
+	}
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Print, spew.Println, or spew.Printf.  The
+functions have syntax you are most likely already familiar with:
+
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Println(myVar, myVar2)
+	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+	  %v: <**>5
+	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
+	 %#v: (**uint8)5
+	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+	  %v: <*>{1 <*><shown>}
+	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output.  Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000..f78d89f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	// uint8Type is a reflect.Type representing a uint8.  It is used to
+	// convert cgo types to uint8 slices for hexdumping.
+	uint8Type = reflect.TypeOf(uint8(0))
+
+	// cCharRE is a regular expression that matches a cgo char.
+	// It is used to detect character arrays to hexdump them.
+	cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+
+	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
+	// char.  It is used to detect unsigned character arrays to hexdump
+	// them.
+	cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+
+	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+	// It is used to detect uint8_t arrays to hexdump them.
+	cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+	w                io.Writer
+	depth            int
+	pointers         map[uintptr]int
+	ignoreNextType   bool
+	ignoreNextIndent bool
+	cs               *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+	if d.ignoreNextIndent {
+		d.ignoreNextIndent = false
+		return
+	}
+	d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range d.pointers {
+		if depth >= d.depth {
+			delete(d.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+			cycleFound = true
+			indirects--
+			break
+		}
+		d.pointers[addr] = d.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type information.
+	d.w.Write(openParenBytes)
+	d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+	d.w.Write([]byte(ve.Type().String()))
+	d.w.Write(closeParenBytes)
+
+	// Display pointer information.
+	if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+		d.w.Write(openParenBytes)
+		for i, addr := range pointerChain {
+			if i > 0 {
+				d.w.Write(pointerChainBytes)
+			}
+			printHexPtr(d.w, addr)
+		}
+		d.w.Write(closeParenBytes)
+	}
+
+	// Display dereferenced value.
+	d.w.Write(openParenBytes)
+	switch {
+	case nilFound:
+		d.w.Write(nilAngleBytes)
+
+	case cycleFound:
+		d.w.Write(circularBytes)
+
+	default:
+		d.ignoreNextType = true
+		d.dump(ve)
+	}
+	d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices.  Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+	// Determine whether this type should be hex dumped or not.  Also,
+	// for types which should be hexdumped, try to use the underlying data
+	// first, then fall back to trying to convert them to a uint8 slice.
+	var buf []uint8
+	doConvert := false
+	doHexDump := false
+	numEntries := v.Len()
+	if numEntries > 0 {
+		vt := v.Index(0).Type()
+		vts := vt.String()
+		switch {
+		// C types that need to be converted.
+		case cCharRE.MatchString(vts):
+			fallthrough
+		case cUnsignedCharRE.MatchString(vts):
+			fallthrough
+		case cUint8tCharRE.MatchString(vts):
+			doConvert = true
+
+		// Try to use existing uint8 slices and fall back to converting
+		// and copying if that fails.
+		case vt.Kind() == reflect.Uint8:
+			// We need an addressable interface to convert the type
+			// to a byte slice.  However, the reflect package won't
+			// give us an interface on certain things like
+			// unexported struct fields in order to enforce
+			// visibility rules.  We use unsafe, when available, to
+			// bypass these restrictions since this package does not
+			// mutate the values.
+			vs := v
+			if !vs.CanInterface() || !vs.CanAddr() {
+				vs = unsafeReflectValue(vs)
+			}
+			if !UnsafeDisabled {
+				vs = vs.Slice(0, numEntries)
+
+				// Use the existing uint8 slice if it can be
+				// type asserted.
+				iface := vs.Interface()
+				if slice, ok := iface.([]uint8); ok {
+					buf = slice
+					doHexDump = true
+					break
+				}
+			}
+
+			// The underlying data needs to be converted if it can't
+			// be type asserted to a uint8 slice.
+			doConvert = true
+		}
+
+		// Copy and convert the underlying type if needed.
+		if doConvert && vt.ConvertibleTo(uint8Type) {
+			// Convert and copy each element into a uint8 byte
+			// slice.
+			buf = make([]uint8, numEntries)
+			for i := 0; i < numEntries; i++ {
+				vv := v.Index(i)
+				buf[i] = uint8(vv.Convert(uint8Type).Uint())
+			}
+			doHexDump = true
+		}
+	}
+
+	// Hexdump the entire slice as needed.
+	if doHexDump {
+		indent := strings.Repeat(d.cs.Indent, d.depth)
+		str := indent + hex.Dump(buf)
+		str = strings.Replace(str, "\n", "\n"+indent, -1)
+		str = strings.TrimRight(str, d.cs.Indent)
+		d.w.Write([]byte(str))
+		return
+	}
+
+	// Recursively call dump for each item.
+	for i := 0; i < numEntries; i++ {
+		d.dump(d.unpackValue(v.Index(i)))
+		if i < (numEntries - 1) {
+			d.w.Write(commaNewlineBytes)
+		} else {
+			d.w.Write(newlineBytes)
+		}
+	}
+}
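+
+// Illustrative note (behavior follows from the code above, output
+// abbreviated): a []byte such as []byte("hi") takes the hexdump path and
+// renders as
+//
+//	00000000  68 69                                             |hi|
+//
+// while, say, a []int falls through to the per-element loop at the end of
+// dumpSlice.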
+
+// dump is the main workhorse for dumping a value.  It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately.  It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+	// Handle invalid reflect values immediately.
+	kind := v.Kind()
+	if kind == reflect.Invalid {
+		d.w.Write(invalidAngleBytes)
+		return
+	}
+
+	// Handle pointers specially.
+	if kind == reflect.Ptr {
+		d.indent()
+		d.dumpPtr(v)
+		return
+	}
+
+	// Print type information unless already handled elsewhere.
+	if !d.ignoreNextType {
+		d.indent()
+		d.w.Write(openParenBytes)
+		d.w.Write([]byte(v.Type().String()))
+		d.w.Write(closeParenBytes)
+		d.w.Write(spaceBytes)
+	}
+	d.ignoreNextType = false
+
+	// Display length and capacity if the built-in len and cap functions
+	// work with the value's kind and the len/cap itself is non-zero.
+	valueLen, valueCap := 0, 0
+	switch v.Kind() {
+	case reflect.Array, reflect.Slice, reflect.Chan:
+		valueLen, valueCap = v.Len(), v.Cap()
+	case reflect.Map, reflect.String:
+		valueLen = v.Len()
+	}
+	if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+		d.w.Write(openParenBytes)
+		if valueLen != 0 {
+			d.w.Write(lenEqualsBytes)
+			printInt(d.w, int64(valueLen), 10)
+		}
+		if !d.cs.DisableCapacities && valueCap != 0 {
+			if valueLen != 0 {
+				d.w.Write(spaceBytes)
+			}
+			d.w.Write(capEqualsBytes)
+			printInt(d.w, int64(valueCap), 10)
+		}
+		d.w.Write(closeParenBytes)
+		d.w.Write(spaceBytes)
+	}
+
+	// Call Stringer/error interfaces if they exist and the handle methods flag
+	// is enabled.
+	if !d.cs.DisableMethods {
+		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+			if handled := handleMethods(d.cs, d.w, v); handled {
+				return
+			}
+		}
+	}
+
+	switch kind {
+	case reflect.Invalid:
+		// Do nothing.  We should never get here since invalid has already
+		// been handled above.
+
+	case reflect.Bool:
+		printBool(d.w, v.Bool())
+
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		printInt(d.w, v.Int(), 10)
+
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		printUint(d.w, v.Uint(), 10)
+
+	case reflect.Float32:
+		printFloat(d.w, v.Float(), 32)
+
+	case reflect.Float64:
+		printFloat(d.w, v.Float(), 64)
+
+	case reflect.Complex64:
+		printComplex(d.w, v.Complex(), 32)
+
+	case reflect.Complex128:
+		printComplex(d.w, v.Complex(), 64)
+
+	case reflect.Slice:
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+			break
+		}
+		fallthrough
+
+	case reflect.Array:
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			d.dumpSlice(v)
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.String:
+		d.w.Write([]byte(strconv.Quote(v.String())))
+
+	case reflect.Interface:
+		// The only time we should get here is for nil interfaces due to
+		// unpackValue calls.
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+		}
+
+	case reflect.Ptr:
+		// Do nothing.  We should never get here since pointers have already
+		// been handled above.
+
+	case reflect.Map:
+		// nil maps should be indicated as different from empty maps.
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+			break
+		}
+
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			numEntries := v.Len()
+			keys := v.MapKeys()
+			if d.cs.SortKeys {
+				sortValues(keys, d.cs)
+			}
+			for i, key := range keys {
+				d.dump(d.unpackValue(key))
+				d.w.Write(colonSpaceBytes)
+				d.ignoreNextIndent = true
+				d.dump(d.unpackValue(v.MapIndex(key)))
+				if i < (numEntries - 1) {
+					d.w.Write(commaNewlineBytes)
+				} else {
+					d.w.Write(newlineBytes)
+				}
+			}
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.Struct:
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			vt := v.Type()
+			numFields := v.NumField()
+			for i := 0; i < numFields; i++ {
+				d.indent()
+				vtf := vt.Field(i)
+				d.w.Write([]byte(vtf.Name))
+				d.w.Write(colonSpaceBytes)
+				d.ignoreNextIndent = true
+				d.dump(d.unpackValue(v.Field(i)))
+				if i < (numFields - 1) {
+					d.w.Write(commaNewlineBytes)
+				} else {
+					d.w.Write(newlineBytes)
+				}
+			}
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(d.w, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(d.w, v.Pointer())
+
+	// There were no other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it in case any new
+	// types are added.
+	default:
+		if v.CanInterface() {
+			fmt.Fprintf(d.w, "%v", v.Interface())
+		} else {
+			fmt.Fprintf(d.w, "%v", v.String())
+		}
+	}
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+	for _, arg := range a {
+		if arg == nil {
+			w.Write(interfaceBytes)
+			w.Write(spaceBytes)
+			w.Write(nilAngleBytes)
+			w.Write(newlineBytes)
+			continue
+		}
+
+		d := dumpState{w: w, cs: cs}
+		d.pointers = make(map[uintptr]int)
+		d.dump(reflect.ValueOf(arg))
+		d.w.Write(newlineBytes)
+	}
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w.  It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+	fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(&Config, &buf, a...)
+	return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value.  It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config.  See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+	fdump(&Config, os.Stdout, a...)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 0000000..b04edb7
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// supportedFlags is a list of all the character flags supported by the fmt
+// package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation.  The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+	value          interface{}
+	fs             fmt.State
+	depth          int
+	pointers       map[uintptr]int
+	ignoreNextType bool
+	cs             *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type.  Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	buf.WriteRune('v')
+
+	format = buf.String()
+	return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package.  This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	if width, ok := f.fs.Width(); ok {
+		buf.WriteString(strconv.Itoa(width))
+	}
+
+	if precision, ok := f.fs.Precision(); ok {
+		buf.Write(precisionBytes)
+		buf.WriteString(strconv.Itoa(precision))
+	}
+
+	buf.WriteRune(verb)
+
+	format = buf.String()
+	return format
+}
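+
+// For example (illustrative): for a verb spew does not handle itself, such
+// as the 'f' in "%+6.2f", constructOrigFormat rebuilds exactly "%+6.2f" so
+// the argument can be deferred to the standard fmt package unchanged.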
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface {
+		f.ignoreNextType = false
+		if !v.IsNil() {
+			v = v.Elem()
+		}
+	}
+	return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+	// Display nil if top level pointer is nil.
+	showTypes := f.fs.Flag('#')
+	if v.IsNil() && (!showTypes || f.ignoreNextType) {
+		f.fs.Write(nilAngleBytes)
+		return
+	}
+
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range f.pointers {
+		if depth >= f.depth {
+			delete(f.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to possibly show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+			cycleFound = true
+			indirects--
+			break
+		}
+		f.pointers[addr] = f.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type or indirection level depending on flags.
+	if showTypes && !f.ignoreNextType {
+		f.fs.Write(openParenBytes)
+		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+		f.fs.Write([]byte(ve.Type().String()))
+		f.fs.Write(closeParenBytes)
+	} else {
+		if nilFound || cycleFound {
+			indirects += strings.Count(ve.Type().String(), "*")
+		}
+		f.fs.Write(openAngleBytes)
+		f.fs.Write([]byte(strings.Repeat("*", indirects)))
+		f.fs.Write(closeAngleBytes)
+	}
+
+	// Display pointer information depending on flags.
+	if f.fs.Flag('+') && (len(pointerChain) > 0) {
+		f.fs.Write(openParenBytes)
+		for i, addr := range pointerChain {
+			if i > 0 {
+				f.fs.Write(pointerChainBytes)
+			}
+			printHexPtr(f.fs, addr)
+		}
+		f.fs.Write(closeParenBytes)
+	}
+
+	// Display dereferenced value.
+	switch {
+	case nilFound:
+		f.fs.Write(nilAngleBytes)
+
+	case cycleFound:
+		f.fs.Write(circularShortBytes)
+
+	default:
+		f.ignoreNextType = true
+		f.format(ve)
+	}
+}
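+
+// Illustrative (consistent with the samples in doc.go): for a **uint8
+// pointing at 5, %v renders "<**>5" while %#+v renders
+// "(**uint8)(0xADDR1->0xADDR2)5", with the addresses varying per run.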
+
+// format is the main workhorse for providing the Formatter interface.  It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately.  It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+	// Handle invalid reflect values immediately.
+	kind := v.Kind()
+	if kind == reflect.Invalid {
+		f.fs.Write(invalidAngleBytes)
+		return
+	}
+
+	// Handle pointers specially.
+	if kind == reflect.Ptr {
+		f.formatPtr(v)
+		return
+	}
+
+	// Print type information unless already handled elsewhere.
+	if !f.ignoreNextType && f.fs.Flag('#') {
+		f.fs.Write(openParenBytes)
+		f.fs.Write([]byte(v.Type().String()))
+		f.fs.Write(closeParenBytes)
+	}
+	f.ignoreNextType = false
+
+	// Call Stringer/error interfaces if they exist and the handle methods
+	// flag is enabled.
+	if !f.cs.DisableMethods {
+		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+			if handled := handleMethods(f.cs, f.fs, v); handled {
+				return
+			}
+		}
+	}
+
+	switch kind {
+	case reflect.Invalid:
+		// Do nothing.  We should never get here since invalid has already
+		// been handled above.
+
+	case reflect.Bool:
+		printBool(f.fs, v.Bool())
+
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		printInt(f.fs, v.Int(), 10)
+
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		printUint(f.fs, v.Uint(), 10)
+
+	case reflect.Float32:
+		printFloat(f.fs, v.Float(), 32)
+
+	case reflect.Float64:
+		printFloat(f.fs, v.Float(), 64)
+
+	case reflect.Complex64:
+		printComplex(f.fs, v.Complex(), 32)
+
+	case reflect.Complex128:
+		printComplex(f.fs, v.Complex(), 64)
+
+	case reflect.Slice:
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+		fallthrough
+
+	case reflect.Array:
+		f.fs.Write(openBracketBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			numEntries := v.Len()
+			for i := 0; i < numEntries; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.Index(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBracketBytes)
+
+	case reflect.String:
+		f.fs.Write([]byte(v.String()))
+
+	case reflect.Interface:
+		// The only time we should get here is for nil interfaces due to
+		// unpackValue calls.
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+		}
+
+	case reflect.Ptr:
+		// Do nothing.  We should never get here since pointers have already
+		// been handled above.
+
+	case reflect.Map:
+		// nil maps should be indicated as different than empty maps
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+
+		f.fs.Write(openMapBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			keys := v.MapKeys()
+			if f.cs.SortKeys {
+				sortValues(keys, f.cs)
+			}
+			for i, key := range keys {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(key))
+				f.fs.Write(colonBytes)
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.MapIndex(key)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeMapBytes)
+
+	case reflect.Struct:
+		numFields := v.NumField()
+		f.fs.Write(openBraceBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			vt := v.Type()
+			for i := 0; i < numFields; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				vtf := vt.Field(i)
+				if f.fs.Flag('+') || f.fs.Flag('#') {
+					f.fs.Write([]byte(vtf.Name))
+					f.fs.Write(colonBytes)
+				}
+				f.format(f.unpackValue(v.Field(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(f.fs, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(f.fs, v.Pointer())
+
+	// There were not any other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it if any get added.
+	default:
+		format := f.buildDefaultFormat()
+		if v.CanInterface() {
+			fmt.Fprintf(f.fs, format, v.Interface())
+		} else {
+			fmt.Fprintf(f.fs, format, v.String())
+		}
+	}
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+	f.fs = fs
+
+	// Use standard formatting for verbs that are not v.
+	if verb != 'v' {
+		format := f.constructOrigFormat(verb)
+		fmt.Fprintf(fs, format, f.value)
+		return
+	}
+
+	if f.value == nil {
+		if fs.Flag('#') {
+			fs.Write(interfaceBytes)
+		}
+		fs.Write(nilAngleBytes)
+		return
+	}
+
+	f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+	fs := &formatState{value: v, cs: cs}
+	fs.pointers = make(map[uintptr]int)
+	return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface.  As a result, it integrates cleanly with standard fmt package
+printing functions.  The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly.  It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(&Config, v)
+}
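
For quick reference, a minimal sketch of the verb combinations described in the NewFormatter doc comment above (the commented annotations describe the intent, not exact output):

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	type point struct{ X, Y int }
	p := &point{1, 2}

	// Wrapping each argument lets the spew formatter handle the v verbs.
	fmt.Printf("%v\n", spew.NewFormatter(p))  // most compact form
	fmt.Printf("%+v\n", spew.NewFormatter(p)) // adds pointer addresses
	fmt.Printf("%#v\n", spew.NewFormatter(p)) // adds type information
}
```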
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 0000000..32c0e33
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"fmt"
+	"io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the formatted string as a value that satisfies error.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+	return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+	return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+	return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+	return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = NewFormatter(arg)
+	}
+	return formatters
+}
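
A small sketch showing that the wrappers above are pure argument-conversion shims around the standard fmt functions (assuming the usual import path for this vendored copy):

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	m := map[string]int{"a": 1}

	// spew.Sprintf is equivalent to wrapping every argument by hand
	// with spew.NewFormatter, exactly as convertArgs does internally.
	s1 := spew.Sprintf("%+v", m)
	s2 := fmt.Sprintf("%+v", spew.NewFormatter(m))
	fmt.Println(s1 == s2) // true
}
```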
diff --git a/vendor/github.com/eapache/go-resiliency/LICENSE b/vendor/github.com/eapache/go-resiliency/LICENSE
new file mode 100644
index 0000000..698a3f5
--- /dev/null
+++ b/vendor/github.com/eapache/go-resiliency/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/README.md b/vendor/github.com/eapache/go-resiliency/breaker/README.md
new file mode 100644
index 0000000..2d1b3d9
--- /dev/null
+++ b/vendor/github.com/eapache/go-resiliency/breaker/README.md
@@ -0,0 +1,34 @@
+circuit-breaker
+===============
+
+[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency)
+[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/breaker?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/breaker)
+[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html)
+
+The circuit-breaker resiliency pattern for golang.
+
+Creating a breaker takes three parameters:
+- error threshold (for opening the breaker)
+- success threshold (for closing the breaker)
+- timeout (how long to keep the breaker open)
+
+```go
+b := breaker.New(3, 1, 5*time.Second)
+
+for {
+	result := b.Run(func() error {
+		// communicate with some external service and
+		// return an error if the communication failed
+		return nil
+	})
+
+	switch result {
+	case nil:
+		// success!
+	case breaker.ErrBreakerOpen:
+		// our function wasn't run because the breaker was open
+	default:
+		// some other error
+	}
+}
+```
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
new file mode 100644
index 0000000..f88ca72
--- /dev/null
+++ b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
@@ -0,0 +1,161 @@
+// Package breaker implements the circuit-breaker resiliency pattern for Go.
+package breaker
+
+import (
+	"errors"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// ErrBreakerOpen is the error returned from Run() when the function is not executed
+// because the breaker is currently open.
+var ErrBreakerOpen = errors.New("circuit breaker is open")
+
+const (
+	closed uint32 = iota
+	open
+	halfOpen
+)
+
+// Breaker implements the circuit-breaker resiliency pattern
+type Breaker struct {
+	errorThreshold, successThreshold int
+	timeout                          time.Duration
+
+	lock              sync.Mutex
+	state             uint32
+	errors, successes int
+	lastError         time.Time
+}
+
+// New constructs a new circuit-breaker that starts closed.
+// From closed, the breaker opens if "errorThreshold" errors are seen
+// without an error-free period of at least "timeout". From open, the
+// breaker half-closes after "timeout". From half-open, the breaker closes
+// after "successThreshold" consecutive successes, or opens on a single error.
+func New(errorThreshold, successThreshold int, timeout time.Duration) *Breaker {
+	return &Breaker{
+		errorThreshold:   errorThreshold,
+		successThreshold: successThreshold,
+		timeout:          timeout,
+	}
+}
+
+// Run will either return ErrBreakerOpen immediately if the circuit-breaker is
+// already open, or it will run the given function and pass along its return
+// value. It is safe to call Run concurrently on the same Breaker.
+func (b *Breaker) Run(work func() error) error {
+	state := atomic.LoadUint32(&b.state)
+
+	if state == open {
+		return ErrBreakerOpen
+	}
+
+	return b.doWork(state, work)
+}
+
+// Go will either return ErrBreakerOpen immediately if the circuit-breaker is
+// already open, or it will run the given function in a separate goroutine.
+// If the function is run, Go will return nil immediately, and will *not* return
+// the return value of the function. It is safe to call Go concurrently on the
+// same Breaker.
+func (b *Breaker) Go(work func() error) error {
+	state := atomic.LoadUint32(&b.state)
+
+	if state == open {
+		return ErrBreakerOpen
+	}
+
+	// errcheck complains about ignoring the error return value, but
+	// that's on purpose; if you want an error from a goroutine you have to
+	// get it over a channel or something
+	go b.doWork(state, work)
+
+	return nil
+}
+
+func (b *Breaker) doWork(state uint32, work func() error) error {
+	var panicValue interface{}
+
+	result := func() error {
+		defer func() {
+			panicValue = recover()
+		}()
+		return work()
+	}()
+
+	if result == nil && panicValue == nil && state == closed {
+		// short-circuit the normal, success path without contending
+		// on the lock
+		return nil
+	}
+
+	// oh well, I guess we have to contend on the lock
+	b.processResult(result, panicValue)
+
+	if panicValue != nil {
+		// as close as Go lets us come to a "rethrow" although unfortunately
+		// we lose the original panicking location
+		panic(panicValue)
+	}
+
+	return result
+}
+
+func (b *Breaker) processResult(result error, panicValue interface{}) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if result == nil && panicValue == nil {
+		if b.state == halfOpen {
+			b.successes++
+			if b.successes == b.successThreshold {
+				b.closeBreaker()
+			}
+		}
+	} else {
+		if b.errors > 0 {
+			expiry := b.lastError.Add(b.timeout)
+			if time.Now().After(expiry) {
+				b.errors = 0
+			}
+		}
+
+		switch b.state {
+		case closed:
+			b.errors++
+			if b.errors == b.errorThreshold {
+				b.openBreaker()
+			} else {
+				b.lastError = time.Now()
+			}
+		case halfOpen:
+			b.openBreaker()
+		}
+	}
+}
+
+func (b *Breaker) openBreaker() {
+	b.changeState(open)
+	go b.timer()
+}
+
+func (b *Breaker) closeBreaker() {
+	b.changeState(closed)
+}
+
+func (b *Breaker) timer() {
+	time.Sleep(b.timeout)
+
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	b.changeState(halfOpen)
+}
+
+func (b *Breaker) changeState(newState uint32) {
+	b.errors = 0
+	b.successes = 0
+	atomic.StoreUint32(&b.state, newState)
+}
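
The README above only demonstrates Run; here is a minimal sketch of the asynchronous Go variant, whose callback error still feeds the breaker state but is never returned to the caller (the final sleep exists only to let the goroutine finish in this toy example):

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/eapache/go-resiliency/breaker"
)

func main() {
	b := breaker.New(3, 1, 5*time.Second)

	// Go runs the work in its own goroutine; it returns nil immediately
	// unless the breaker is already open.
	err := b.Go(func() error {
		return errors.New("simulated failure") // counted toward errorThreshold
	})
	if err == breaker.ErrBreakerOpen {
		fmt.Println("breaker open, work not started")
	}

	time.Sleep(100 * time.Millisecond) // sketch only: wait for the goroutine
}
```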
diff --git a/vendor/github.com/eapache/go-xerial-snappy/.gitignore b/vendor/github.com/eapache/go-xerial-snappy/.gitignore
new file mode 100644
index 0000000..daf913b
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/eapache/go-xerial-snappy/.travis.yml b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml
new file mode 100644
index 0000000..d6cf4f1
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+
+go:
+- 1.5.4
+- 1.6.1
+
+sudo: false
diff --git a/vendor/github.com/eapache/go-xerial-snappy/LICENSE b/vendor/github.com/eapache/go-xerial-snappy/LICENSE
new file mode 100644
index 0000000..5bf3688
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/eapache/go-xerial-snappy/README.md b/vendor/github.com/eapache/go-xerial-snappy/README.md
new file mode 100644
index 0000000..3f2695c
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/README.md
@@ -0,0 +1,13 @@
+# go-xerial-snappy
+
+[![Build Status](https://travis-ci.org/eapache/go-xerial-snappy.svg?branch=master)](https://travis-ci.org/eapache/go-xerial-snappy)
+
+Xerial-compatible Snappy framing support for golang.
+
+Packages using Xerial for snappy encoding use a framing format incompatible with
+basically everything else in existence. This package wraps Go's built-in snappy
+package to support it.
+
+Apps that use this format include Apache Kafka (see
+https://github.com/dpkp/kafka-python/issues/126#issuecomment-35478921 for
+details).
diff --git a/vendor/github.com/eapache/go-xerial-snappy/fuzz.go b/vendor/github.com/eapache/go-xerial-snappy/fuzz.go
new file mode 100644
index 0000000..6a46f47
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/fuzz.go
@@ -0,0 +1,16 @@
+// +build gofuzz
+
+package snappy
+
+func Fuzz(data []byte) int {
+	decode, err := Decode(data)
+	if decode == nil && err == nil {
+		panic("nil error with nil result")
+	}
+
+	if err != nil {
+		return 0
+	}
+
+	return 1
+}
diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
new file mode 100644
index 0000000..ea8f7af
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
@@ -0,0 +1,131 @@
+package snappy
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+
+	master "github.com/golang/snappy"
+)
+
+const (
+	sizeOffset = 16
+	sizeBytes  = 4
+)
+
+var (
+	xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0}
+
+	// This is xerial version 1 and minimally compatible with version 1
+	xerialVersionInfo = []byte{0, 0, 0, 1, 0, 0, 0, 1}
+
+	// ErrMalformed is returned by the decoder when the xerial framing
+	// is malformed
+	ErrMalformed = errors.New("malformed xerial framing")
+)
+
+func min(x, y int) int {
+	if x < y {
+		return x
+	}
+	return y
+}
+
+// Encode encodes data as snappy with no framing header.
+func Encode(src []byte) []byte {
+	return master.Encode(nil, src)
+}
+
+// EncodeStream *appends* to the specified 'dst' the compressed
+// 'src' in xerial framing format. If 'dst' does not have enough
+// capacity, then a new slice will be allocated. If 'dst' has
+// non-zero length, then it *must* have been built using this function.
+func EncodeStream(dst, src []byte) []byte {
+	if len(dst) == 0 {
+		dst = append(dst, xerialHeader...)
+		dst = append(dst, xerialVersionInfo...)
+	}
+
+	// Snappy encode in blocks of maximum 32KB
+	var (
+		max       = len(src)
+		blockSize = 32 * 1024
+		pos       = 0
+		chunk     []byte
+	)
+
+	for pos < max {
+		newPos := min(pos+blockSize, max)
+		chunk = master.Encode(chunk[:cap(chunk)], src[pos:newPos])
+
+		// First encode the compressed size (big-endian)
+		// Put* panics if the buffer is too small, so pad 4 bytes first
+		origLen := len(dst)
+		dst = append(dst, dst[0:4]...)
+		binary.BigEndian.PutUint32(dst[origLen:], uint32(len(chunk)))
+
+		// And now the compressed data
+		dst = append(dst, chunk...)
+		pos = newPos
+	}
+	return dst
+}
+
+// Decode decodes snappy data whether it is traditional unframed
+// or includes the xerial framing format.
+func Decode(src []byte) ([]byte, error) {
+	return DecodeInto(nil, src)
+}
+
+// DecodeInto decodes snappy data whether it is traditional unframed
+// or includes the xerial framing format into the specified `dst`.
+// It is assumed that the entirety of `dst` including all capacity is available
+// for use by this function. If `dst` is nil *or* insufficiently large to hold
+// the decoded `src`, new space will be allocated.
+func DecodeInto(dst, src []byte) ([]byte, error) {
+	var max = len(src)
+	if max < len(xerialHeader) {
+		return nil, ErrMalformed
+	}
+
+	if !bytes.Equal(src[:8], xerialHeader) {
+		return master.Decode(dst[:cap(dst)], src)
+	}
+
+	if max < sizeOffset+sizeBytes {
+		return nil, ErrMalformed
+	}
+
+	if dst == nil {
+		dst = make([]byte, 0, len(src))
+	}
+
+	dst = dst[:0]
+	var (
+		pos   = sizeOffset
+		chunk []byte
+		err   error
+	)
+
+	for pos+sizeBytes <= max {
+		size := int(binary.BigEndian.Uint32(src[pos : pos+sizeBytes]))
+		pos += sizeBytes
+
+		nextPos := pos + size
+		// On architectures where int is 32 bits wide, size + pos could
+		// overflow, so we need to check the low bound as well as the
+		// high.
+		if nextPos < pos || nextPos > max {
+			return nil, ErrMalformed
+		}
+
+		chunk, err = master.Decode(chunk[:cap(chunk)], src[pos:nextPos])
+
+		if err != nil {
+			return nil, err
+		}
+		pos = nextPos
+		dst = append(dst, chunk...)
+	}
+	return dst, nil
+}
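
A short round-trip sketch of the framing handled above (the import alias is assumed; the vendored package name is snappy):

```go
package main

import (
	"bytes"
	"fmt"

	snappy "github.com/eapache/go-xerial-snappy"
)

func main() {
	src := []byte("hello, xerial framing")

	// Xerial-framed stream: 8-byte header + version info, then one or
	// more (big-endian size, compressed block) pairs.
	framed := snappy.EncodeStream(nil, src)

	// Decode accepts both framed and plain unframed snappy input.
	out, err := snappy.Decode(framed)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(out, src)) // true
}
```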
diff --git a/vendor/github.com/eapache/queue/.gitignore b/vendor/github.com/eapache/queue/.gitignore
new file mode 100644
index 0000000..8365624
--- /dev/null
+++ b/vendor/github.com/eapache/queue/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/github.com/eapache/queue/.travis.yml b/vendor/github.com/eapache/queue/.travis.yml
new file mode 100644
index 0000000..235a40a
--- /dev/null
+++ b/vendor/github.com/eapache/queue/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+sudo: false
+
+go:
+  - 1.2
+  - 1.3
+  - 1.4
diff --git a/vendor/github.com/eapache/queue/LICENSE b/vendor/github.com/eapache/queue/LICENSE
new file mode 100644
index 0000000..d5f36db
--- /dev/null
+++ b/vendor/github.com/eapache/queue/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/eapache/queue/README.md b/vendor/github.com/eapache/queue/README.md
new file mode 100644
index 0000000..8e78233
--- /dev/null
+++ b/vendor/github.com/eapache/queue/README.md
@@ -0,0 +1,16 @@
+Queue
+=====
+
+[![Build Status](https://travis-ci.org/eapache/queue.svg)](https://travis-ci.org/eapache/queue)
+[![GoDoc](https://godoc.org/github.com/eapache/queue?status.png)](https://godoc.org/github.com/eapache/queue)
+[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html)
+
+A fast Golang queue using a ring-buffer, based on the version suggested by Dariusz Górecki.
+Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
+substantial memory and time benefits, and fewer GC pauses.
+
+The queue implemented here is as fast as it is in part because it is *not* thread-safe.
+
+Follows semantic versioning using https://gopkg.in/ - import from
+[`gopkg.in/eapache/queue.v1`](https://gopkg.in/eapache/queue.v1)
+for guaranteed API stability.
diff --git a/vendor/github.com/eapache/queue/queue.go b/vendor/github.com/eapache/queue/queue.go
new file mode 100644
index 0000000..71d1acd
--- /dev/null
+++ b/vendor/github.com/eapache/queue/queue.go
@@ -0,0 +1,102 @@
+/*
+Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki.
+Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
+substantial memory and time benefits, and fewer GC pauses.
+
+The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe.
+*/
+package queue
+
+// minQueueLen is the smallest capacity that the queue may have.
+// It must be a power of 2 for bitwise modulus: x % n == x & (n - 1).
+const minQueueLen = 16
+
+// Queue represents a single instance of the queue data structure.
+type Queue struct {
+	buf               []interface{}
+	head, tail, count int
+}
+
+// New constructs and returns a new Queue.
+func New() *Queue {
+	return &Queue{
+		buf: make([]interface{}, minQueueLen),
+	}
+}
+
+// Length returns the number of elements currently stored in the queue.
+func (q *Queue) Length() int {
+	return q.count
+}
+
+// resize resizes the queue to fit exactly twice its current contents.
+// This can result in shrinking if the queue is less than half-full.
+func (q *Queue) resize() {
+	newBuf := make([]interface{}, q.count<<1)
+
+	if q.tail > q.head {
+		copy(newBuf, q.buf[q.head:q.tail])
+	} else {
+		n := copy(newBuf, q.buf[q.head:])
+		copy(newBuf[n:], q.buf[:q.tail])
+	}
+
+	q.head = 0
+	q.tail = q.count
+	q.buf = newBuf
+}
+
+// Add puts an element on the end of the queue.
+func (q *Queue) Add(elem interface{}) {
+	if q.count == len(q.buf) {
+		q.resize()
+	}
+
+	q.buf[q.tail] = elem
+	// bitwise modulus
+	q.tail = (q.tail + 1) & (len(q.buf) - 1)
+	q.count++
+}
+
+// Peek returns the element at the head of the queue. This call panics
+// if the queue is empty.
+func (q *Queue) Peek() interface{} {
+	if q.count <= 0 {
+		panic("queue: Peek() called on empty queue")
+	}
+	return q.buf[q.head]
+}
+
+// Get returns the element at index i in the queue. If the index is
+// invalid, the call will panic. This method accepts both positive and
+// negative index values. Index 0 refers to the first element, and
+// index -1 refers to the last.
+func (q *Queue) Get(i int) interface{} {
+	// If indexing backwards, convert to positive index.
+	if i < 0 {
+		i += q.count
+	}
+	if i < 0 || i >= q.count {
+		panic("queue: Get() called with index out of range")
+	}
+	// bitwise modulus
+	return q.buf[(q.head+i)&(len(q.buf)-1)]
+}
+
+// Remove removes and returns the element from the front of the queue. If the
+// queue is empty, the call will panic.
+func (q *Queue) Remove() interface{} {
+	if q.count <= 0 {
+		panic("queue: Remove() called on empty queue")
+	}
+	ret := q.buf[q.head]
+	q.buf[q.head] = nil
+	// bitwise modulus
+	q.head = (q.head + 1) & (len(q.buf) - 1)
+	q.count--
+	// Resize down if the buffer is only 1/4 full.
+	if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) {
+		q.resize()
+	}
+	return ret
+}
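
A minimal usage sketch of the queue API defined above (the printed values follow directly from the doc comments):

```go
package main

import (
	"fmt"

	"github.com/eapache/queue"
)

func main() {
	q := queue.New()
	q.Add("a")
	q.Add("b")

	fmt.Println(q.Peek())   // a (front element, not removed)
	fmt.Println(q.Remove()) // a
	fmt.Println(q.Length()) // 1
	fmt.Println(q.Get(-1))  // b (negative index counts from the back)
}
```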
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..0f64693
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2010 The Go Authors.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 0000000..3cd3249
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,253 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+	"fmt"
+	"log"
+	"reflect"
+	"strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(src Message) Message {
+	in := reflect.ValueOf(src)
+	if in.IsNil() {
+		return src
+	}
+	out := reflect.New(in.Type().Elem())
+	dst := out.Interface().(Message)
+	Merge(dst, src)
+	return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+	// Merge merges src into this message.
+	// Required and optional fields that are set in src will be set to that value in dst.
+	// Elements of repeated fields will be appended.
+	//
+	// Merge may panic if called with a different argument type than the receiver.
+	Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+	XXX_Merge(src Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+	if m, ok := dst.(Merger); ok {
+		m.Merge(src)
+		return
+	}
+
+	in := reflect.ValueOf(src)
+	out := reflect.ValueOf(dst)
+	if out.IsNil() {
+		panic("proto: nil destination")
+	}
+	if in.Type() != out.Type() {
+		panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
+	}
+	if in.IsNil() {
+		return // Merge from nil src is a noop
+	}
+	if m, ok := dst.(generatedMerger); ok {
+		m.XXX_Merge(src)
+		return
+	}
+	mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+	sprop := GetProperties(in.Type())
+	for i := 0; i < in.NumField(); i++ {
+		f := in.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+	}
+
+	if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	uf := in.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return
+	}
+	uin := uf.Bytes()
+	if len(uin) > 0 {
+		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+	}
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+	if in.Type() == protoMessageType {
+		if !in.IsNil() {
+			if out.IsNil() {
+				out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+			} else {
+				Merge(out.Interface().(Message), in.Interface().(Message))
+			}
+		}
+		return
+	}
+	switch in.Kind() {
+	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+		reflect.String, reflect.Uint32, reflect.Uint64:
+		if !viaPtr && isProto3Zero(in) {
+			return
+		}
+		out.Set(in)
+	case reflect.Interface:
+		// Probably a oneof field; copy non-nil values.
+		if in.IsNil() {
+			return
+		}
+		// Allocate destination if it is not set, or set to a different type.
+		// Otherwise we will merge as normal.
+		if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+			out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+		}
+		mergeAny(out.Elem(), in.Elem(), false, nil)
+	case reflect.Map:
+		if in.Len() == 0 {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.MakeMap(in.Type()))
+		}
+		// For maps with value types of *T or []byte we need to deep copy each value.
+		elemKind := in.Type().Elem().Kind()
+		for _, key := range in.MapKeys() {
+			var val reflect.Value
+			switch elemKind {
+			case reflect.Ptr:
+				val = reflect.New(in.Type().Elem().Elem())
+				mergeAny(val, in.MapIndex(key), false, nil)
+			case reflect.Slice:
+				val = in.MapIndex(key)
+				val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+			default:
+				val = in.MapIndex(key)
+			}
+			out.SetMapIndex(key, val)
+		}
+	case reflect.Ptr:
+		if in.IsNil() {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.New(in.Elem().Type()))
+		}
+		mergeAny(out.Elem(), in.Elem(), true, nil)
+	case reflect.Slice:
+		if in.IsNil() {
+			return
+		}
+		if in.Type().Elem().Kind() == reflect.Uint8 {
+			// []byte is a scalar bytes field, not a repeated field.
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value, and should not
+			// be merged.
+			if prop != nil && prop.proto3 && in.Len() == 0 {
+				return
+			}
+
+			// Make a deep copy.
+			// Append to []byte{} instead of []byte(nil) so that we never end up
+			// with a nil result.
+			out.SetBytes(append([]byte{}, in.Bytes()...))
+			return
+		}
+		n := in.Len()
+		if out.IsNil() {
+			out.Set(reflect.MakeSlice(in.Type(), 0, n))
+		}
+		switch in.Type().Elem().Kind() {
+		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+			reflect.String, reflect.Uint32, reflect.Uint64:
+			out.Set(reflect.AppendSlice(out, in))
+		default:
+			for i := 0; i < n; i++ {
+				x := reflect.Indirect(reflect.New(in.Type().Elem()))
+				mergeAny(x, in.Index(i), false, nil)
+				out.Set(reflect.Append(out, x))
+			}
+		}
+	case reflect.Struct:
+		mergeStruct(out, in)
+	default:
+		// unknown type, so not a protocol buffer
+		log.Printf("proto: don't know how to copy %v", in)
+	}
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+	for extNum, eIn := range in {
+		eOut := Extension{desc: eIn.desc}
+		if eIn.value != nil {
+			v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+			mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+			eOut.value = v.Interface()
+		}
+		if eIn.enc != nil {
+			eOut.enc = make([]byte, len(eIn.enc))
+			copy(eOut.enc, eIn.enc)
+		}
+
+		out[extNum] = eOut
+	}
+}
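
A hedged sketch of the Clone and Merge semantics documented above; pb.Example and its Names field are hypothetical stand-ins for any protoc-generated message type:

```go
// src is a generated proto message (hypothetical type for illustration).
src := &pb.Example{Names: []string{"a"}}

// Clone returns a deep copy with the same dynamic type.
dst := proto.Clone(src).(*pb.Example)

// Merge appends repeated fields and overwrites set scalar fields.
proto.Merge(dst, &pb.Example{Names: []string{"b"}})

// dst.Names is now []string{"a", "b"}.
```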
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 0000000..63b0f08
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,427 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough data.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+	for shift := uint(0); shift < 64; shift += 7 {
+		if n >= len(buf) {
+			return 0, 0
+		}
+		b := uint64(buf[n])
+		n++
+		x |= (b & 0x7F) << shift
+		if (b & 0x80) == 0 {
+			return x, n
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	return 0, 0
+}
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+	i := p.index
+	l := len(p.buf)
+
+	for shift := uint(0); shift < 64; shift += 7 {
+		if i >= l {
+			err = io.ErrUnexpectedEOF
+			return
+		}
+		b := p.buf[i]
+		i++
+		x |= (uint64(b) & 0x7F) << shift
+		if b < 0x80 {
+			p.index = i
+			return
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	err = errOverflow
+	return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+	i := p.index
+	buf := p.buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		p.index++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return p.decodeVarintSlow()
+	}
+
+	var b uint64
+	// we already checked the first byte
+	x = uint64(buf[i]) - 0x80
+	i++
+
+	b = uint64(buf[i])
+	i++
+	x += b << 7
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 7
+
+	b = uint64(buf[i])
+	i++
+	x += b << 14
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 14
+
+	b = uint64(buf[i])
+	i++
+	x += b << 21
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 21
+
+	b = uint64(buf[i])
+	i++
+	x += b << 28
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 28
+
+	b = uint64(buf[i])
+	i++
+	x += b << 35
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 35
+
+	b = uint64(buf[i])
+	i++
+	x += b << 42
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 42
+
+	b = uint64(buf[i])
+	i++
+	x += b << 49
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 49
+
+	b = uint64(buf[i])
+	i++
+	x += b << 56
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 56
+
+	b = uint64(buf[i])
+	i++
+	x += b << 63
+	if b&0x80 == 0 {
+		goto done
+	}
+
+	return 0, errOverflow
+
+done:
+	p.index = i
+	return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 8
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-8])
+	x |= uint64(p.buf[i-7]) << 8
+	x |= uint64(p.buf[i-6]) << 16
+	x |= uint64(p.buf[i-5]) << 24
+	x |= uint64(p.buf[i-4]) << 32
+	x |= uint64(p.buf[i-3]) << 40
+	x |= uint64(p.buf[i-2]) << 48
+	x |= uint64(p.buf[i-1]) << 56
+	return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 4
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-4])
+	x |= uint64(p.buf[i-3]) << 8
+	x |= uint64(p.buf[i-2]) << 16
+	x |= uint64(p.buf[i-1]) << 24
+	return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+	return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+	return
+}
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+	n, err := p.DecodeVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	nb := int(n)
+	if nb < 0 {
+		return nil, fmt.Errorf("proto: bad byte length %d", nb)
+	}
+	end := p.index + nb
+	if end < p.index || end > len(p.buf) {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	if !alloc {
+		// TODO: check if we can get more uses of alloc=false
+		buf = p.buf[p.index:end]
+		p.index += nb
+		return
+	}
+
+	buf = make([]byte, nb)
+	copy(buf, p.buf[p.index:])
+	p.index += nb
+	return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+	buf, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return
+	}
+	return string(buf), nil
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves.  The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
+type Unmarshaler interface {
+	Unmarshal([]byte) error
+}
+
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+	XXX_Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+	pb.Reset()
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent
+		// about whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+	enc, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+	return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
+func (p *Buffer) DecodeGroup(pb Message) error {
+	b := p.buf[p.index:]
+	x, y := findEndGroup(b)
+	if x < 0 {
+		return io.ErrUnexpectedEOF
+	}
+	err := Unmarshal(b[:x], pb)
+	p.index += y
+	return err
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb.  If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+	// If the object can unmarshal itself, let it.
+	if u, ok := pb.(newUnmarshaler); ok {
+		err := u.XXX_Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent
+		// about whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		err := u.Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+
+	// Slow workaround for messages that aren't Unmarshalers.
+	// This includes some hand-coded .pb.go files and
+	// bootstrap protos.
+	// TODO: fix all of those and then add Unmarshal to
+	// the Message interface. Then:
+	// The cast above and code below can be deleted.
+	// The old unmarshaler can be deleted.
+	// Clients can call Unmarshal directly (can already do that, actually).
+	var info InternalMessageInfo
+	err := info.Unmarshal(pb, p.buf[p.index:])
+	p.index = len(p.buf)
+	return err
+}
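
A small sketch of the varint format handled above: 300 encodes as the two bytes 0xAC 0x02 (low seven bits first, with the continuation bit set on all but the last byte):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 = 0b1_0010_1100: low 7 bits 0x2C with continuation bit -> 0xAC,
	// remaining bits 0b10 -> 0x02.
	x, n := proto.DecodeVarint([]byte{0xAC, 0x02})
	fmt.Println(x, n) // 300 2

	// The Buffer form advances an internal read index instead of
	// returning the byte count.
	b := proto.NewBuffer([]byte{0xAC, 0x02})
	y, err := b.DecodeVarint()
	fmt.Println(y, err) // 300 <nil>
}
```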
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
new file mode 100644
index 0000000..35b882c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/deprecated.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2018 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "errors"
+
+// Deprecated: do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
+
+// Deprecated: do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+	return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+	return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+	return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+	return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func RegisterMessageSetType(Message, int32, string) {}
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 0000000..dea2617
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,350 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+type generatedDiscarder interface {
+	XXX_DiscardUnknown()
+}
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// Marshal to produce a message that retains those unrecognized fields. To
+// avoid this, DiscardUnknown can be used to
+// explicitly clear the unknown fields after unmarshaling.
+//
+// For proto2 messages, the unknown fields of message extensions are only
+// discarded from messages that have been accessed via GetExtension.
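+//
+// A minimal usage sketch (pb.MyMessage is a hypothetical generated type,
+// shown for illustration only):
+//
+//	var msg pb.MyMessage
+//	if err := proto.Unmarshal(data, &msg); err != nil {
+//		// handle decode error
+//	}
+//	proto.DiscardUnknown(&msg) // unknown fields will no longer be re-marshaled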
+func DiscardUnknown(m Message) {
+	if m, ok := m.(generatedDiscarder); ok {
+		m.XXX_DiscardUnknown()
+		return
+	}
+	// TODO: Dynamically populate an InternalMessageInfo for legacy messages,
+	// but the master branch has no implementation for InternalMessageInfo,
+	// so it would be more work to replicate that approach.
+	discardLegacy(m)
+}
+
+// DiscardUnknown recursively discards all unknown fields.
+func (a *InternalMessageInfo) DiscardUnknown(m Message) {
+	di := atomicLoadDiscardInfo(&a.discard)
+	if di == nil {
+		di = getDiscardInfo(reflect.TypeOf(m).Elem())
+		atomicStoreDiscardInfo(&a.discard, di)
+	}
+	di.discard(toPointer(&m))
+}
+
+type discardInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []discardFieldInfo
+	unrecognized field
+}
+
+type discardFieldInfo struct {
+	field   field // Offset of field, guaranteed to be valid
+	discard func(src pointer)
+}
+
+var (
+	discardInfoMap  = map[reflect.Type]*discardInfo{}
+	discardInfoLock sync.Mutex
+)
+
+func getDiscardInfo(t reflect.Type) *discardInfo {
+	discardInfoLock.Lock()
+	defer discardInfoLock.Unlock()
+	di := discardInfoMap[t]
+	if di == nil {
+		di = &discardInfo{typ: t}
+		discardInfoMap[t] = di
+	}
+	return di
+}
+
+func (di *discardInfo) discard(src pointer) {
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&di.initialized) == 0 {
+		di.computeDiscardInfo()
+	}
+
+	for _, fi := range di.fields {
+		sfp := src.offset(fi.field)
+		fi.discard(sfp)
+	}
+
+	// For proto2 messages, only discard unknown fields in message extensions
+	// that have been accessed via GetExtension.
+	if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
+		// Ignore lock since DiscardUnknown is not concurrency safe.
+		emm, _ := em.extensionsRead()
+		for _, mx := range emm {
+			if m, ok := mx.value.(Message); ok {
+				DiscardUnknown(m)
+			}
+		}
+	}
+
+	if di.unrecognized.IsValid() {
+		*src.offset(di.unrecognized).toBytes() = nil
+	}
+}
+
+func (di *discardInfo) computeDiscardInfo() {
+	di.lock.Lock()
+	defer di.lock.Unlock()
+	if di.initialized != 0 {
+		return
+	}
+	t := di.typ
+	n := t.NumField()
+
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		dfi := discardFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// Unwrap tf to get its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
+		}
+
+		switch tf.Kind() {
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
+			case isSlice: // E.g., []*pb.T
+				di := getDiscardInfo(tf)
+				dfi.discard = func(src pointer) {
+					sps := src.getPointerSlice()
+					for _, sp := range sps {
+						if !sp.isNil() {
+							di.discard(sp)
+						}
+					}
+				}
+			default: // E.g., *pb.T
+				di := getDiscardInfo(tf)
+				dfi.discard = func(src pointer) {
+					sp := src.getPointer()
+					if !sp.isNil() {
+						di.discard(sp)
+					}
+				}
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
+			default: // E.g., map[K]V
+				if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
+					dfi.discard = func(src pointer) {
+						sm := src.asPointerTo(tf).Elem()
+						if sm.Len() == 0 {
+							return
+						}
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							DiscardUnknown(val.Interface().(Message))
+						}
+					}
+				} else {
+					dfi.discard = func(pointer) {} // Noop
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
+			default: // E.g., interface{}
+				// TODO: Make this faster?
+				dfi.discard = func(src pointer) {
+					su := src.asPointerTo(tf).Elem()
+					if !su.IsNil() {
+						sv := su.Elem().Elem().Field(0)
+						if sv.Kind() == reflect.Ptr && sv.IsNil() {
+							return
+						}
+						switch sv.Type().Kind() {
+						case reflect.Ptr: // Proto struct (e.g., *T)
+							DiscardUnknown(sv.Interface().(Message))
+						}
+					}
+				}
+			}
+		default:
+			continue
+		}
+		di.fields = append(di.fields, dfi)
+	}
+
+	di.unrecognized = invalidField
+	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+		if f.Type != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		di.unrecognized = toField(&f)
+	}
+
+	atomic.StoreInt32(&di.initialized, 1)
+}
+
+func discardLegacy(m Message) {
+	v := reflect.ValueOf(m)
+	if v.Kind() != reflect.Ptr || v.IsNil() {
+		return
+	}
+	v = v.Elem()
+	if v.Kind() != reflect.Struct {
+		return
+	}
+	t := v.Type()
+
+	for i := 0; i < v.NumField(); i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		vf := v.Field(i)
+		tf := f.Type
+
+		// Unwrap tf to get its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
+		}
+
+		switch tf.Kind() {
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
+			case isSlice: // E.g., []*pb.T
+				for j := 0; j < vf.Len(); j++ {
+					discardLegacy(vf.Index(j).Interface().(Message))
+				}
+			default: // E.g., *pb.T
+				discardLegacy(vf.Interface().(Message))
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
+			default: // E.g., map[K]V
+				tv := vf.Type().Elem()
+				if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
+					for _, key := range vf.MapKeys() {
+						val := vf.MapIndex(key)
+						discardLegacy(val.Interface().(Message))
+					}
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
+			default: // E.g., test_proto.isCommunique_Union interface
+				if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
+					vf = vf.Elem() // E.g., *test_proto.Communique_Msg
+					if !vf.IsNil() {
+						vf = vf.Elem()   // E.g., test_proto.Communique_Msg
+						vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
+						if vf.Kind() == reflect.Ptr {
+							discardLegacy(vf.Interface().(Message))
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
+		if vf.Type() != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		vf.Set(reflect.ValueOf([]byte(nil)))
+	}
+
+	// For proto2 messages, only discard unknown fields in message extensions
+	// that have been accessed via GetExtension.
+	if em, err := extendable(m); err == nil {
+		// Ignore lock since discardLegacy is not concurrency safe.
+		emm, _ := em.extensionsRead()
+		for _, mx := range emm {
+			if m, ok := mx.value.(Message); ok {
+				discardLegacy(m)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 0000000..3abfed2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,203 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+	"errors"
+	"reflect"
+)
+
+var (
+	// errRepeatedHasNil is the error returned if Marshal is called with
+	// a struct with a repeated field containing a nil element.
+	errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+	// errOneofHasNil is the error returned if Marshal is called with
+	// a struct with a oneof field containing a nil element.
+	errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+	// ErrNil is the error returned if Marshal is called with nil.
+	ErrNil = errors.New("proto: Marshal called with nil")
+
+	// ErrTooLarge is the error returned if Marshal is called with a
+	// message that encodes to >2GB.
+	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
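+//
+// For example, EncodeVarint(300) returns []byte{0xAC, 0x02}: each byte
+// carries seven value bits, and the high bit marks continuation.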
+func EncodeVarint(x uint64) []byte {
+	var buf [maxVarintBytes]byte
+	var n int
+	for n = 0; x > 127; n++ {
+		buf[n] = 0x80 | uint8(x&0x7F)
+		x >>= 7
+	}
+	buf[n] = uint8(x)
+	n++
+	return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+	for x >= 1<<7 {
+		p.buf = append(p.buf, uint8(x&0x7f|0x80))
+		x >>= 7
+	}
+	p.buf = append(p.buf, uint8(x))
+	return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
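+// For example, SizeVarint(127) is 1 and SizeVarint(128) is 2, since 128
+// needs a continuation byte.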
+func SizeVarint(x uint64) int {
+	switch {
+	case x < 1<<7:
+		return 1
+	case x < 1<<14:
+		return 2
+	case x < 1<<21:
+		return 3
+	case x < 1<<28:
+		return 4
+	case x < 1<<35:
+		return 5
+	case x < 1<<42:
+		return 6
+	case x < 1<<49:
+		return 7
+	case x < 1<<56:
+		return 8
+	case x < 1<<63:
+		return 9
+	}
+	return 10
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24),
+		uint8(x>>32),
+		uint8(x>>40),
+		uint8(x>>48),
+		uint8(x>>56))
+	return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24))
+	return nil
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
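+//
+// Zigzag encoding maps integers of small magnitude to small unsigned
+// values: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, and so on.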
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
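+//
+// For example, EncodeRawBytes([]byte("hi")) appends 0x02 0x68 0x69: a
+// varint length prefix followed by the raw bytes.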
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+	p.EncodeVarint(uint64(len(b)))
+	p.buf = append(p.buf, b...)
+	return nil
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+	p.EncodeVarint(uint64(len(s)))
+	p.buf = append(p.buf, s...)
+	return nil
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+	Marshal() ([]byte, error)
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+	siz := Size(pb)
+	p.EncodeVarint(uint64(siz))
+	return p.Marshal(pb)
+}
+
+// All protocol buffer fields are nillable, but only certain kinds of
+// reflect.Value support IsNil; isNil reports whether v is a nil value
+// of one of those kinds.
+func isNil(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return v.IsNil()
+	}
+	return false
+}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 0000000..f9b6e41
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,301 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+	"bytes"
+	"log"
+	"reflect"
+	"strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+  - Two messages are equal iff they are the same type,
+    corresponding fields are equal, unknown field sets
+    are equal, and extensions sets are equal.
+  - Two set scalar fields are equal iff their values are equal.
+    If the fields are of a floating-point type, remember that
+    NaN != x for all x, including NaN. If the message is defined
+    in a proto3 .proto file, fields are not "set"; specifically,
+    zero length proto3 "bytes" fields are equal (nil == {}).
+  - Two repeated fields are equal iff their lengths are the same,
+    and their corresponding elements are equal. Note a "bytes" field,
+    although represented by []byte, is not a repeated field and the
+    rule for the scalar fields described above applies.
+  - Two unset fields are equal.
+  - Two unknown field sets are equal if their current
+    encoded state is equal.
+  - Two extension sets are equal iff they have corresponding
+    elements that are pairwise equal.
+  - Two map fields are equal iff their lengths are the same,
+    and they contain the same set of elements. Zero-length map
+    fields are equal.
+  - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
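+
+For example (a sketch using a hypothetical generated type pb.Test):
+
+	a := &pb.Test{Label: proto.String("x")}
+	b := &pb.Test{Label: proto.String("x")}
+	proto.Equal(a, b)   // true: same type, corresponding fields equal
+	proto.Equal(a, nil) // false: only one argument is nil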
+*/
+func Equal(a, b Message) bool {
+	if a == nil || b == nil {
+		return a == b
+	}
+	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+	if v1.Type() != v2.Type() {
+		return false
+	}
+	if v1.Kind() == reflect.Ptr {
+		if v1.IsNil() {
+			return v2.IsNil()
+		}
+		if v2.IsNil() {
+			return false
+		}
+		v1, v2 = v1.Elem(), v2.Elem()
+	}
+	if v1.Kind() != reflect.Struct {
+		return false
+	}
+	return equalStruct(v1, v2)
+}
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+	sprop := GetProperties(v1.Type())
+	for i := 0; i < v1.NumField(); i++ {
+		f := v1.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		f1, f2 := v1.Field(i), v2.Field(i)
+		if f.Type.Kind() == reflect.Ptr {
+			if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+				// both unset
+				continue
+			} else if n1 != n2 {
+				// set/unset mismatch
+				return false
+			}
+			f1, f2 = f1.Elem(), f2.Elem()
+		}
+		if !equalAny(f1, f2, sprop.Prop[i]) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_InternalExtensions")
+		if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_extensions")
+		if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+			return false
+		}
+	}
+
+	uf := v1.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return true
+	}
+
+	u1 := uf.Bytes()
+	u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+	return bytes.Equal(u1, u2)
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+	if v1.Type() == protoMessageType {
+		m1, _ := v1.Interface().(Message)
+		m2, _ := v2.Interface().(Message)
+		return Equal(m1, m2)
+	}
+	switch v1.Kind() {
+	case reflect.Bool:
+		return v1.Bool() == v2.Bool()
+	case reflect.Float32, reflect.Float64:
+		return v1.Float() == v2.Float()
+	case reflect.Int32, reflect.Int64:
+		return v1.Int() == v2.Int()
+	case reflect.Interface:
+		// Probably a oneof field; compare the inner values.
+		n1, n2 := v1.IsNil(), v2.IsNil()
+		if n1 || n2 {
+			return n1 == n2
+		}
+		e1, e2 := v1.Elem(), v2.Elem()
+		if e1.Type() != e2.Type() {
+			return false
+		}
+		return equalAny(e1, e2, nil)
+	case reflect.Map:
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for _, key := range v1.MapKeys() {
+			val2 := v2.MapIndex(key)
+			if !val2.IsValid() {
+				// This key was not found in the second map.
+				return false
+			}
+			if !equalAny(v1.MapIndex(key), val2, nil) {
+				return false
+			}
+		}
+		return true
+	case reflect.Ptr:
+		// Maps may have nil values in them, so check for nil.
+		if v1.IsNil() && v2.IsNil() {
+			return true
+		}
+		if v1.IsNil() != v2.IsNil() {
+			return false
+		}
+		return equalAny(v1.Elem(), v2.Elem(), prop)
+	case reflect.Slice:
+		if v1.Type().Elem().Kind() == reflect.Uint8 {
+			// short circuit: []byte
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value.
+			if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+				return true
+			}
+			if v1.IsNil() != v2.IsNil() {
+				return false
+			}
+			return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+		}
+
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for i := 0; i < v1.Len(); i++ {
+			if !equalAny(v1.Index(i), v2.Index(i), prop) {
+				return false
+			}
+		}
+		return true
+	case reflect.String:
+		return v1.Interface().(string) == v2.Interface().(string)
+	case reflect.Struct:
+		return equalStruct(v1, v2)
+	case reflect.Uint32, reflect.Uint64:
+		return v1.Uint() == v2.Uint()
+	}
+
+	// unknown type, so not a protocol buffer
+	log.Printf("proto: don't know how to compare %v", v1)
+	return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+	em1, _ := x1.extensionsRead()
+	em2, _ := x2.extensionsRead()
+	return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+	if len(em1) != len(em2) {
+		return false
+	}
+
+	for extNum, e1 := range em1 {
+		e2, ok := em2[extNum]
+		if !ok {
+			return false
+		}
+
+		m1 := extensionAsLegacyType(e1.value)
+		m2 := extensionAsLegacyType(e2.value)
+
+		if m1 == nil && m2 == nil {
+			// Both have only encoded form.
+			if bytes.Equal(e1.enc, e2.enc) {
+				continue
+			}
+			// The bytes are different, but the extensions might still be
+			// equal. We need to decode them to compare.
+		}
+
+		if m1 != nil && m2 != nil {
+			// Both are unencoded.
+			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+				return false
+			}
+			continue
+		}
+
+		// At least one is encoded. To do a semantically correct comparison
+		// we need to unmarshal them first.
+		var desc *ExtensionDesc
+		if m := extensionMaps[base]; m != nil {
+			desc = m[extNum]
+		}
+		if desc == nil {
+			// If both have only encoded form and the bytes are the same,
+			// it is handled above. We get here when the bytes are different.
+			// We don't know how to decode it, so just compare them as byte
+			// slices.
+			log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+			return false
+		}
+		var err error
+		if m1 == nil {
+			m1, err = decodeExtension(e1.enc, desc)
+		}
+		if m2 == nil && err == nil {
+			m2, err = decodeExtension(e2.enc, desc)
+		}
+		if err != nil {
+			// The encoded form is invalid.
+			log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+			return false
+		}
+		if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000..fa88add
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,607 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+	Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	extensionsWrite() map[int32]Extension
+	extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+	extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+	return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+	return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock()   {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, error) {
+	switch p := p.(type) {
+	case extendableProto:
+		if isNilPtr(p) {
+			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+		}
+		return p, nil
+	case extendableProtoV1:
+		if isNilPtr(p) {
+			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+		}
+		return extensionAdapter{p}, nil
+	}
+	// Don't allocate a specific error containing %T:
+	// this is the hot path for Clone and MarshalText.
+	return nil, errNotExtendable
+}
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+func isNilPtr(x interface{}) bool {
+	v := reflect.ValueOf(x)
+	return v.Kind() == reflect.Ptr && v.IsNil()
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+	// The struct must be indirect so that if a user inadvertently copies a
+	// generated message and its embedded XXX_InternalExtensions, they
+	// avoid the mayhem of a copied mutex.
+	//
+	// The mutex serializes all logically read-only operations to p.extensionMap.
+	// It is up to the client to ensure that write operations to p.extensionMap are
+	// mutually exclusive with other accesses.
+	p *struct {
+		mu           sync.Mutex
+		extensionMap map[int32]Extension
+	}
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+	if e.p == nil {
+		e.p = new(struct {
+			mu           sync.Mutex
+			extensionMap map[int32]Extension
+		})
+		e.p.extensionMap = make(map[int32]Extension)
+	}
+	return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use.  It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+	if e.p == nil {
+		return nil, nil
+	}
+	return e.p.extensionMap, &e.p.mu
+}
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+	ExtendedType  Message     // nil pointer to the type that is being extended
+	ExtensionType interface{} // nil pointer to the extension type
+	Field         int32       // field number
+	Name          string      // fully-qualified name of extension, for text formatting
+	Tag           string      // protobuf tag style
+	Filename      string      // name of the file in which the extension is defined
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+	t := reflect.TypeOf(ed.ExtensionType)
+	return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+	// When an extension is stored in a message using SetExtension
+	// only desc and value are set. When the message is marshaled
+	// enc will be set to the encoded form of the message.
+	//
+	// When a message is unmarshaled and contains extensions, each
+	// extension will have only enc set. When such an extension is
+	// accessed using GetExtension (or GetExtensions) desc and value
+	// will be set.
+	desc *ExtensionDesc
+
+	// value is a concrete value for the extension field. Let the type of
+	// desc.ExtensionType be the "API type" and the type of Extension.value
+	// be the "storage type". The API type and storage type are the same except:
+	//	* For scalars (except []byte), the API type uses *T,
+	//	while the storage type uses T.
+	//	* For repeated fields, the API type uses []T, while the storage type
+	//	uses *[]T.
+	//
+	// The reason for the divergence is so that the storage type more naturally
+	// matches what is expected when retrieving the values through the
+	// protobuf reflection APIs.
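+	//
+	// For example (illustrative only): for a singular int32 extension the
+	// API type is *int32 and the storage type is int32, while for a
+	// repeated int32 extension the API type is []int32 and the storage
+	// type is *[]int32.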
+	//
+	// The value may only be populated if desc is also populated.
+	value interface{}
+
+	// enc is the raw bytes for the extension field.
+	enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+	epb, err := extendable(base)
+	if err != nil {
+		return
+	}
+	extmap := epb.extensionsWrite()
+	extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+	for _, er := range pb.ExtensionRangeArray() {
+		if er.Start <= field && field <= er.End {
+			return true
+		}
+	}
+	return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+	var pbi interface{} = pb
+	// Check the extended type.
+	if ea, ok := pbi.(extensionAdapter); ok {
+		pbi = ea.extendableProtoV1
+	}
+	if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+		return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
+	}
+	// Check the range.
+	if !isExtensionField(pb, extension.Field) {
+		return errors.New("proto: bad extension number; not in declared ranges")
+	}
+	return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+	base  reflect.Type
+	field int32
+}
+
+var extProp = struct {
+	sync.RWMutex
+	m map[extPropKey]*Properties
+}{
+	m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+	key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+	extProp.RLock()
+	if prop, ok := extProp.m[key]; ok {
+		extProp.RUnlock()
+		return prop
+	}
+	extProp.RUnlock()
+
+	extProp.Lock()
+	defer extProp.Unlock()
+	// Check again.
+	if prop, ok := extProp.m[key]; ok {
+		return prop
+	}
+
+	prop := new(Properties)
+	prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+	extProp.m[key] = prop
+	return prop
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+	// TODO: Check types, field numbers, etc.?
+	epb, err := extendable(pb)
+	if err != nil {
+		return false
+	}
+	extmap, mu := epb.extensionsRead()
+	if extmap == nil {
+		return false
+	}
+	mu.Lock()
+	_, ok := extmap[extension.Field]
+	mu.Unlock()
+	return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return
+	}
+	// TODO: Check types, field numbers, etc.?
+	extmap := epb.extensionsWrite()
+	delete(extmap, extension.Field)
+}
+
+// GetExtension retrieves a proto2 extended field from pb.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes of the field extension.
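+//
+// A minimal usage sketch (pb.E_Foo is a hypothetical extension descriptor
+// whose ExtensionType is *string, shown for illustration only):
+//
+//	v, err := proto.GetExtension(msg, pb.E_Foo)
+//	if err == nil {
+//		s := v.(*string) // assert to the descriptor's ExtensionType
+//		_ = s
+//	} else if err == proto.ErrMissingExtension {
+//		// field absent and no default defined
+//	}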
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+
+	if extension.ExtendedType != nil {
+		// can only check type if this is a complete descriptor
+		if err := checkExtensionTypes(epb, extension); err != nil {
+			return nil, err
+		}
+	}
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return defaultExtensionValue(extension)
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	e, ok := emap[extension.Field]
+	if !ok {
+		// defaultExtensionValue returns the default value or
+		// ErrMissingExtension if there is no default.
+		return defaultExtensionValue(extension)
+	}
+
+	if e.value != nil {
+		// Already decoded. Check the descriptor, though.
+		if e.desc != extension {
+			// This shouldn't happen. If it does, it means that
+			// GetExtension was called twice with two different
+			// descriptors with the same field number.
+			return nil, errors.New("proto: descriptor conflict")
+		}
+		return extensionAsLegacyType(e.value), nil
+	}
+
+	if extension.ExtensionType == nil {
+		// incomplete descriptor
+		return e.enc, nil
+	}
+
+	v, err := decodeExtension(e.enc, extension)
+	if err != nil {
+		return nil, err
+	}
+
+	// Remember the decoded version and drop the encoded version.
+	// That way it is safe to mutate what we return.
+	e.value = extensionAsStorageType(v)
+	e.desc = extension
+	e.enc = nil
+	emap[extension.Field] = e
+	return extensionAsLegacyType(e.value), nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined, ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+	if extension.ExtensionType == nil {
+		// incomplete descriptor, so no default
+		return nil, ErrMissingExtension
+	}
+
+	t := reflect.TypeOf(extension.ExtensionType)
+	props := extensionProperties(extension)
+
+	sf, _, err := fieldDefault(t, props)
+	if err != nil {
+		return nil, err
+	}
+
+	if sf == nil || sf.value == nil {
+		// There is no default value.
+		return nil, ErrMissingExtension
+	}
+
+	if t.Kind() != reflect.Ptr {
+		// We do not need to return a Ptr, we can directly return sf.value.
+		return sf.value, nil
+	}
+
+	// We need to return an interface{} that is a pointer to sf.value.
+	value := reflect.New(t).Elem()
+	value.Set(reflect.New(value.Type().Elem()))
+	if sf.kind == reflect.Int32 {
+		// We may have an int32 or an enum, but the underlying data is int32.
+		// Since we can't set an int32 into a non-int32 reflect.Value directly,
+		// set it as an int32.
+		value.Elem().SetInt(int64(sf.value.(int32)))
+	} else {
+		value.Elem().Set(reflect.ValueOf(sf.value))
+	}
+	return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+	t := reflect.TypeOf(extension.ExtensionType)
+	unmarshal := typeUnmarshaler(t, extension.Tag)
+
+	// t is a pointer to a struct, a pointer to a basic type, or a slice.
+	// Allocate space to store the pointer/slice.
+	value := reflect.New(t).Elem()
+
+	var err error
+	for {
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		wire := int(x) & 7
+
+		b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+		if err != nil {
+			return nil, err
+		}
+
+		if len(b) == 0 {
+			break
+		}
+	}
+	return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+	extensions = make([]interface{}, len(es))
+	for i, e := range es {
+		extensions[i], err = GetExtension(epb, e)
+		if err == ErrMissingExtension {
+			err = nil
+		}
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+	registeredExtensions := RegisteredExtensions(pb)
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return nil, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	extensions := make([]*ExtensionDesc, 0, len(emap))
+	for extid, e := range emap {
+		desc := e.desc
+		if desc == nil {
+			desc = registeredExtensions[extid]
+			if desc == nil {
+				desc = &ExtensionDesc{Field: extid}
+			}
+		}
+
+		extensions = append(extensions, desc)
+	}
+	return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
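+//
+// A minimal usage sketch (pb.E_Foo is a hypothetical extension descriptor
+// whose ExtensionType is *string, shown for illustration only):
+//
+//	if err := proto.SetExtension(msg, pb.E_Foo, proto.String("bar")); err != nil {
+//		// msg is not extendable, or the value's type does not match
+//	}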
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+	epb, err := extendable(pb)
+	if err != nil {
+		return err
+	}
+	if err := checkExtensionTypes(epb, extension); err != nil {
+		return err
+	}
+	typ := reflect.TypeOf(extension.ExtensionType)
+	if typ != reflect.TypeOf(value) {
+		return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
+	}
+	// nil extension values need to be caught early, because the
+	// encoder can't distinguish an ErrNil due to a nil extension
+	// from an ErrNil due to a missing field. Extensions are
+	// always optional, so the encoder would just swallow the error
+	// and drop all the extensions from the encoded message.
+	if reflect.ValueOf(value).IsNil() {
+		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+	}
+
+	extmap := epb.extensionsWrite()
+	extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
+	return nil
+}
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return
+	}
+	m := epb.extensionsWrite()
+	for k := range m {
+		delete(m, k)
+	}
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+	st := reflect.TypeOf(desc.ExtendedType).Elem()
+	m := extensionMaps[st]
+	if m == nil {
+		m = make(map[int32]*ExtensionDesc)
+		extensionMaps[st] = m
+	}
+	if _, ok := m[desc.Field]; ok {
+		panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+	}
+	m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+	return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
+
+// extensionAsLegacyType converts a value from the storage type to the API type.
+// See Extension.value.
+func extensionAsLegacyType(v interface{}) interface{} {
+	switch rv := reflect.ValueOf(v); rv.Kind() {
+	case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+		// Represent primitive types as a pointer to the value.
+		rv2 := reflect.New(rv.Type())
+		rv2.Elem().Set(rv)
+		v = rv2.Interface()
+	case reflect.Ptr:
+		// Represent slice types as the value itself.
+		switch rv.Type().Elem().Kind() {
+		case reflect.Slice:
+			if rv.IsNil() {
+				v = reflect.Zero(rv.Type().Elem()).Interface()
+			} else {
+				v = rv.Elem().Interface()
+			}
+		}
+	}
+	return v
+}
+
+// extensionAsStorageType converts a value from the API type to the storage type.
+// See Extension.value.
+func extensionAsStorageType(v interface{}) interface{} {
+	switch rv := reflect.ValueOf(v); rv.Kind() {
+	case reflect.Ptr:
+		// Represent pointer-to-scalar API values as the scalar value itself.
+		switch rv.Type().Elem().Kind() {
+		case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+			if rv.IsNil() {
+				v = reflect.Zero(rv.Type().Elem()).Interface()
+			} else {
+				v = rv.Elem().Interface()
+			}
+		}
+	case reflect.Slice:
+		// Represent slice types as a pointer to the value.
+		if rv.Type().Elem().Kind() != reflect.Uint8 {
+			rv2 := reflect.New(rv.Type())
+			rv2.Elem().Set(rv)
+			v = rv2.Interface()
+		}
+	}
+	return v
+}
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 0000000..fdd328b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,965 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers.  It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+  - Names are turned from camel_case to CamelCase for export.
+  - There are no methods on v to set fields; just treat
+	them as structure fields.
+  - There are getters that return a field's value if set,
+	and return the field's default value if unset.
+	The getters work even if the receiver is a nil message.
+  - The zero value for a struct is its correct initialization state.
+	All desired fields must be set before marshaling.
+  - A Reset() method will restore a protobuf struct to its zero state.
+  - Non-repeated fields are pointers to the values; nil means unset.
+	That is, optional or required field int32 f becomes F *int32.
+  - Repeated fields are slices.
+  - Helper functions are available to aid the setting of fields.
+	msg.Foo = proto.String("hello") // set field
+  - Constants are defined to hold the default values of all fields that
+	have them.  They have the form Default_StructName_FieldName.
+	Because the getter methods handle defaulted values,
+	direct use of these constants should be rare.
+  - Enums are given type names and maps from names to values.
+	Enum values are prefixed by the enclosing message's name, or by the
+	enum's type name if it is a top-level enum. Enum types have a String
+	method, and an Enum method to assist in message construction.
+  - Nested messages, groups and enums have type names prefixed with the name of
+	the surrounding message type.
+  - Extensions are given descriptor names that start with E_,
+	followed by an underscore-delimited list of the nested messages
+	that contain it (if any) followed by the CamelCased name of the
+	extension field itself.  HasExtension, ClearExtension, GetExtension
+	and SetExtension are functions for manipulating extensions.
+  - Oneof field sets are given a single field in their message,
+	with distinguished wrapper types for each possible field value.
+  - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+  - Non-repeated fields of non-message type are values instead of pointers.
+  - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+	package example;
+
+	enum FOO { X = 17; }
+
+	message Test {
+	  required string label = 1;
+	  optional int32 type = 2 [default=77];
+	  repeated int64 reps = 3;
+	  optional group OptionalGroup = 4 {
+	    required string RequiredField = 5;
+	  }
+	  oneof union {
+	    int32 number = 6;
+	    string name = 7;
+	  }
+	}
+
+The resulting file, test.pb.go, is:
+
+	package example
+
+	import proto "github.com/golang/protobuf/proto"
+	import math "math"
+
+	type FOO int32
+	const (
+		FOO_X FOO = 17
+	)
+	var FOO_name = map[int32]string{
+		17: "X",
+	}
+	var FOO_value = map[string]int32{
+		"X": 17,
+	}
+
+	func (x FOO) Enum() *FOO {
+		p := new(FOO)
+		*p = x
+		return p
+	}
+	func (x FOO) String() string {
+		return proto.EnumName(FOO_name, int32(x))
+	}
+	func (x *FOO) UnmarshalJSON(data []byte) error {
+		value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+		if err != nil {
+			return err
+		}
+		*x = FOO(value)
+		return nil
+	}
+
+	type Test struct {
+		Label         *string             `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+		Type          *int32              `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+		Reps          []int64             `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+		Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+		// Types that are valid to be assigned to Union:
+		//	*Test_Number
+		//	*Test_Name
+		Union            isTest_Union `protobuf_oneof:"union"`
+		XXX_unrecognized []byte       `json:"-"`
+	}
+	func (m *Test) Reset()         { *m = Test{} }
+	func (m *Test) String() string { return proto.CompactTextString(m) }
+	func (*Test) ProtoMessage() {}
+
+	type isTest_Union interface {
+		isTest_Union()
+	}
+
+	type Test_Number struct {
+		Number int32 `protobuf:"varint,6,opt,name=number"`
+	}
+	type Test_Name struct {
+		Name string `protobuf:"bytes,7,opt,name=name"`
+	}
+
+	func (*Test_Number) isTest_Union() {}
+	func (*Test_Name) isTest_Union()   {}
+
+	func (m *Test) GetUnion() isTest_Union {
+		if m != nil {
+			return m.Union
+		}
+		return nil
+	}
+	const Default_Test_Type int32 = 77
+
+	func (m *Test) GetLabel() string {
+		if m != nil && m.Label != nil {
+			return *m.Label
+		}
+		return ""
+	}
+
+	func (m *Test) GetType() int32 {
+		if m != nil && m.Type != nil {
+			return *m.Type
+		}
+		return Default_Test_Type
+	}
+
+	func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+		if m != nil {
+			return m.Optionalgroup
+		}
+		return nil
+	}
+
+	type Test_OptionalGroup struct {
+		RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+	}
+	func (m *Test_OptionalGroup) Reset()         { *m = Test_OptionalGroup{} }
+	func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+	func (m *Test_OptionalGroup) GetRequiredField() string {
+		if m != nil && m.RequiredField != nil {
+			return *m.RequiredField
+		}
+		return ""
+	}
+
+	func (m *Test) GetNumber() int32 {
+		if x, ok := m.GetUnion().(*Test_Number); ok {
+			return x.Number
+		}
+		return 0
+	}
+
+	func (m *Test) GetName() string {
+		if x, ok := m.GetUnion().(*Test_Name); ok {
+			return x.Name
+		}
+		return ""
+	}
+
+	func init() {
+		proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+	}
+
+To create and play with a Test object:
+
+	package main
+
+	import (
+		"log"
+
+		"github.com/golang/protobuf/proto"
+		pb "./example.pb"
+	)
+
+	func main() {
+		test := &pb.Test{
+			Label: proto.String("hello"),
+			Type:  proto.Int32(17),
+			Reps:  []int64{1, 2, 3},
+			Optionalgroup: &pb.Test_OptionalGroup{
+				RequiredField: proto.String("good bye"),
+			},
+			Union: &pb.Test_Name{"fred"},
+		}
+		data, err := proto.Marshal(test)
+		if err != nil {
+			log.Fatal("marshaling error: ", err)
+		}
+		newTest := &pb.Test{}
+		err = proto.Unmarshal(data, newTest)
+		if err != nil {
+			log.Fatal("unmarshaling error: ", err)
+		}
+		// Now test and newTest contain the same data.
+		if test.GetLabel() != newTest.GetLabel() {
+			log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+		}
+		// Use a type switch to determine which oneof was set.
+		switch u := test.Union.(type) {
+		case *pb.Test_Number: // u.Number contains the number.
+		case *pb.Test_Name: // u.Name contains the string.
+		}
+		// etc.
+	}
+*/
+package proto
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"reflect"
+	"sort"
+	"strconv"
+	"sync"
+)
+
+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
+
+func (e *RequiredNotSetError) Error() string {
+	if e.field == "" {
+		return "proto: required field not set"
+	}
+	return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+	return true
+}
+
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+	if e.field == "" {
+		return "proto: invalid UTF-8 detected"
+	}
+	return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+	return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or an InvalidUTF8 error.
+func isNonFatal(err error) bool {
+	if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+		return true
+	}
+	if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+		return true
+	}
+	return false
+}
+
+type nonFatal struct{ E error }
+
+// Merge merges err into nf and reports whether the error was absorbed.
+// Nil and non-fatal errors are absorbed (the first non-fatal error is
+// stored in nf.E); fatal errors are not, and Merge returns false for them.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+	if err == nil {
+		return true // not an error
+	}
+	if !isNonFatal(err) {
+		return false // fatal error
+	}
+	if nf.E == nil {
+		nf.E = err // store first instance of non-fatal error
+	}
+	return true
+}
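+
+// A minimal usage sketch (illustrative; errs is a hypothetical slice of
+// errors collected while encoding):
+//
+//	var nf nonFatal
+//	for _, err := range errs {
+//		if !nf.Merge(err) {
+//			return err // fatal: abort immediately
+//		}
+//	}
+//	return nf.E // first non-fatal error seen, or nil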
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+	Reset()
+	String() string
+	ProtoMessage()
+}
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers.  It may be reused between invocations to
+// reduce memory usage.  It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+	buf   []byte // encode/decode byte stream
+	index int    // read point
+
+	deterministic bool
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+	return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+	p.buf = p.buf[0:0] // for reading/writing
+	p.index = 0        // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+	p.buf = s
+	p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+//   - Repeated serialization of a message will return the same bytes.
+//   - Different processes of the same binary (which may be executing on
+//     different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographic order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+	p.deterministic = deterministic
+}
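+
+// A minimal sketch of reusing a Buffer with deterministic output enabled
+// (illustrative; msg stands for any generated Message):
+//
+//	var b Buffer
+//	b.SetDeterministic(true)
+//	if err := b.Marshal(msg); err != nil {
+//		// handle the error
+//	}
+//	data := b.Bytes()
+//	b.Reset() // reuse for the next message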
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+	return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+	p := new(int32)
+	*p = int32(v)
+	return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+	return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+	return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+	return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+	return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum's value-to-name map and a value, it returns the
+// mapped name if present, or the decimal value as a string otherwise.
+func EnumName(m map[int32]string, v int32) string {
+	s, ok := m[v]
+	if ok {
+		return s
+	}
+	return strconv.Itoa(int(v))
+}
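+
+// EnumName sketch (hypothetical map; generated code passes its <Enum>_name
+// map):
+//
+//	names := map[int32]string{1: "ON", 2: "OFF"}
+//	EnumName(names, 2)  // "OFF"
+//	EnumName(names, 99) // "99": unknown values fall back to the number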
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+	if data[0] == '"' {
+		// New style: enums are strings.
+		var repr string
+		if err := json.Unmarshal(data, &repr); err != nil {
+			return -1, err
+		}
+		val, ok := m[repr]
+		if !ok {
+			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+		}
+		return val, nil
+	}
+	// Old style: enums are ints.
+	var val int32
+	if err := json.Unmarshal(data, &val); err != nil {
+		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+	}
+	return val, nil
+}
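+
+// A minimal sketch (hypothetical map; generated code passes its <Enum>_value
+// map, as in the FOO example in the package comment):
+//
+//	vals := map[string]int32{"ON": 1, "OFF": 2}
+//	v, err := UnmarshalJSONEnum(vals, []byte(`"ON"`), "example.Mode")
+//	// v == 1; numeric input such as []byte("1") is accepted as well.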
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+	var u uint64
+
+	obuf := p.buf
+	index := p.index
+	p.buf = b
+	p.index = 0
+	depth := 0
+
+	fmt.Printf("\n--- %s ---\n", s)
+
+out:
+	for {
+		for i := 0; i < depth; i++ {
+			fmt.Print("  ")
+		}
+
+		index := p.index
+		if index == len(p.buf) {
+			break
+		}
+
+		op, err := p.DecodeVarint()
+		if err != nil {
+			fmt.Printf("%3d: fetching op err %v\n", index, err)
+			break out
+		}
+		tag := op >> 3
+		wire := op & 7
+
+		switch wire {
+		default:
+			fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+				index, tag, wire)
+			break out
+
+		case WireBytes:
+			var r []byte
+
+			r, err = p.DecodeRawBytes(false)
+			if err != nil {
+				break out
+			}
+			fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+			if len(r) <= 6 {
+				for i := 0; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			} else {
+				for i := 0; i < 3; i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+				fmt.Printf(" ..")
+				for i := len(r) - 3; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			}
+			fmt.Printf("\n")
+
+		case WireFixed32:
+			u, err = p.DecodeFixed32()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+		case WireFixed64:
+			u, err = p.DecodeFixed64()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+		case WireVarint:
+			u, err = p.DecodeVarint()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+		case WireStartGroup:
+			fmt.Printf("%3d: t=%3d start\n", index, tag)
+			depth++
+
+		case WireEndGroup:
+			depth--
+			fmt.Printf("%3d: t=%3d end\n", index, tag)
+		}
+	}
+
+	if depth != 0 {
+		fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+	}
+	fmt.Printf("\n")
+
+	p.buf = obuf
+	p.index = index
+}
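+
+// DebugPrint sketch (illustrative; msg stands for any generated Message):
+//
+//	data, _ := Marshal(msg)
+//	new(Buffer).DebugPrint("round-trip", data)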
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+	setDefaults(reflect.ValueOf(pb), true, false)
+}
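+
+// SetDefaults sketch, using the pb.Test type from the package comment, whose
+// Type field declares def=77:
+//
+//	t := &pb.Test{Label: String("hello")}
+//	SetDefaults(t)
+//	// *t.Type == 77, the declared default.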
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+	v = v.Elem()
+
+	defaultMu.RLock()
+	dm, ok := defaults[v.Type()]
+	defaultMu.RUnlock()
+	if !ok {
+		dm = buildDefaultMessage(v.Type())
+		defaultMu.Lock()
+		defaults[v.Type()] = dm
+		defaultMu.Unlock()
+	}
+
+	for _, sf := range dm.scalars {
+		f := v.Field(sf.index)
+		if !f.IsNil() {
+			// field already set
+			continue
+		}
+		dv := sf.value
+		if dv == nil && !zeros {
+			// no explicit default, and don't want to set zeros
+			continue
+		}
+		fptr := f.Addr().Interface() // **T
+		// TODO: Consider batching the allocations we do here.
+		switch sf.kind {
+		case reflect.Bool:
+			b := new(bool)
+			if dv != nil {
+				*b = dv.(bool)
+			}
+			*(fptr.(**bool)) = b
+		case reflect.Float32:
+			f := new(float32)
+			if dv != nil {
+				*f = dv.(float32)
+			}
+			*(fptr.(**float32)) = f
+		case reflect.Float64:
+			f := new(float64)
+			if dv != nil {
+				*f = dv.(float64)
+			}
+			*(fptr.(**float64)) = f
+		case reflect.Int32:
+			// might be an enum
+			if ft := f.Type(); ft != int32PtrType {
+				// enum
+				f.Set(reflect.New(ft.Elem()))
+				if dv != nil {
+					f.Elem().SetInt(int64(dv.(int32)))
+				}
+			} else {
+				// int32 field
+				i := new(int32)
+				if dv != nil {
+					*i = dv.(int32)
+				}
+				*(fptr.(**int32)) = i
+			}
+		case reflect.Int64:
+			i := new(int64)
+			if dv != nil {
+				*i = dv.(int64)
+			}
+			*(fptr.(**int64)) = i
+		case reflect.String:
+			s := new(string)
+			if dv != nil {
+				*s = dv.(string)
+			}
+			*(fptr.(**string)) = s
+		case reflect.Uint8:
+			// exceptional case: []byte
+			var b []byte
+			if dv != nil {
+				db := dv.([]byte)
+				b = make([]byte, len(db))
+				copy(b, db)
+			} else {
+				b = []byte{}
+			}
+			*(fptr.(*[]byte)) = b
+		case reflect.Uint32:
+			u := new(uint32)
+			if dv != nil {
+				*u = dv.(uint32)
+			}
+			*(fptr.(**uint32)) = u
+		case reflect.Uint64:
+			u := new(uint64)
+			if dv != nil {
+				*u = dv.(uint64)
+			}
+			*(fptr.(**uint64)) = u
+		default:
+			log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+		}
+	}
+
+	for _, ni := range dm.nested {
+		f := v.Field(ni)
+		// f is *T or []*T or map[T]*T
+		switch f.Kind() {
+		case reflect.Ptr:
+			if f.IsNil() {
+				continue
+			}
+			setDefaults(f, recur, zeros)
+
+		case reflect.Slice:
+			for i := 0; i < f.Len(); i++ {
+				e := f.Index(i)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+
+		case reflect.Map:
+			for _, k := range f.MapKeys() {
+				e := f.MapIndex(k)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+		}
+	}
+}
+
+var (
+	// defaults maps a protocol buffer struct type to a defaultMessage
+	// describing its scalar fields' proto-declared default values and the
+	// indices of its nested message fields.
+	defaultMu sync.RWMutex
+	defaults  = make(map[reflect.Type]defaultMessage)
+
+	int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+	scalars []scalarField
+	nested  []int // struct field index of nested messages
+}
+
+type scalarField struct {
+	index int          // struct field index
+	kind  reflect.Kind // element type (the T in *T or []T)
+	value interface{}  // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+	sprop := GetProperties(t)
+	for _, prop := range sprop.Prop {
+		fi, ok := sprop.decoderTags.get(prop.Tag)
+		if !ok {
+			// XXX_unrecognized
+			continue
+		}
+		ft := t.Field(fi).Type
+
+		sf, nested, err := fieldDefault(ft, prop)
+		switch {
+		case err != nil:
+			log.Print(err)
+		case nested:
+			dm.nested = append(dm.nested, fi)
+		case sf != nil:
+			sf.index = fi
+			dm.scalars = append(dm.scalars, *sf)
+		}
+	}
+
+	return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+	var canHaveDefault bool
+	switch ft.Kind() {
+	case reflect.Ptr:
+		if ft.Elem().Kind() == reflect.Struct {
+			nestedMessage = true
+		} else {
+			canHaveDefault = true // proto2 scalar field
+		}
+
+	case reflect.Slice:
+		switch ft.Elem().Kind() {
+		case reflect.Ptr:
+			nestedMessage = true // repeated message
+		case reflect.Uint8:
+			canHaveDefault = true // bytes field
+		}
+
+	case reflect.Map:
+		if ft.Elem().Kind() == reflect.Ptr {
+			nestedMessage = true // map with message values
+		}
+	}
+
+	if !canHaveDefault {
+		if nestedMessage {
+			return nil, true, nil
+		}
+		return nil, false, nil
+	}
+
+	// We now know that ft is a pointer or slice.
+	sf = &scalarField{kind: ft.Elem().Kind()}
+
+	// scalar fields without defaults
+	if !prop.HasDefault {
+		return sf, false, nil
+	}
+
+	// a scalar field: either *T or []byte
+	switch ft.Elem().Kind() {
+	case reflect.Bool:
+		x, err := strconv.ParseBool(prop.Default)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Float32:
+		x, err := strconv.ParseFloat(prop.Default, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+		}
+		sf.value = float32(x)
+	case reflect.Float64:
+		x, err := strconv.ParseFloat(prop.Default, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Int32:
+		x, err := strconv.ParseInt(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+		}
+		sf.value = int32(x)
+	case reflect.Int64:
+		x, err := strconv.ParseInt(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.String:
+		sf.value = prop.Default
+	case reflect.Uint8:
+		// []byte (not *uint8)
+		sf.value = []byte(prop.Default)
+	case reflect.Uint32:
+		x, err := strconv.ParseUint(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+		}
+		sf.value = uint32(x)
+	case reflect.Uint64:
+		x, err := strconv.ParseUint(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	default:
+		return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+	}
+
+	return sf, false, nil
+}
+
+// mapKeys returns a sort.Interface to be used for sorting the map keys.
+// Map fields may have key types of non-float scalars, strings, and enums.
+func mapKeys(vs []reflect.Value) sort.Interface {
+	s := mapKeySorter{vs: vs}
+
+	// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
+	if len(vs) == 0 {
+		return s
+	}
+	switch vs[0].Kind() {
+	case reflect.Int32, reflect.Int64:
+		s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+	case reflect.Uint32, reflect.Uint64:
+		s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+	case reflect.Bool:
+		s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+	case reflect.String:
+		s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+	default:
+		panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
+	}
+
+	return s
+}
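+
+// Internal usage sketch: sort reflected map keys before deterministic
+// encoding (ks holds reflect.Values of a single scalar kind):
+//
+//	sort.Sort(mapKeys(ks))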
+
+type mapKeySorter struct {
+	vs   []reflect.Value
+	less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int      { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+	return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint32, reflect.Uint64:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.String:
+		return v.String() == ""
+	}
+	return false
+}
+
+const (
+	// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
+	// to assert that the generated code is compatible with this version of the proto package.
+	ProtoPackageIsVersion3 = true
+
+	// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+	// to assert that the generated code is compatible with this version of the proto package.
+	ProtoPackageIsVersion2 = true
+
+	// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+	// to assert that the generated code is compatible with this version of the proto package.
+	ProtoPackageIsVersion1 = true
+)
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+	marshal   *marshalInfo
+	unmarshal *unmarshalInfo
+	merge     *mergeInfo
+	discard   *discardInfo
+}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 0000000..f48a756
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,181 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+	"errors"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+	TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
+	Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+	Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
+	XXX_unrecognized []byte
+	// TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+	MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return nil
+	}
+	id := mti.MessageTypeId()
+	for _, item := range ms.Item {
+		if *item.TypeId == id {
+			return item
+		}
+	}
+	return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+	return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+	if item := ms.find(pb); item != nil {
+		return Unmarshal(item.Message, pb)
+	}
+	if _, ok := pb.(messageTypeIder); !ok {
+		return errNoMessageTypeID
+	}
+	return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+	msg, err := Marshal(pb)
+	if err != nil {
+		return err
+	}
+	if item := ms.find(pb); item != nil {
+		// reuse existing item
+		item.Message = msg
+		return nil
+	}
+
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return errNoMessageTypeID
+	}
+
+	mtid := mti.MessageTypeId()
+	ms.Item = append(ms.Item, &_MessageSet_Item{
+		TypeId:  &mtid,
+		Message: msg,
+	})
+	return nil
+}
+
+func (ms *messageSet) Reset()         { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage()     {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+	i := 0
+	for ; buf[i]&0x80 != 0; i++ {
+	}
+	return buf[i+1:]
+}
+
+// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func unmarshalMessageSet(buf []byte, exts interface{}) error {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		m = exts.extensionsWrite()
+	case map[int32]Extension:
+		m = exts
+	default:
+		return errors.New("proto: not an extension map")
+	}
+
+	ms := new(messageSet)
+	if err := Unmarshal(buf, ms); err != nil {
+		return err
+	}
+	for _, item := range ms.Item {
+		id := *item.TypeId
+		msg := item.Message
+
+		// Restore wire type and field number varint, plus length varint.
+		// Be careful to preserve duplicate items.
+		b := EncodeVarint(uint64(id)<<3 | WireBytes)
+		if ext, ok := m[id]; ok {
+			// Existing data; rip off the tag and length varint
+			// so we join the new data correctly.
+			// We can assume that ext.enc is set because we are unmarshaling.
+			o := ext.enc[len(b):]   // skip wire type and field number
+			_, n := DecodeVarint(o) // calculate length of length varint
+			o = o[n:]               // skip length varint
+			msg = append(o, msg...) // join old data and new data
+		}
+		b = append(b, EncodeVarint(uint64(len(msg)))...)
+		b = append(b, msg...)
+
+		m[id] = Extension{enc: b}
+	}
+	return nil
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..94fa919
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,360 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build purego appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+	"reflect"
+	"sync"
+)
+
+const unsafeAllowed = false
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// zeroField is a noop when calling pointer.offset.
+var zeroField = field([]int{})
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// The pointer type is for the table-driven decoder.
+// The implementation here uses a reflect.Value of pointer type to
+// create a generic pointer. In pointer_unsafe.go we use unsafe
+// instead of reflect to implement the same (but faster) interface.
+type pointer struct {
+	v reflect.Value
+}
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+	return pointer{v: reflect.ValueOf(*i)}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
+	v := reflect.ValueOf(*i)
+	u := reflect.New(v.Type())
+	u.Elem().Set(v)
+	if deref {
+		u = u.Elem()
+	}
+	return pointer{v: u}
+}
+
+// valToPointer converts v to a pointer.  v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+	return pointer{v: v}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+	return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
+}
+
+func (p pointer) isNil() bool {
+	return p.v.IsNil()
+}
+
+// grow updates the slice s in place to make it one element longer.
+// s must be addressable.
+// Returns the (addressable) new element.
+func grow(s reflect.Value) reflect.Value {
+	n, m := s.Len(), s.Cap()
+	if n < m {
+		s.SetLen(n + 1)
+	} else {
+		s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
+	}
+	return s.Index(n)
+}
+
+func (p pointer) toInt64() *int64 {
+	return p.v.Interface().(*int64)
+}
+func (p pointer) toInt64Ptr() **int64 {
+	return p.v.Interface().(**int64)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+	return p.v.Interface().(*[]int64)
+}
+
+var int32ptr = reflect.TypeOf((*int32)(nil))
+
+func (p pointer) toInt32() *int32 {
+	return p.v.Convert(int32ptr).Interface().(*int32)
+}
+
+// The toInt32Ptr/Slice methods don't work because the field may hold an
+// enum type (a named type whose underlying type is int32), which fails the
+// type assertions below. Instead, we use get/set methods for the *int32
+// and []int32 cases.
+/*
+	func (p pointer) toInt32Ptr() **int32 {
+		return p.v.Interface().(**int32)
+	}
+	func (p pointer) toInt32Slice() *[]int32 {
+		return p.v.Interface().(*[]int32)
+	}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		return p.v.Elem().Interface().(*int32)
+	}
+	// an enum
+	return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
+}
+func (p pointer) setInt32Ptr(v int32) {
+	// Allocate value in a *int32. Possibly convert that to a *enum.
+	// Then assign it to a **int32 or **enum.
+	// Note: we can convert *int32 to *enum, but we can't convert
+	// **int32 to **enum!
+	p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
+}
+
+// getInt32Slice copies []int32 from p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getInt32Slice() []int32 {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		return p.v.Elem().Interface().([]int32)
+	}
+	// an enum
+	// Allocate a []int32, then assign []enum's values into it.
+	// Note: we can't convert []enum to []int32.
+	slice := p.v.Elem()
+	s := make([]int32, slice.Len())
+	for i := 0; i < slice.Len(); i++ {
+		s[i] = int32(slice.Index(i).Int())
+	}
+	return s
+}
+
+// setInt32Slice copies []int32 into p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setInt32Slice(v []int32) {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		p.v.Elem().Set(reflect.ValueOf(v))
+		return
+	}
+	// an enum
+	// Allocate a []enum, then assign []int32's values into it.
+	// Note: we can't convert []enum to []int32.
+	slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
+	for i, x := range v {
+		slice.Index(i).SetInt(int64(x))
+	}
+	p.v.Elem().Set(slice)
+}
+func (p pointer) appendInt32Slice(v int32) {
+	grow(p.v.Elem()).SetInt(int64(v))
+}
+
+func (p pointer) toUint64() *uint64 {
+	return p.v.Interface().(*uint64)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+	return p.v.Interface().(**uint64)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+	return p.v.Interface().(*[]uint64)
+}
+func (p pointer) toUint32() *uint32 {
+	return p.v.Interface().(*uint32)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+	return p.v.Interface().(**uint32)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+	return p.v.Interface().(*[]uint32)
+}
+func (p pointer) toBool() *bool {
+	return p.v.Interface().(*bool)
+}
+func (p pointer) toBoolPtr() **bool {
+	return p.v.Interface().(**bool)
+}
+func (p pointer) toBoolSlice() *[]bool {
+	return p.v.Interface().(*[]bool)
+}
+func (p pointer) toFloat64() *float64 {
+	return p.v.Interface().(*float64)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+	return p.v.Interface().(**float64)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+	return p.v.Interface().(*[]float64)
+}
+func (p pointer) toFloat32() *float32 {
+	return p.v.Interface().(*float32)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+	return p.v.Interface().(**float32)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+	return p.v.Interface().(*[]float32)
+}
+func (p pointer) toString() *string {
+	return p.v.Interface().(*string)
+}
+func (p pointer) toStringPtr() **string {
+	return p.v.Interface().(**string)
+}
+func (p pointer) toStringSlice() *[]string {
+	return p.v.Interface().(*[]string)
+}
+func (p pointer) toBytes() *[]byte {
+	return p.v.Interface().(*[]byte)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+	return p.v.Interface().(*[][]byte)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+	return p.v.Interface().(*XXX_InternalExtensions)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+	return p.v.Interface().(*map[int32]Extension)
+}
+func (p pointer) getPointer() pointer {
+	return pointer{v: p.v.Elem()}
+}
+func (p pointer) setPointer(q pointer) {
+	p.v.Elem().Set(q.v)
+}
+func (p pointer) appendPointer(q pointer) {
+	grow(p.v.Elem()).Set(q.v)
+}
+
+// getPointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getPointerSlice() []pointer {
+	if p.v.IsNil() {
+		return nil
+	}
+	n := p.v.Elem().Len()
+	s := make([]pointer, n)
+	for i := 0; i < n; i++ {
+		s[i] = pointer{v: p.v.Elem().Index(i)}
+	}
+	return s
+}
+
+// setPointerSlice copies []pointer into p as a new []*T.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setPointerSlice(v []pointer) {
+	if v == nil {
+		p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
+		return
+	}
+	s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
+	for _, p := range v {
+		s = reflect.Append(s, p.v)
+	}
+	p.v.Elem().Set(s)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+	if p.v.Elem().IsNil() {
+		return pointer{v: p.v.Elem()}
+	}
+	return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
+}
+
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+	// TODO: check that p.v.Type().Elem() == t?
+	return p.v
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+
+var atomicLock sync.Mutex
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..dbfffe0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,313 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !purego,!appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+	"reflect"
+	"sync/atomic"
+	"unsafe"
+)
+
+const unsafeAllowed = true
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// zeroField is a noop when calling pointer.offset.
+const zeroField = field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+	return f != invalidField
+}
+
+// The pointer type below is for the new table-driven encoder/decoder.
+// The implementation here uses unsafe.Pointer to create a generic pointer.
+// In pointer_reflect.go we use reflect instead of unsafe to implement
+// the same (but slower) interface.
+type pointer struct {
+	p unsafe.Pointer
+}
+
+// ptrSize is the size in bytes of a pointer on the current platform.
+var ptrSize = unsafe.Sizeof(uintptr(0))
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+	// Super-tricky - read pointer out of data word of interface value.
+	// Saves ~25ns over the equivalent:
+	// return valToPointer(reflect.ValueOf(*i))
+	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
+	// Super-tricky - read or get the address of data word of interface value.
+	if isptr {
+		// The interface is of pointer type, thus it is a direct interface.
+		// The data word is the pointer data itself. We take its address.
+		p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+	} else {
+		// The interface is not of pointer type. The data word is the pointer
+		// to the data.
+		p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+	}
+	if deref {
+		p.p = *(*unsafe.Pointer)(p.p)
+	}
+	return p
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+	return pointer{p: unsafe.Pointer(v.Pointer())}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+	// For safety, we should panic if !f.IsValid, however calling panic causes
+	// this to no longer be inlineable, which is a serious performance cost.
+	/*
+		if !f.IsValid() {
+			panic("invalid field")
+		}
+	*/
+	return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
+}
+
+func (p pointer) isNil() bool {
+	return p.p == nil
+}
+
+func (p pointer) toInt64() *int64 {
+	return (*int64)(p.p)
+}
+func (p pointer) toInt64Ptr() **int64 {
+	return (**int64)(p.p)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+	return (*[]int64)(p.p)
+}
+func (p pointer) toInt32() *int32 {
+	return (*int32)(p.p)
+}
+
+// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
+/*
+	func (p pointer) toInt32Ptr() **int32 {
+		return (**int32)(p.p)
+	}
+	func (p pointer) toInt32Slice() *[]int32 {
+		return (*[]int32)(p.p)
+	}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+	return *(**int32)(p.p)
+}
+func (p pointer) setInt32Ptr(v int32) {
+	*(**int32)(p.p) = &v
+}
+
+// getInt32Slice loads a []int32 from p.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getInt32Slice() []int32 {
+	return *(*[]int32)(p.p)
+}
+
+// setInt32Slice stores a []int32 to p.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setInt32Slice(v []int32) {
+	*(*[]int32)(p.p) = v
+}
+
+// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
+func (p pointer) appendInt32Slice(v int32) {
+	s := (*[]int32)(p.p)
+	*s = append(*s, v)
+}
+
+func (p pointer) toUint64() *uint64 {
+	return (*uint64)(p.p)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+	return (**uint64)(p.p)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+	return (*[]uint64)(p.p)
+}
+func (p pointer) toUint32() *uint32 {
+	return (*uint32)(p.p)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+	return (**uint32)(p.p)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+	return (*[]uint32)(p.p)
+}
+func (p pointer) toBool() *bool {
+	return (*bool)(p.p)
+}
+func (p pointer) toBoolPtr() **bool {
+	return (**bool)(p.p)
+}
+func (p pointer) toBoolSlice() *[]bool {
+	return (*[]bool)(p.p)
+}
+func (p pointer) toFloat64() *float64 {
+	return (*float64)(p.p)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+	return (**float64)(p.p)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+	return (*[]float64)(p.p)
+}
+func (p pointer) toFloat32() *float32 {
+	return (*float32)(p.p)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+	return (**float32)(p.p)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+	return (*[]float32)(p.p)
+}
+func (p pointer) toString() *string {
+	return (*string)(p.p)
+}
+func (p pointer) toStringPtr() **string {
+	return (**string)(p.p)
+}
+func (p pointer) toStringSlice() *[]string {
+	return (*[]string)(p.p)
+}
+func (p pointer) toBytes() *[]byte {
+	return (*[]byte)(p.p)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+	return (*[][]byte)(p.p)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+	return (*XXX_InternalExtensions)(p.p)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+	return (*map[int32]Extension)(p.p)
+}
+
+// getPointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getPointerSlice() []pointer {
+	// Super-tricky - p should point to a []*T where T is a
+	// message type. We load it as []pointer.
+	return *(*[]pointer)(p.p)
+}
+
+// setPointerSlice stores []pointer into p as a []*T.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setPointerSlice(v []pointer) {
+	// Super-tricky - p should point to a []*T where T is a
+	// message type. We store it as []pointer.
+	*(*[]pointer)(p.p) = v
+}
+
+// getPointer loads the pointer at p and returns it.
+func (p pointer) getPointer() pointer {
+	return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// setPointer stores the pointer q at p.
+func (p pointer) setPointer(q pointer) {
+	*(*unsafe.Pointer)(p.p) = q.p
+}
+
+// append q to the slice pointed to by p.
+func (p pointer) appendPointer(q pointer) {
+	s := (*[]unsafe.Pointer)(p.p)
+	*s = append(*s, q.p)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+	// Super-tricky - read pointer out of data word of interface value.
+	return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
+}
+
+// asPointerTo returns a reflect.Value that is a pointer to an
+// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+	return reflect.NewAt(t, p.p)
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+	return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+	return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+	return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+	return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000..a4b8c0c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,544 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+	"fmt"
+	"log"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+	WireVarint     = 0
+	WireFixed64    = 1
+	WireBytes      = 2
+	WireStartGroup = 3
+	WireEndGroup   = 4
+	WireFixed32    = 5
+)
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+	fastTags []int
+	slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+	if t > 0 && t < tagMapFastLimit {
+		if t >= len(p.fastTags) {
+			return 0, false
+		}
+		fi := p.fastTags[t]
+		return fi, fi >= 0
+	}
+	fi, ok := p.slowTags[t]
+	return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+	if t > 0 && t < tagMapFastLimit {
+		for len(p.fastTags) < t+1 {
+			p.fastTags = append(p.fastTags, -1)
+		}
+		p.fastTags[t] = fi
+		return
+	}
+	if p.slowTags == nil {
+		p.slowTags = make(map[int]int)
+	}
+	p.slowTags[t] = fi
+}
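+
+// tagMap sketch: tags below tagMapFastLimit take the slice fast path,
+// larger ones fall back to the map:
+//
+//	var tm tagMap
+//	tm.put(3, 0)        // stored in fastTags
+//	tm.put(200000, 1)   // stored in slowTags
+//	fi, ok := tm.get(3) // fi == 0, ok == true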
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+	Prop             []*Properties  // properties for each field
+	reqCount         int            // required count
+	decoderTags      tagMap         // map from proto tag to struct field number
+	decoderOrigNames map[string]int // map from original name to struct field number
+	order            []int          // list of struct field numbers in tag order
+
+	// OneofTypes contains information about the oneof fields in this message.
+	// It is keyed by the original name of a field.
+	OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+	Type  reflect.Type // pointer to generated struct type for this oneof field
+	Field int          // struct field number of the containing oneof in the message
+	Prop  *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+	return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+	Name     string // name of the field, for error messages
+	OrigName string // original name before protocol compiler (always set)
+	JSONName string // name to use for JSON; determined by protoc
+	Wire     string
+	WireType int
+	Tag      int
+	Required bool
+	Optional bool
+	Repeated bool
+	Packed   bool   // relevant for repeated primitives only
+	Enum     string // set for enum types only
+	proto3   bool   // whether this is known to be a proto3 field
+	oneof    bool   // whether this is a oneof field
+
+	Default    string // default value
+	HasDefault bool   // whether an explicit default was provided
+
+	stype reflect.Type      // set for struct types only
+	sprop *StructProperties // set for struct types only
+
+	mtype      reflect.Type // set for map types only
+	MapKeyProp *Properties  // set for map types only
+	MapValProp *Properties  // set for map types only
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+	s := p.Wire
+	s += ","
+	s += strconv.Itoa(p.Tag)
+	if p.Required {
+		s += ",req"
+	}
+	if p.Optional {
+		s += ",opt"
+	}
+	if p.Repeated {
+		s += ",rep"
+	}
+	if p.Packed {
+		s += ",packed"
+	}
+	s += ",name=" + p.OrigName
+	if p.JSONName != p.OrigName {
+		s += ",json=" + p.JSONName
+	}
+	if p.proto3 {
+		s += ",proto3"
+	}
+	if p.oneof {
+		s += ",oneof"
+	}
+	if len(p.Enum) > 0 {
+		s += ",enum=" + p.Enum
+	}
+	if p.HasDefault {
+		s += ",def=" + p.Default
+	}
+	return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+	// "bytes,49,opt,name=foo,def=hello!"
+	fields := strings.Split(s, ",") // breaks def=, but handled below.
+	if len(fields) < 2 {
+		log.Printf("proto: tag has too few fields: %q", s)
+		return
+	}
+
+	p.Wire = fields[0]
+	switch p.Wire {
+	case "varint":
+		p.WireType = WireVarint
+	case "fixed32":
+		p.WireType = WireFixed32
+	case "fixed64":
+		p.WireType = WireFixed64
+	case "zigzag32":
+		p.WireType = WireVarint
+	case "zigzag64":
+		p.WireType = WireVarint
+	case "bytes", "group":
+		p.WireType = WireBytes
+		// no numeric converter for non-numeric types
+	default:
+		log.Printf("proto: tag has unknown wire type: %q", s)
+		return
+	}
+
+	var err error
+	p.Tag, err = strconv.Atoi(fields[1])
+	if err != nil {
+		return
+	}
+
+outer:
+	for i := 2; i < len(fields); i++ {
+		f := fields[i]
+		switch {
+		case f == "req":
+			p.Required = true
+		case f == "opt":
+			p.Optional = true
+		case f == "rep":
+			p.Repeated = true
+		case f == "packed":
+			p.Packed = true
+		case strings.HasPrefix(f, "name="):
+			p.OrigName = f[5:]
+		case strings.HasPrefix(f, "json="):
+			p.JSONName = f[5:]
+		case strings.HasPrefix(f, "enum="):
+			p.Enum = f[5:]
+		case f == "proto3":
+			p.proto3 = true
+		case f == "oneof":
+			p.oneof = true
+		case strings.HasPrefix(f, "def="):
+			p.HasDefault = true
+			p.Default = f[4:] // rest of string
+			if i+1 < len(fields) {
+				// Commas aren't escaped, and def is always last.
+				p.Default += "," + strings.Join(fields[i+1:], ",")
+				break outer
+			}
+		}
+	}
+}
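+
+// Parse sketch; the tag text mirrors the generated struct tags shown in the
+// package comment:
+//
+//	var p Properties
+//	p.Parse("bytes,1,req,name=label")
+//	// p.Wire == "bytes", p.Tag == 1, p.Required == true, p.OrigName == "label"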
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+	switch t1 := typ; t1.Kind() {
+	case reflect.Ptr:
+		if t1.Elem().Kind() == reflect.Struct {
+			p.stype = t1.Elem()
+		}
+
+	case reflect.Slice:
+		if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
+			p.stype = t2.Elem()
+		}
+
+	case reflect.Map:
+		p.mtype = t1
+		p.MapKeyProp = &Properties{}
+		p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+		p.MapValProp = &Properties{}
+		vtype := p.mtype.Elem()
+		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+			// The value type is not a message (*T) or bytes ([]byte),
+			// so we need encoders for the pointer to this type.
+			vtype = reflect.PtrTo(vtype)
+		}
+		p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+	}
+
+	if p.stype != nil {
+		if lockGetProp {
+			p.sprop = GetProperties(p.stype)
+		} else {
+			p.sprop = getPropertiesLocked(p.stype)
+		}
+	}
+}
+
+var (
+	marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+)
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+	p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+	// "bytes,49,opt,def=hello!"
+	p.Name = name
+	p.OrigName = name
+	if tag == "" {
+		return
+	}
+	p.Parse(tag)
+	p.setFieldProps(typ, f, lockGetProp)
+}
+
+var (
+	propertiesMu  sync.RWMutex
+	propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+	if t.Kind() != reflect.Struct {
+		panic("proto: type must have kind struct")
+	}
+
+	// Most calls to GetProperties in a long-running program will be
+	// retrieving details for types we have seen before.
+	propertiesMu.RLock()
+	sprop, ok := propertiesMap[t]
+	propertiesMu.RUnlock()
+	if ok {
+		return sprop
+	}
+
+	propertiesMu.Lock()
+	sprop = getPropertiesLocked(t)
+	propertiesMu.Unlock()
+	return sprop
+}
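+
+// Usage sketch (pb.MyMessage is hypothetical): repeated lookups for the same
+// type take only the read-locked fast path above.
+//
+//	props := GetProperties(reflect.TypeOf(pb.MyMessage{}))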
+
+type (
+	oneofFuncsIface interface {
+		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+	}
+	oneofWrappersIface interface {
+		XXX_OneofWrappers() []interface{}
+	}
+)
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+	if prop, ok := propertiesMap[t]; ok {
+		return prop
+	}
+
+	prop := new(StructProperties)
+	// in case of recursive protos, fill this in now.
+	propertiesMap[t] = prop
+
+	// build properties
+	prop.Prop = make([]*Properties, t.NumField())
+	prop.order = make([]int, t.NumField())
+
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		p := new(Properties)
+		name := f.Name
+		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+		oneof := f.Tag.Get("protobuf_oneof") // special case
+		if oneof != "" {
+			// Oneof fields don't use the traditional protobuf tag.
+			p.OrigName = oneof
+		}
+		prop.Prop[i] = p
+		prop.order[i] = i
+		if debug {
+			print(i, " ", f.Name, " ", t.String(), " ")
+			if p.Tag > 0 {
+				print(p.String())
+			}
+			print("\n")
+		}
+	}
+
+	// Re-order prop.order.
+	sort.Sort(prop)
+
+	var oots []interface{}
+	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+	case oneofFuncsIface:
+		_, _, _, oots = m.XXX_OneofFuncs()
+	case oneofWrappersIface:
+		oots = m.XXX_OneofWrappers()
+	}
+	if len(oots) > 0 {
+		// Interpret oneof metadata.
+		prop.OneofTypes = make(map[string]*OneofProperties)
+		for _, oot := range oots {
+			oop := &OneofProperties{
+				Type: reflect.ValueOf(oot).Type(), // *T
+				Prop: new(Properties),
+			}
+			sft := oop.Type.Elem().Field(0)
+			oop.Prop.Name = sft.Name
+			oop.Prop.Parse(sft.Tag.Get("protobuf"))
+			// There will be exactly one interface field that
+			// this new value is assignable to.
+			for i := 0; i < t.NumField(); i++ {
+				f := t.Field(i)
+				if f.Type.Kind() != reflect.Interface {
+					continue
+				}
+				if !oop.Type.AssignableTo(f.Type) {
+					continue
+				}
+				oop.Field = i
+				break
+			}
+			prop.OneofTypes[oop.Prop.OrigName] = oop
+		}
+	}
+
+	// build required counts
+	// build tags
+	reqCount := 0
+	prop.decoderOrigNames = make(map[string]int)
+	for i, p := range prop.Prop {
+		if strings.HasPrefix(p.Name, "XXX_") {
+			// Internal fields should not appear in tags/origNames maps.
+			// They are handled specially when encoding and decoding.
+			continue
+		}
+		if p.Required {
+			reqCount++
+		}
+		prop.decoderTags.put(p.Tag, i)
+		prop.decoderOrigNames[p.OrigName] = i
+	}
+	prop.reqCount = reqCount
+
+	return prop
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+	if _, ok := enumValueMaps[typeName]; ok {
+		panic("proto: duplicate enum registered: " + typeName)
+	}
+	enumValueMaps[typeName] = valueMap
+}
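+
+// Generated code registers enums roughly as follows (Color_name and
+// Color_value stand in for the hypothetical generated maps):
+//
+//	proto.RegisterEnum("mypkg.Color", Color_name, Color_value)
+//
+// after which EnumValueMap("mypkg.Color") returns Color_value.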
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+	return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+	protoTypedNils = make(map[string]Message)      // a map from proto names to typed nil pointers
+	protoMapTypes  = make(map[string]reflect.Type) // a map from proto names to map types
+	revProtoTypes  = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+	if _, ok := protoTypedNils[name]; ok {
+		// TODO: Some day, make this a panic.
+		log.Printf("proto: duplicate proto type registered: %s", name)
+		return
+	}
+	t := reflect.TypeOf(x)
+	if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
+		// Generated code always calls RegisterType with nil x.
+		// This check is just for extra safety.
+		protoTypedNils[name] = x
+	} else {
+		protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
+	}
+	revProtoTypes[t] = name
+}
+
+// RegisterMapType is called from generated code and maps from the fully qualified
+// proto name to the native map type of the proto map definition.
+func RegisterMapType(x interface{}, name string) {
+	if reflect.TypeOf(x).Kind() != reflect.Map {
+		panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
+	}
+	if _, ok := protoMapTypes[name]; ok {
+		log.Printf("proto: duplicate proto type registered: %s", name)
+		return
+	}
+	t := reflect.TypeOf(x)
+	protoMapTypes[name] = t
+	revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+	type xname interface {
+		XXX_MessageName() string
+	}
+	if m, ok := x.(xname); ok {
+		return m.XXX_MessageName()
+	}
+	return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+// The type is not guaranteed to implement proto.Message if the name refers to a
+// map entry.
+func MessageType(name string) reflect.Type {
+	if t, ok := protoTypedNils[name]; ok {
+		return reflect.TypeOf(t)
+	}
+	return protoMapTypes[name]
+}
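+
+// Illustrative round trip (mypkg.Foo is hypothetical): generated code runs
+//
+//	proto.RegisterType((*Foo)(nil), "mypkg.Foo")
+//
+// after which MessageName on a *Foo returns "mypkg.Foo" and
+// MessageType("mypkg.Foo") returns reflect.TypeOf((*Foo)(nil)).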
+
+// A registry of all linked proto files.
+var (
+	protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+	protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
new file mode 100644
index 0000000..5cb11fa
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -0,0 +1,2776 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes
+// the size of the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire
+// format), marshals the field to the end of the slice, and returns the slice
+// and an error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+	typ          reflect.Type
+	fields       []*marshalFieldInfo
+	unrecognized field                      // offset of XXX_unrecognized
+	extensions   field                      // offset of XXX_InternalExtensions
+	v1extensions field                      // offset of XXX_extensions
+	sizecache    field                      // offset of XXX_sizecache
+	initialized  int32                      // 0 -- only typ is set, 1 -- fully initialized
+	messageset   bool                       // uses message set wire format
+	hasmarshaler bool                       // has custom marshaler
+	sync.RWMutex                            // protect extElems map, also for initialization
+	extElems     map[int32]*marshalElemInfo // info of extension elements
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+	field      field
+	wiretag    uint64 // tag in wire format
+	tagsize    int    // size of tag in wire format
+	sizer      sizer
+	marshaler  marshaler
+	isPointer  bool
+	required   bool                              // field is required
+	name       string                            // name of the field, for error reporting
+	oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+	wiretag   uint64 // tag in wire format
+	tagsize   int    // size of tag in wire format
+	sizer     sizer
+	marshaler marshaler
+	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+	deref     bool // dereference the pointer before operating on it; implies isptr
+}
+
+var (
+	marshalInfoMap  = map[reflect.Type]*marshalInfo{}
+	marshalInfoLock sync.Mutex
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+	marshalInfoLock.Lock()
+	u, ok := marshalInfoMap[t]
+	if !ok {
+		u = &marshalInfo{typ: t}
+		marshalInfoMap[t] = u
+	}
+	marshalInfoLock.Unlock()
+	return u
+}
+
+// Size is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It computes the size of encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return 0
+	}
+	return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return b, ErrNil
+	}
+	return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+	// u := a.marshal, but atomically.
+	// We use an atomic here to ensure memory consistency.
+	u := atomicLoadMarshalInfo(&a.marshal)
+	if u == nil {
+		// Get marshal information from type of message.
+		t := reflect.ValueOf(msg).Type()
+		if t.Kind() != reflect.Ptr {
+			panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+		}
+		u = getMarshalInfo(t.Elem())
+		// Store it in the cache for later users.
+		// a.marshal = u, but atomically.
+		atomicStoreMarshalInfo(&a.marshal, u)
+	}
+	return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b, _ := m.Marshal()
+		return len(b)
+	}
+
+	n := 0
+	for _, f := range u.fields {
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		n += f.sizer(ptr.offset(f.field), f.tagsize)
+	}
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			n += u.sizeMessageSet(e)
+		} else {
+			n += u.sizeExtensions(e)
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		n += u.sizeV1Extensions(m)
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		n += len(s)
+	}
+	// cache the result for use in marshal
+	if u.sizecache.IsValid() {
+		atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+	}
+	return n
+}
+
+// cachedsize gets the size from the cache. If there is no cache (i.e. the
+// message is not generated), it falls back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+	if u.sizecache.IsValid() {
+		return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+	}
+	return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice and appends
+// the encoded data to the end of the slice, returns the slice and error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, maps are marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b1, err := m.Marshal()
+		b = append(b, b1...)
+		return b, err
+	}
+
+	var err, errLater error
+	// The old marshaler encodes extensions at the beginning.
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			b, err = u.appendMessageSet(b, e, deterministic)
+		} else {
+			b, err = u.appendExtensions(b, e, deterministic)
+		}
+		if err != nil {
+			return b, err
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		b, err = u.appendV1Extensions(b, m, deterministic)
+		if err != nil {
+			return b, err
+		}
+	}
+	for _, f := range u.fields {
+		if f.required {
+			if ptr.offset(f.field).getPointer().isNil() {
+				// Required field is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name}
+				}
+				continue
+			}
+		}
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+		if err != nil {
+			if err1, ok := err.(*RequiredNotSetError); ok {
+				// Required field in submessage is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name + "." + err1.field}
+				}
+				continue
+			}
+			if err == errRepeatedHasNil {
+				err = errors.New("proto: repeated field " + f.name + " has nil element")
+			}
+			if err == errInvalidUTF8 {
+				if errLater == nil {
+					fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+					errLater = &invalidUTF8Error{fullName}
+				}
+				continue
+			}
+			return b, err
+		}
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		b = append(b, s...)
+	}
+	return b, errLater
+}
+
+// computeMarshalInfo initializes the marshal info.
+func (u *marshalInfo) computeMarshalInfo() {
+	u.Lock()
+	defer u.Unlock()
+	if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
+		return
+	}
+
+	t := u.typ
+	u.unrecognized = invalidField
+	u.extensions = invalidField
+	u.v1extensions = invalidField
+	u.sizecache = invalidField
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if reflect.PtrTo(t).Implements(marshalerType) {
+		u.hasmarshaler = true
+		atomic.StoreInt32(&u.initialized, 1)
+		return
+	}
+
+	// get oneof implementers
+	var oneofImplementers []interface{}
+	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+	case oneofFuncsIface:
+		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+	case oneofWrappersIface:
+		oneofImplementers = m.XXX_OneofWrappers()
+	}
+
+	n := t.NumField()
+
+	// deal with XXX fields first
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		if !strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		switch f.Name {
+		case "XXX_sizecache":
+			u.sizecache = toField(&f)
+		case "XXX_unrecognized":
+			u.unrecognized = toField(&f)
+		case "XXX_InternalExtensions":
+			u.extensions = toField(&f)
+			u.messageset = f.Tag.Get("protobuf_messageset") == "1"
+		case "XXX_extensions":
+			u.v1extensions = toField(&f)
+		case "XXX_NoUnkeyedLiteral":
+			// nothing to do
+		default:
+			panic("unknown XXX field: " + f.Name)
+		}
+		n--
+	}
+
+	// normal fields
+	fields := make([]marshalFieldInfo, n) // batch allocation
+	u.fields = make([]*marshalFieldInfo, 0, n)
+	for i, j := 0, 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		field := &fields[j]
+		j++
+		field.name = f.Name
+		u.fields = append(u.fields, field)
+		if f.Tag.Get("protobuf_oneof") != "" {
+			field.computeOneofFieldInfo(&f, oneofImplementers)
+			continue
+		}
+		if f.Tag.Get("protobuf") == "" {
+			// the field has no tag (it is not in a generated message), so ignore it
+			u.fields = u.fields[:len(u.fields)-1]
+			j--
+			continue
+		}
+		field.computeMarshalFieldInfo(&f)
+	}
+
+	// fields are marshaled in tag order on the wire.
+	sort.Sort(byTag(u.fields))
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// helper for sorting fields by tag
+type byTag []*marshalFieldInfo
+
+func (a byTag) Len() int           { return len(a) }
+func (a byTag) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
+
+// getExtElemInfo returns the information to marshal an extension element.
+// The info it returns is initialized.
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
+	// get from cache first
+	u.RLock()
+	e, ok := u.extElems[desc.Field]
+	u.RUnlock()
+	if ok {
+		return e
+	}
+
+	t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
+	tags := strings.Split(desc.Tag, ",")
+	tag, err := strconv.Atoi(tags[1])
+	if err != nil {
+		panic("tag is not an integer")
+	}
+	wt := wiretype(tags[0])
+	if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
+		t = t.Elem()
+	}
+	sizer, marshaler := typeMarshaler(t, tags, false, false)
+	var deref bool
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		t = reflect.PtrTo(t)
+		deref = true
+	}
+	e = &marshalElemInfo{
+		wiretag:   uint64(tag)<<3 | wt,
+		tagsize:   SizeVarint(uint64(tag) << 3),
+		sizer:     sizer,
+		marshaler: marshaler,
+		isptr:     t.Kind() == reflect.Ptr,
+		deref:     deref,
+	}
+
+	// update cache
+	u.Lock()
+	if u.extElems == nil {
+		u.extElems = make(map[int32]*marshalElemInfo)
+	}
+	u.extElems[desc.Field] = e
+	u.Unlock()
+	return e
+}
+
+// computeMarshalFieldInfo fills up the information to marshal a field.
+func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
+	// parse the protobuf tag of the field.
+	// The tag has the format "bytes,49,opt,name=foo,def=hello!".
+	tags := strings.Split(f.Tag.Get("protobuf"), ",")
+	if tags[0] == "" {
+		return
+	}
+	tag, err := strconv.Atoi(tags[1])
+	if err != nil {
+		panic("tag is not an integer")
+	}
+	wt := wiretype(tags[0])
+	if tags[2] == "req" {
+		fi.required = true
+	}
+	fi.setTag(f, tag, wt)
+	fi.setMarshaler(f, tags)
+}
+
+func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
+	fi.field = toField(f)
+	fi.wiretag = math.MaxInt32 // Use a large tag number so oneofs sort at the end. This tag will not appear on the wire.
+	fi.isPointer = true
+	fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
+	fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
+
+	ityp := f.Type // interface type
+	for _, o := range oneofImplementers {
+		t := reflect.TypeOf(o)
+		if !t.Implements(ityp) {
+			continue
+		}
+		sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
+		tags := strings.Split(sf.Tag.Get("protobuf"), ",")
+		tag, err := strconv.Atoi(tags[1])
+		if err != nil {
+			panic("tag is not an integer")
+		}
+		wt := wiretype(tags[0])
+		sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
+		fi.oneofElems[t.Elem()] = &marshalElemInfo{
+			wiretag:   uint64(tag)<<3 | wt,
+			tagsize:   SizeVarint(uint64(tag) << 3),
+			sizer:     sizer,
+			marshaler: marshaler,
+		}
+	}
+}
+
+// wiretype returns the wire encoding of the type.
+func wiretype(encoding string) uint64 {
+	switch encoding {
+	case "fixed32":
+		return WireFixed32
+	case "fixed64":
+		return WireFixed64
+	case "varint", "zigzag32", "zigzag64":
+		return WireVarint
+	case "bytes":
+		return WireBytes
+	case "group":
+		return WireStartGroup
+	}
+	panic("unknown wire type " + encoding)
+}
+
+// setTag fills up the tag (in wire format) and its size in the info of a field.
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
+	fi.field = toField(f)
+	fi.wiretag = uint64(tag)<<3 | wt
+	fi.tagsize = SizeVarint(uint64(tag) << 3)
+}
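+
+// Worked example: a field with tag 2 and wire type WireBytes (2) gets
+// wiretag = 2<<3 | 2 = 0x12, the familiar first byte of a length-delimited
+// field 2, and tagsize = 1 because 16 fits in a single varint byte.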
+
+// setMarshaler fills up the sizer and marshaler in the info of a field.
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
+	switch f.Type.Kind() {
+	case reflect.Map:
+		// map field
+		fi.isPointer = true
+		fi.sizer, fi.marshaler = makeMapMarshaler(f)
+		return
+	case reflect.Ptr, reflect.Slice:
+		fi.isPointer = true
+	}
+	fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
+}
+
+// typeMarshaler returns the sizer and marshaler of a given field.
+// t is the type of the field.
+// tags is the generated "protobuf" tag of the field.
+// If nozero is true, the zero value is not marshaled to the wire.
+// If oneof is true, it is a oneof field.
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
+	encoding := tags[0]
+
+	pointer := false
+	slice := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	packed := false
+	proto3 := false
+	validateUTF8 := true
+	for i := 2; i < len(tags); i++ {
+		if tags[i] == "packed" {
+			packed = true
+		}
+		if tags[i] == "proto3" {
+			proto3 = true
+		}
+	}
+	validateUTF8 = validateUTF8 && proto3
+
+	switch t.Kind() {
+	case reflect.Bool:
+		if pointer {
+			return sizeBoolPtr, appendBoolPtr
+		}
+		if slice {
+			if packed {
+				return sizeBoolPackedSlice, appendBoolPackedSlice
+			}
+			return sizeBoolSlice, appendBoolSlice
+		}
+		if nozero {
+			return sizeBoolValueNoZero, appendBoolValueNoZero
+		}
+		return sizeBoolValue, appendBoolValue
+	case reflect.Uint32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return sizeFixed32Ptr, appendFixed32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixed32PackedSlice, appendFixed32PackedSlice
+				}
+				return sizeFixed32Slice, appendFixed32Slice
+			}
+			if nozero {
+				return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
+			}
+			return sizeFixed32Value, appendFixed32Value
+		case "varint":
+			if pointer {
+				return sizeVarint32Ptr, appendVarint32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarint32PackedSlice, appendVarint32PackedSlice
+				}
+				return sizeVarint32Slice, appendVarint32Slice
+			}
+			if nozero {
+				return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
+			}
+			return sizeVarint32Value, appendVarint32Value
+		}
+	case reflect.Int32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return sizeFixedS32Ptr, appendFixedS32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
+				}
+				return sizeFixedS32Slice, appendFixedS32Slice
+			}
+			if nozero {
+				return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
+			}
+			return sizeFixedS32Value, appendFixedS32Value
+		case "varint":
+			if pointer {
+				return sizeVarintS32Ptr, appendVarintS32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
+				}
+				return sizeVarintS32Slice, appendVarintS32Slice
+			}
+			if nozero {
+				return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
+			}
+			return sizeVarintS32Value, appendVarintS32Value
+		case "zigzag32":
+			if pointer {
+				return sizeZigzag32Ptr, appendZigzag32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
+				}
+				return sizeZigzag32Slice, appendZigzag32Slice
+			}
+			if nozero {
+				return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
+			}
+			return sizeZigzag32Value, appendZigzag32Value
+		}
+	case reflect.Uint64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return sizeFixed64Ptr, appendFixed64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixed64PackedSlice, appendFixed64PackedSlice
+				}
+				return sizeFixed64Slice, appendFixed64Slice
+			}
+			if nozero {
+				return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
+			}
+			return sizeFixed64Value, appendFixed64Value
+		case "varint":
+			if pointer {
+				return sizeVarint64Ptr, appendVarint64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarint64PackedSlice, appendVarint64PackedSlice
+				}
+				return sizeVarint64Slice, appendVarint64Slice
+			}
+			if nozero {
+				return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
+			}
+			return sizeVarint64Value, appendVarint64Value
+		}
+	case reflect.Int64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return sizeFixedS64Ptr, appendFixedS64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
+				}
+				return sizeFixedS64Slice, appendFixedS64Slice
+			}
+			if nozero {
+				return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
+			}
+			return sizeFixedS64Value, appendFixedS64Value
+		case "varint":
+			if pointer {
+				return sizeVarintS64Ptr, appendVarintS64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
+				}
+				return sizeVarintS64Slice, appendVarintS64Slice
+			}
+			if nozero {
+				return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
+			}
+			return sizeVarintS64Value, appendVarintS64Value
+		case "zigzag64":
+			if pointer {
+				return sizeZigzag64Ptr, appendZigzag64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
+				}
+				return sizeZigzag64Slice, appendZigzag64Slice
+			}
+			if nozero {
+				return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
+			}
+			return sizeZigzag64Value, appendZigzag64Value
+		}
+	case reflect.Float32:
+		if pointer {
+			return sizeFloat32Ptr, appendFloat32Ptr
+		}
+		if slice {
+			if packed {
+				return sizeFloat32PackedSlice, appendFloat32PackedSlice
+			}
+			return sizeFloat32Slice, appendFloat32Slice
+		}
+		if nozero {
+			return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
+		}
+		return sizeFloat32Value, appendFloat32Value
+	case reflect.Float64:
+		if pointer {
+			return sizeFloat64Ptr, appendFloat64Ptr
+		}
+		if slice {
+			if packed {
+				return sizeFloat64PackedSlice, appendFloat64PackedSlice
+			}
+			return sizeFloat64Slice, appendFloat64Slice
+		}
+		if nozero {
+			return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
+		}
+		return sizeFloat64Value, appendFloat64Value
+	case reflect.String:
+		if validateUTF8 {
+			if pointer {
+				return sizeStringPtr, appendUTF8StringPtr
+			}
+			if slice {
+				return sizeStringSlice, appendUTF8StringSlice
+			}
+			if nozero {
+				return sizeStringValueNoZero, appendUTF8StringValueNoZero
+			}
+			return sizeStringValue, appendUTF8StringValue
+		}
+		if pointer {
+			return sizeStringPtr, appendStringPtr
+		}
+		if slice {
+			return sizeStringSlice, appendStringSlice
+		}
+		if nozero {
+			return sizeStringValueNoZero, appendStringValueNoZero
+		}
+		return sizeStringValue, appendStringValue
+	case reflect.Slice:
+		if slice {
+			return sizeBytesSlice, appendBytesSlice
+		}
+		if oneof {
+			// A oneof bytes field may also have the "proto3" tag.
+			// We want to marshal it as a oneof field; do this
+			// check before the proto3 check.
+			return sizeBytesOneof, appendBytesOneof
+		}
+		if proto3 {
+			return sizeBytes3, appendBytes3
+		}
+		return sizeBytes, appendBytes
+	case reflect.Struct:
+		switch encoding {
+		case "group":
+			if slice {
+				return makeGroupSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeGroupMarshaler(getMarshalInfo(t))
+		case "bytes":
+			if slice {
+				return makeMessageSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeMessageMarshaler(getMarshalInfo(t))
+		}
+	}
+	panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
+}
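+
+// Dispatch examples: a proto2 optional uint32 field (Go type *uint32,
+// encoding "varint") takes the pointer branch above and gets sizeVarint32Ptr
+// and appendVarint32Ptr; a packed repeated uint32 ([]uint32 with the
+// "packed" tag) gets sizeVarint32PackedSlice and appendVarint32PackedSlice.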
+
+// Below are functions to size/marshal a specific type of a field.
+// They are stored in the field's info, and called by function pointers.
+// They have type sizer or marshaler.
+
+func sizeFixed32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixed32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixed32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixedS32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixedS32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFloat32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
+	v := math.Float32bits(*ptr.toFloat32())
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFloat32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toFloat32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFloat32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixed64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixed64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixed64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFixedS64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixedS64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFloat64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
+	v := math.Float64bits(*ptr.toFloat64())
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFloat64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toFloat64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFloat64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeVarint32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarint32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarint64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	return SizeVarint(v) + tagsize
+}
+func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(v) + tagsize
+}
+func sizeVarint64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(*p) + tagsize
+}
+func sizeVarint64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v) + tagsize
+	}
+	return n
+}
+func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v)
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+	}
+	return n
+}
+func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+	}
+	return n
+}
+func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
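+
+// The zigzag transform used above is the standard protobuf encoding: small
+// signed values map to small unsigned varints, e.g. 0->0, -1->1, 1->2,
+// -2->3, 2->4, via (v<<1)^(v>>31) for 32-bit and (v<<1)^(v>>63) for 64-bit.
+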
+func sizeBoolValue(_ pointer, tagsize int) int {
+	return 1 + tagsize
+}
+func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toBool()
+	if !v {
+		return 0
+	}
+	return 1 + tagsize
+}
+func sizeBoolPtr(ptr pointer, tagsize int) int {
+	p := *ptr.toBoolPtr()
+	if p == nil {
+		return 0
+	}
+	return 1 + tagsize
+}
+func sizeBoolSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBoolSlice()
+	return (1 + tagsize) * len(s)
+}
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBoolSlice()
+	if len(s) == 0 {
+		return 0
+	}
+	return len(s) + SizeVarint(uint64(len(s))) + tagsize
+}
+func sizeStringValue(ptr pointer, tagsize int) int {
+	v := *ptr.toString()
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toString()
+	if v == "" {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringPtr(ptr pointer, tagsize int) int {
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toStringSlice()
+	n := 0
+	for _, v := range s {
+		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+	}
+	return n
+}
+func sizeBytes(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	if v == nil {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytes3(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	if len(v) == 0 {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesOneof(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBytesSlice()
+	n := 0
+	for _, v := range s {
+		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+	}
+	return n
+}
+
+// appendFixed32 appends an encoded fixed32 to b.
+func appendFixed32(b []byte, v uint32) []byte {
+	b = append(b,
+		byte(v),
+		byte(v>>8),
+		byte(v>>16),
+		byte(v>>24))
+	return b
+}
+
+// appendFixed64 appends an encoded fixed64 to b.
+func appendFixed64(b []byte, v uint64) []byte {
+	b = append(b,
+		byte(v),
+		byte(v>>8),
+		byte(v>>16),
+		byte(v>>24),
+		byte(v>>32),
+		byte(v>>40),
+		byte(v>>48),
+		byte(v>>56))
+	return b
+}
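+
+// Both fixed-width appenders emit little-endian byte order, per the protobuf
+// wire format; for example, appendFixed32(nil, 0x01020304) yields
+// []byte{0x04, 0x03, 0x02, 0x01}.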
+
+// appendVarint appends an encoded varint to b.
+func appendVarint(b []byte, v uint64) []byte {
+	// TODO: make the 1-byte (maybe 2-byte) case inline-able, once we
+	// have a non-leaf inliner.
+	switch {
+	case v < 1<<7:
+		b = append(b, byte(v))
+	case v < 1<<14:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte(v>>7))
+	case v < 1<<21:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte(v>>14))
+	case v < 1<<28:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte(v>>21))
+	case v < 1<<35:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte(v>>28))
+	case v < 1<<42:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte(v>>35))
+	case v < 1<<49:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte(v>>42))
+	case v < 1<<56:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte(v>>49))
+	case v < 1<<63:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte((v>>49)&0x7f|0x80),
+			byte(v>>56))
+	default:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte((v>>49)&0x7f|0x80),
+			byte((v>>56)&0x7f|0x80),
+			1)
+	}
+	return b
+}
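+
+// Worked example: appendVarint(nil, 300) falls into the v < 1<<14 case and
+// emits []byte{0xAC, 0x02}: 300&0x7f|0x80 = 0xAC carries the low 7 bits with
+// the continuation bit set, and 300>>7 = 2 is the final byte.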
+
+func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, *p)
+	return b, nil
+}
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, v)
+	}
+	return b, nil
+}
+func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, v)
+	}
+	return b, nil
+}
+func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(v))
+	return b, nil
+}
+func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(v))
+	return b, nil
+}
+func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(*p))
+	return b, nil
+}
+func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, uint32(v))
+	}
+	return b, nil
+}
+func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, uint32(v))
+	}
+	return b, nil
+}
+func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float32bits(*ptr.toFloat32())
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float32bits(*ptr.toFloat32())
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toFloat32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, math.Float32bits(*p))
+	return b, nil
+}
+func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, math.Float32bits(v))
+	}
+	return b, nil
+}
+func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, math.Float32bits(v))
+	}
+	return b, nil
+}
+func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, *p)
+	return b, nil
+}
+func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, v)
+	}
+	return b, nil
+}
+func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, v)
+	}
+	return b, nil
+}
+func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(v))
+	return b, nil
+}
+func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(v))
+	return b, nil
+}
+func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(*p))
+	return b, nil
+}
+func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, uint64(v))
+	}
+	return b, nil
+}
+func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, uint64(v))
+	}
+	return b, nil
+}
+func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float64bits(*ptr.toFloat64())
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float64bits(*ptr.toFloat64())
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toFloat64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, math.Float64bits(*p))
+	return b, nil
+}
+func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, math.Float64bits(v))
+	}
+	return b, nil
+}
+func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, math.Float64bits(v))
+	}
+	return b, nil
+}
+func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
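+
+// Packed layout illustrated: for a hypothetical field 4 holding
+// []uint32{3, 270}, the appender above writes 0x22 (4<<3 | WireBytes), the
+// payload length 0x03, then the payload varints 0x03, 0x8E, 0x02.
+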
+func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, v)
+	return b, nil
+}
+func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, v)
+	return b, nil
+}
+func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, *p)
+	return b, nil
+}
+func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, v)
+	}
+	return b, nil
+}
+func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v)
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, v)
+	}
+	return b, nil
+}
+func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	v := *p
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	}
+	return b, nil
+}
+func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	}
+	return b, nil
+}
+func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	v := *p
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	}
+	return b, nil
+}
+func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	}
+	return b, nil
+}
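The Zigzag* marshalers above implement the sint32/sint64 encoding: the `(v<<1) ^ (v>>63)` mapping interleaves negative and non-negative values so that small magnitudes stay small as varints. A standalone sketch of the mapping and its inverse (the inverse belongs to the decoder and is shown here only for symmetry):

```go
package main

import "fmt"

// zigzag64 matches the expression used by the marshalers above;
// unzigzag64 is its inverse.
func zigzag64(v int64) uint64   { return uint64(v<<1) ^ uint64(v>>63) }
func unzigzag64(u uint64) int64 { return int64(u>>1) ^ -int64(u&1) }

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2, -64} {
		u := zigzag64(v)
		fmt.Printf("%4d -> %3d -> %4d\n", v, u, unzigzag64(u))
	}
	// 0->0, -1->1, 1->2, -2->3, 2->4, -64->127: all one byte on the wire.
}
```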
+func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBool()
+	b = appendVarint(b, wiretag)
+	if v {
+		b = append(b, 1)
+	} else {
+		b = append(b, 0)
+	}
+	return b, nil
+}
+func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBool()
+	if !v {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = append(b, 1)
+	return b, nil
+}
+
+func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toBoolPtr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	if *p {
+		b = append(b, 1)
+	} else {
+		b = append(b, 0)
+	}
+	return b, nil
+}
+func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBoolSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		if v {
+			b = append(b, 1)
+		} else {
+			b = append(b, 0)
+		}
+	}
+	return b, nil
+}
+func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBoolSlice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(len(s)))
+	for _, v := range s {
+		if v {
+			b = append(b, 1)
+		} else {
+			b = append(b, 0)
+		}
+	}
+	return b, nil
+}
+func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toString()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toString()
+	if v == "" {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return b, nil
+	}
+	v := *p
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toStringSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	return b, nil
+}
+func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	v := *ptr.toString()
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	v := *ptr.toString()
+	if v == "" {
+		return b, nil
+	}
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return b, nil
+	}
+	v := *p
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	s := *ptr.toStringSlice()
+	for _, v := range s {
+		if !utf8.ValidString(v) {
+			invalidUTF8 = true
+		}
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	if v == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	if len(v) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBytesSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	return b, nil
+}
+
+// makeGroupMarshaler returns the sizer and marshaler for a group.
+// u is the marshal info of the underlying message.
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			return u.size(p) + 2*tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return b, nil
+			}
+			var err error
+			b = appendVarint(b, wiretag) // start group
+			b, err = u.marshal(b, p, deterministic)
+			b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+			return b, err
+		}
+}
+
+// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
+// u is the marshal info of the underlying message.
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getPointerSlice()
+			n := 0
+			for _, v := range s {
+				if v.isNil() {
+					continue
+				}
+				n += u.size(v) + 2*tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getPointerSlice()
+			var err error
+			var nerr nonFatal
+			for _, v := range s {
+				if v.isNil() {
+					return b, errRepeatedHasNil
+				}
+				b = appendVarint(b, wiretag) // start group
+				b, err = u.marshal(b, v, deterministic)
+				b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+				if !nerr.Merge(err) {
+					if err == ErrNil {
+						err = errRepeatedHasNil
+					}
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
+
+// makeMessageMarshaler returns the sizer and marshaler for a message field.
+// u is the marshal info of the message.
+func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			siz := u.size(p)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return b, nil
+			}
+			b = appendVarint(b, wiretag)
+			siz := u.cachedsize(p)
+			b = appendVarint(b, uint64(siz))
+			return u.marshal(b, p, deterministic)
+		}
+}
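The sizer above accounts for the three parts of an embedded message on the wire: the field tag, a varint carrying the body length, and the body itself, hence `siz + SizeVarint(uint64(siz)) + tagsize`. A small sketch of that arithmetic, with sizeVarint mirroring the package's SizeVarint and the 200-byte body size chosen arbitrarily:

```go
package main

import "fmt"

// sizeVarint mirrors the package's SizeVarint.
func sizeVarint(x uint64) int {
	n := 1
	for x >= 1<<7 {
		x >>= 7
		n++
	}
	return n
}

func main() {
	bodySize := 200 // hypothetical encoded size of the submessage
	tagSize := 1    // field numbers 1..15 fit in a single tag byte

	total := bodySize + sizeVarint(uint64(bodySize)) + tagSize
	fmt.Println(total) // 203 = 1 tag byte + 2 length bytes + 200 body bytes
}
```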
+
+// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
+// u is the marshal info of the message.
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getPointerSlice()
+			n := 0
+			for _, v := range s {
+				if v.isNil() {
+					continue
+				}
+				siz := u.size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getPointerSlice()
+			var err error
+			var nerr nonFatal
+			for _, v := range s {
+				if v.isNil() {
+					return b, errRepeatedHasNil
+				}
+				b = appendVarint(b, wiretag)
+				siz := u.cachedsize(v)
+				b = appendVarint(b, uint64(siz))
+				b, err = u.marshal(b, v, deterministic)
+
+				if !nerr.Merge(err) {
+					if err == ErrNil {
+						err = errRepeatedHasNil
+					}
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
+
+// makeMapMarshaler returns the sizer and marshaler for a map field.
+// f is the pointer to the reflect data structure of the field.
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
+	// figure out key and value type
+	t := f.Type
+	keyType := t.Key()
+	valType := t.Elem()
+	keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
+	valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+	keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
+	valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
+	keyWireTag := 1<<3 | wiretype(keyTags[0])
+	valWireTag := 2<<3 | wiretype(valTags[0])
+
+	// We create an interface to get the addresses of the map key and value.
+	// We create an interface to get the addresses of the map key and value.
+	// If the value is pointer-typed, the interface is a direct interface
+	// and the idata itself is the value. Otherwise, the idata is a pointer
+	// to the value.
+	// Key cannot be pointer-typed.
+	valIsPtr := valType.Kind() == reflect.Ptr
+
+	// If value is a message with nested maps, calling
+	// valSizer in marshal may be quadratic. We should use
+	// cached version in marshal (but not in size).
+	// If the value is not a message type, there is no size cache,
+	// but it cannot be nested either. Just use valSizer.
+	valCachedSizer := valSizer
+	if valIsPtr && valType.Elem().Kind() == reflect.Struct {
+		u := getMarshalInfo(valType.Elem())
+		valCachedSizer = func(ptr pointer, tagsize int) int {
+			// Same as message sizer, but use cache.
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			siz := u.cachedsize(p)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		}
+	}
+	return func(ptr pointer, tagsize int) int {
+			m := ptr.asPointerTo(t).Elem() // the map
+			n := 0
+			for _, k := range m.MapKeys() {
+				ki := k.Interface()
+				vi := m.MapIndex(k).Interface()
+				kaddr := toAddrPointer(&ki, false, false)      // pointer to key
+				vaddr := toAddrPointer(&vi, valIsPtr, false)   // pointer to value
+				siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
+			m := ptr.asPointerTo(t).Elem() // the map
+			var err error
+			keys := m.MapKeys()
+			if len(keys) > 1 && deterministic {
+				sort.Sort(mapKeys(keys))
+			}
+
+			var nerr nonFatal
+			for _, k := range keys {
+				ki := k.Interface()
+				vi := m.MapIndex(k).Interface()
+				kaddr := toAddrPointer(&ki, false, false)    // pointer to key
+				vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
+				b = appendVarint(b, tag)
+				siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+				b = appendVarint(b, uint64(siz))
+				b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
+				if !nerr.Merge(err) {
+					return b, err
+				}
+				b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
+				if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
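As the keyWireTag/valWireTag values above indicate, every map entry is framed like a two-field message: the key is field 1 and the value is field 2, and the entry itself is length-delimited under the map field's own tag. A standalone sketch encoding one entry of a hypothetical map<int32, string> field at field number 5:

```go
package main

import "fmt"

// putVarint is a simplified stand-in for this package's appendVarint.
func putVarint(b []byte, v uint64) []byte {
	for v >= 1<<7 {
		b = append(b, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, byte(v))
}

func main() {
	key, val := uint64(7), "hi"

	// Inner entry: key as field 1 (varint), value as field 2
	// (length-delimited).
	var entry []byte
	entry = putVarint(entry, 1<<3|0)
	entry = putVarint(entry, key)
	entry = putVarint(entry, 2<<3|2)
	entry = putVarint(entry, uint64(len(val)))
	entry = append(entry, val...)

	// Outer frame: the map field itself, length-delimited.
	b := putVarint(nil, 5<<3|2)
	b = putVarint(b, uint64(len(entry)))
	b = append(b, entry...)

	fmt.Printf("% x\n", b) // 2a 06 08 07 12 02 68 69
}
```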
+
+// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
+// fi is the marshal info of the field.
+// f is the pointer to the reflect data structure of the field.
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
+	// Oneof field is an interface. We need to get the actual data type on the fly.
+	t := f.Type
+	return func(ptr pointer, _ int) int {
+			p := ptr.getInterfacePointer()
+			if p.isNil() {
+				return 0
+			}
+			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+			telem := v.Type()
+			e := fi.oneofElems[telem]
+			return e.sizer(p, e.tagsize)
+		},
+		func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getInterfacePointer()
+			if p.isNil() {
+				return b, nil
+			}
+			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+			telem := v.Type()
+			if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
+				return b, errOneofHasNil
+			}
+			e := fi.oneofElems[telem]
+			return e.marshaler(b, p, e.wiretag, deterministic)
+		}
+}
+
+// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
+func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return 0
+	}
+	mu.Lock()
+
+	n := 0
+	for _, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			n += len(e.enc)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		n += ei.sizer(p, ei.tagsize)
+	}
+	mu.Unlock()
+	return n
+}
+
+// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return b, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+
+	var err error
+	var nerr nonFatal
+
+	// Fast-path for common cases: zero or one extensions.
+	// Don't bother sorting the keys.
+	if len(m) <= 1 {
+		for _, e := range m {
+			if e.value == nil || e.desc == nil {
+				// Extension is only in its encoded form.
+				b = append(b, e.enc...)
+				continue
+			}
+
+			// We don't skip extensions that have an encoded form set,
+			// because the extension value may have been mutated after
+			// the last time this function was called.
+
+			ei := u.getExtElemInfo(e.desc)
+			v := e.value
+			p := toAddrPointer(&v, ei.isptr, ei.deref)
+			b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+			if !nerr.Merge(err) {
+				return b, err
+			}
+		}
+		return b, nerr.E
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	// It is unclear whether this is required, but the old code does it.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, k := range keys {
+		e := m[int32(k)]
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			b = append(b, e.enc...)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// message set format is:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
+
+// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
+// in message set format (above).
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return 0
+	}
+	mu.Lock()
+
+	n := 0
+	for id, e := range m {
+		n += 2                          // start group, end group. tag = 1 (size=1)
+		n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
+
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+			siz := len(msgWithLen)
+			n += siz + 1 // message, tag = 3 (size=1)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		n += ei.sizer(p, 1) // message, tag = 3 (size=1)
+	}
+	mu.Unlock()
+	return n
+}
+
+// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
+// to the end of byte slice b.
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return b, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+
+	var err error
+	var nerr nonFatal
+
+	// Fast-path for common cases: zero or one extensions.
+	// Don't bother sorting the keys.
+	if len(m) <= 1 {
+		for id, e := range m {
+			b = append(b, 1<<3|WireStartGroup)
+			b = append(b, 2<<3|WireVarint)
+			b = appendVarint(b, uint64(id))
+
+			if e.value == nil || e.desc == nil {
+				// Extension is only in its encoded form.
+				msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+				b = append(b, 3<<3|WireBytes)
+				b = append(b, msgWithLen...)
+				b = append(b, 1<<3|WireEndGroup)
+				continue
+			}
+
+			// We don't skip extensions that have an encoded form set,
+			// because the extension value may have been mutated after
+			// the last time this function was called.
+
+			ei := u.getExtElemInfo(e.desc)
+			v := e.value
+			p := toAddrPointer(&v, ei.isptr, ei.deref)
+			b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+			if !nerr.Merge(err) {
+				return b, err
+			}
+			b = append(b, 1<<3|WireEndGroup)
+		}
+		return b, nerr.E
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, id := range keys {
+		e := m[int32(id)]
+		b = append(b, 1<<3|WireStartGroup)
+		b = append(b, 2<<3|WireVarint)
+		b = appendVarint(b, uint64(id))
+
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+			b = append(b, 3<<3|WireBytes)
+			b = append(b, msgWithLen...)
+			b = append(b, 1<<3|WireEndGroup)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+		b = append(b, 1<<3|WireEndGroup)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
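Tying the code above back to the format comment: each extension becomes one Item group, with type_id as field 2 and the length-delimited message as field 3, bracketed by the start/end-group bytes for field 1. A standalone sketch of one item; the type_id and body bytes are hypothetical (wire types: 0 = varint, 2 = length-delimited, 3 = start group, 4 = end group):

```go
package main

import "fmt"

// putVarint is a simplified stand-in for this package's appendVarint.
func putVarint(b []byte, v uint64) []byte {
	for v >= 1<<7 {
		b = append(b, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, byte(v))
}

func main() {
	typeID := uint64(1000)    // hypothetical extension field number
	msg := []byte{0x08, 0x2a} // hypothetical pre-encoded extension body

	var b []byte
	b = append(b, 1<<3|3) // start group, field 1
	b = append(b, 2<<3|0) // type_id, field 2, varint
	b = putVarint(b, typeID)
	b = append(b, 3<<3|2) // message, field 3, length-delimited
	b = putVarint(b, uint64(len(msg)))
	b = append(b, msg...)
	b = append(b, 1<<3|4) // end group, field 1

	fmt.Printf("% x\n", b) // 0b 10 e8 07 1a 02 08 2a 0c
}
```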
+
+// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
+	if m == nil {
+		return 0
+	}
+
+	n := 0
+	for _, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			n += len(e.enc)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		n += ei.sizer(p, ei.tagsize)
+	}
+	return n
+}
+
+// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
+func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
+	if m == nil {
+		return b, nil
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	var err error
+	var nerr nonFatal
+	for _, k := range keys {
+		e := m[int32(k)]
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			b = append(b, e.enc...)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// newMarshaler is the interface representing objects that can marshal themselves.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newMarshaler interface {
+	XXX_Size() int
+	XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+}
+
+// Size returns the encoded size of a protocol buffer message.
+// This is the main entry point.
+func Size(pb Message) int {
+	if m, ok := pb.(newMarshaler); ok {
+		return m.XXX_Size()
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		b, _ := m.Marshal()
+		return len(b)
+	}
+	// in case somehow we didn't generate the wrapper
+	if pb == nil {
+		return 0
+	}
+	var info InternalMessageInfo
+	return info.Size(pb)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, returning the data.
+// This is the main entry point.
+func Marshal(pb Message) ([]byte, error) {
+	if m, ok := pb.(newMarshaler); ok {
+		siz := m.XXX_Size()
+		b := make([]byte, 0, siz)
+		return m.XXX_Marshal(b, false)
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		return m.Marshal()
+	}
+	// in case somehow we didn't generate the wrapper
+	if pb == nil {
+		return nil, ErrNil
+	}
+	var info InternalMessageInfo
+	siz := info.Size(pb)
+	b := make([]byte, 0, siz)
+	return info.Marshal(b, pb, false)
+}
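From the caller's side, Marshal and Size are used through the public API, and the generated fast path (XXX_Size/XXX_Marshal) is picked automatically. A usage sketch, where pb.Person and its fields stand in for any protoc-gen-go generated message; the package path and field names are hypothetical:

```go
package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	pb "example.com/myapp/pb" // hypothetical generated package
)

func main() {
	msg := &pb.Person{Name: proto.String("ann")} // hypothetical message type

	data, err := proto.Marshal(msg) // dispatches to generated XXX_Marshal
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("encoded %d bytes (Size reported %d)", len(data), proto.Size(msg))
}
```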
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+// This is an alternative entry point. It is not necessary to use
+// a Buffer for most applications.
+func (p *Buffer) Marshal(pb Message) error {
+	var err error
+	if m, ok := pb.(newMarshaler); ok {
+		siz := m.XXX_Size()
+		p.grow(siz) // make sure buf has enough capacity
+		p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
+		return err
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		b, err := m.Marshal()
+		p.buf = append(p.buf, b...)
+		return err
+	}
+	// in case somehow we didn't generate the wrapper
+	if pb == nil {
+		return ErrNil
+	}
+	var info InternalMessageInfo
+	siz := info.Size(pb)
+	p.grow(siz) // make sure buf has enough capacity
+	p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
+	return err
+}
+
+// grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+func (p *Buffer) grow(n int) {
+	need := len(p.buf) + n
+	if need <= cap(p.buf) {
+		return
+	}
+	newCap := len(p.buf) * 2
+	if newCap < need {
+		newCap = need
+	}
+	p.buf = append(make([]byte, 0, newCap), p.buf...)
+}
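grow doubles the buffer's length unless even doubling falls short of the requested space, which keeps repeated appends amortized O(1). A minimal sketch of the same capacity policy in isolation:

```go
package main

import "fmt"

// nextCap applies the policy of (*Buffer).grow above: keep the capacity
// if it suffices, else double the current length, but never go below need.
func nextCap(curLen, curCap, n int) int {
	need := curLen + n
	if need <= curCap {
		return curCap // no reallocation
	}
	newCap := curLen * 2
	if newCap < need {
		newCap = need
	}
	return newCap
}

func main() {
	fmt.Println(nextCap(10, 16, 4))  // 16: fits, capacity unchanged
	fmt.Println(nextCap(10, 16, 8))  // 20: doubled length covers the need
	fmt.Println(nextCap(10, 16, 50)) // 60: doubling is not enough, take need
}
```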
diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go
new file mode 100644
index 0000000..5525def
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_merge.go
@@ -0,0 +1,654 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src are of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+	mi := atomicLoadMergeInfo(&a.merge)
+	if mi == nil {
+		mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+		atomicStoreMergeInfo(&a.merge, mi)
+	}
+	mi.merge(toPointer(&dst), toPointer(&src))
+}
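Callers reach this table-driven merge through the package-level proto.Merge. A usage sketch, again with a hypothetical generated type pb.Person: set scalar fields in src overwrite those in dst, while repeated fields are appended.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "example.com/myapp/pb" // hypothetical generated package
)

func main() {
	dst := &pb.Person{Name: proto.String("ann")}
	src := &pb.Person{Email: proto.String("ann@example.com")}

	proto.Merge(dst, src) // dst keeps Name and gains Email
	fmt.Println(dst.GetName(), dst.GetEmail()) // ann ann@example.com
}
```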
+
+type mergeInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []mergeFieldInfo
+	unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+	field field // Offset of field, guaranteed to be valid
+
+	// isPointer reports whether the value in the field is a pointer.
+	// This is true for the following situations:
+	//	* Pointer to struct
+	//	* Pointer to basic type (proto2 only)
+	//	* Slice (first value in slice header is a pointer)
+	//	* String (first value in string header is a pointer)
+	isPointer bool
+
+	// basicWidth reports the width of the field assuming that it is directly
+	// embedded in the struct (as is the case for basic types in proto3).
+	// The possible values are:
+	// 	0: invalid
+	//	1: bool
+	//	4: int32, uint32, float32
+	//	8: int64, uint64, float64
+	basicWidth int
+
+	// Where dst and src are pointers to the types being merged.
+	merge func(dst, src pointer)
+}
+
+var (
+	mergeInfoMap  = map[reflect.Type]*mergeInfo{}
+	mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+	mergeInfoLock.Lock()
+	defer mergeInfoLock.Unlock()
+	mi := mergeInfoMap[t]
+	if mi == nil {
+		mi = &mergeInfo{typ: t}
+		mergeInfoMap[t] = mi
+	}
+	return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+	if dst.isNil() {
+		panic("proto: nil destination")
+	}
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&mi.initialized) == 0 {
+		mi.computeMergeInfo()
+	}
+
+	for _, fi := range mi.fields {
+		sfp := src.offset(fi.field)
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+				continue
+			}
+			if fi.basicWidth > 0 {
+				switch {
+				case fi.basicWidth == 1 && !*sfp.toBool():
+					continue
+				case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+					continue
+				case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+					continue
+				}
+			}
+		}
+
+		dfp := dst.offset(fi.field)
+		fi.merge(dfp, sfp)
+	}
+
+	// TODO: Make this faster?
+	out := dst.asPointerTo(mi.typ).Elem()
+	in := src.asPointerTo(mi.typ).Elem()
+	if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	if mi.unrecognized.IsValid() {
+		if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+			*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+		}
+	}
+}
+
+func (mi *mergeInfo) computeMergeInfo() {
+	mi.lock.Lock()
+	defer mi.lock.Unlock()
+	if mi.initialized != 0 {
+		return
+	}
+	t := mi.typ
+	n := t.NumField()
+
+	props := GetProperties(t)
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		mfi := mergeFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			switch tf.Kind() {
+			case reflect.Ptr, reflect.Slice, reflect.String:
+				// As a special case, we assume slices and strings are pointers
+				// since we know that the first field in the SliceHeader or
+				// StringHeader is a data pointer.
+				mfi.isPointer = true
+			case reflect.Bool:
+				mfi.basicWidth = 1
+			case reflect.Int32, reflect.Uint32, reflect.Float32:
+				mfi.basicWidth = 4
+			case reflect.Int64, reflect.Uint64, reflect.Float64:
+				mfi.basicWidth = 8
+			}
+		}
+
+		// Unwrap tf to get at its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic("both pointer and slice for basic type in " + tf.Name())
+		}
+
+		switch tf.Kind() {
+		case reflect.Int32:
+			switch {
+			case isSlice: // E.g., []int32
+				mfi.merge = func(dst, src pointer) {
+					// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+					/*
+						sfsp := src.toInt32Slice()
+						if *sfsp != nil {
+							dfsp := dst.toInt32Slice()
+							*dfsp = append(*dfsp, *sfsp...)
+							if *dfsp == nil {
+								*dfsp = []int32{}
+							}
+						}
+					*/
+					sfs := src.getInt32Slice()
+					if sfs != nil {
+						dfs := dst.getInt32Slice()
+						dfs = append(dfs, sfs...)
+						if dfs == nil {
+							dfs = []int32{}
+						}
+						dst.setInt32Slice(dfs)
+					}
+				}
+			case isPointer: // E.g., *int32
+				mfi.merge = func(dst, src pointer) {
+					// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+					/*
+						sfpp := src.toInt32Ptr()
+						if *sfpp != nil {
+							dfpp := dst.toInt32Ptr()
+							if *dfpp == nil {
+								*dfpp = Int32(**sfpp)
+							} else {
+								**dfpp = **sfpp
+							}
+						}
+					*/
+					sfp := src.getInt32Ptr()
+					if sfp != nil {
+						dfp := dst.getInt32Ptr()
+						if dfp == nil {
+							dst.setInt32Ptr(*sfp)
+						} else {
+							*dfp = *sfp
+						}
+					}
+				}
+			default: // E.g., int32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toInt32(); v != 0 {
+						*dst.toInt32() = v
+					}
+				}
+			}
+		case reflect.Int64:
+			switch {
+			case isSlice: // E.g., []int64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toInt64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toInt64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []int64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *int64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toInt64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toInt64Ptr()
+						if *dfpp == nil {
+							*dfpp = Int64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., int64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toInt64(); v != 0 {
+						*dst.toInt64() = v
+					}
+				}
+			}
+		case reflect.Uint32:
+			switch {
+			case isSlice: // E.g., []uint32
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toUint32Slice()
+					if *sfsp != nil {
+						dfsp := dst.toUint32Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []uint32{}
+						}
+					}
+				}
+			case isPointer: // E.g., *uint32
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toUint32Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toUint32Ptr()
+						if *dfpp == nil {
+							*dfpp = Uint32(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., uint32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toUint32(); v != 0 {
+						*dst.toUint32() = v
+					}
+				}
+			}
+		case reflect.Uint64:
+			switch {
+			case isSlice: // E.g., []uint64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toUint64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toUint64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []uint64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *uint64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toUint64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toUint64Ptr()
+						if *dfpp == nil {
+							*dfpp = Uint64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., uint64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toUint64(); v != 0 {
+						*dst.toUint64() = v
+					}
+				}
+			}
+		case reflect.Float32:
+			switch {
+			case isSlice: // E.g., []float32
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toFloat32Slice()
+					if *sfsp != nil {
+						dfsp := dst.toFloat32Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []float32{}
+						}
+					}
+				}
+			case isPointer: // E.g., *float32
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toFloat32Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toFloat32Ptr()
+						if *dfpp == nil {
+							*dfpp = Float32(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., float32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toFloat32(); v != 0 {
+						*dst.toFloat32() = v
+					}
+				}
+			}
+		case reflect.Float64:
+			switch {
+			case isSlice: // E.g., []float64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toFloat64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toFloat64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []float64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *float64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toFloat64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toFloat64Ptr()
+						if *dfpp == nil {
+							*dfpp = Float64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., float64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toFloat64(); v != 0 {
+						*dst.toFloat64() = v
+					}
+				}
+			}
+		case reflect.Bool:
+			switch {
+			case isSlice: // E.g., []bool
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toBoolSlice()
+					if *sfsp != nil {
+						dfsp := dst.toBoolSlice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []bool{}
+						}
+					}
+				}
+			case isPointer: // E.g., *bool
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toBoolPtr()
+					if *sfpp != nil {
+						dfpp := dst.toBoolPtr()
+						if *dfpp == nil {
+							*dfpp = Bool(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., bool
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toBool(); v {
+						*dst.toBool() = v
+					}
+				}
+			}
+		case reflect.String:
+			switch {
+			case isSlice: // E.g., []string
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toStringSlice()
+					if *sfsp != nil {
+						dfsp := dst.toStringSlice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []string{}
+						}
+					}
+				}
+			case isPointer: // E.g., *string
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toStringPtr()
+					if *sfpp != nil {
+						dfpp := dst.toStringPtr()
+						if *dfpp == nil {
+							*dfpp = String(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., string
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toString(); v != "" {
+						*dst.toString() = v
+					}
+				}
+			}
+		case reflect.Slice:
+			isProto3 := props.Prop[i].proto3
+			switch {
+			case isPointer:
+				panic("bad pointer in byte slice case in " + tf.Name())
+			case tf.Elem().Kind() != reflect.Uint8:
+				panic("bad element kind in byte slice case in " + tf.Name())
+			case isSlice: // E.g., [][]byte
+				mfi.merge = func(dst, src pointer) {
+					sbsp := src.toBytesSlice()
+					if *sbsp != nil {
+						dbsp := dst.toBytesSlice()
+						for _, sb := range *sbsp {
+							if sb == nil {
+								*dbsp = append(*dbsp, nil)
+							} else {
+								*dbsp = append(*dbsp, append([]byte{}, sb...))
+							}
+						}
+						if *dbsp == nil {
+							*dbsp = [][]byte{}
+						}
+					}
+				}
+			default: // E.g., []byte
+				mfi.merge = func(dst, src pointer) {
+					sbp := src.toBytes()
+					if *sbp != nil {
+						dbp := dst.toBytes()
+						if !isProto3 || len(*sbp) > 0 {
+							*dbp = append([]byte{}, *sbp...)
+						}
+					}
+				}
+			}
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("message field %s without pointer", tf))
+			case isSlice: // E.g., []*pb.T
+				mi := getMergeInfo(tf)
+				mfi.merge = func(dst, src pointer) {
+					sps := src.getPointerSlice()
+					if sps != nil {
+						dps := dst.getPointerSlice()
+						for _, sp := range sps {
+							var dp pointer
+							if !sp.isNil() {
+								dp = valToPointer(reflect.New(tf))
+								mi.merge(dp, sp)
+							}
+							dps = append(dps, dp)
+						}
+						if dps == nil {
+							dps = []pointer{}
+						}
+						dst.setPointerSlice(dps)
+					}
+				}
+			default: // E.g., *pb.T
+				mi := getMergeInfo(tf)
+				mfi.merge = func(dst, src pointer) {
+					sp := src.getPointer()
+					if !sp.isNil() {
+						dp := dst.getPointer()
+						if dp.isNil() {
+							dp = valToPointer(reflect.New(tf))
+							dst.setPointer(dp)
+						}
+						mi.merge(dp, sp)
+					}
+				}
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic("bad pointer or slice in map case in " + tf.Name())
+			default: // E.g., map[K]V
+				mfi.merge = func(dst, src pointer) {
+					sm := src.asPointerTo(tf).Elem()
+					if sm.Len() == 0 {
+						return
+					}
+					dm := dst.asPointerTo(tf).Elem()
+					if dm.IsNil() {
+						dm.Set(reflect.MakeMap(tf))
+					}
+
+					switch tf.Elem().Kind() {
+					case reflect.Ptr: // Proto struct (e.g., *T)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							val = reflect.ValueOf(Clone(val.Interface().(Message)))
+							dm.SetMapIndex(key, val)
+						}
+					case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+							dm.SetMapIndex(key, val)
+						}
+					default: // Basic type (e.g., string)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							dm.SetMapIndex(key, val)
+						}
+					}
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic("bad pointer or slice in interface case in " + tf.Name())
+			default: // E.g., interface{}
+				// TODO: Make this faster?
+				mfi.merge = func(dst, src pointer) {
+					su := src.asPointerTo(tf).Elem()
+					if !su.IsNil() {
+						du := dst.asPointerTo(tf).Elem()
+						typ := su.Elem().Type()
+						if du.IsNil() || du.Elem().Type() != typ {
+							du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
+						}
+						sv := su.Elem().Elem().Field(0)
+						if sv.Kind() == reflect.Ptr && sv.IsNil() {
+							return
+						}
+						dv := du.Elem().Elem().Field(0)
+						if dv.Kind() == reflect.Ptr && dv.IsNil() {
+							dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
+						}
+						switch sv.Type().Kind() {
+						case reflect.Ptr: // Proto struct (e.g., *T)
+							Merge(dv.Interface().(Message), sv.Interface().(Message))
+						case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+							dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
+						default: // Basic type (e.g., string)
+							dv.Set(sv)
+						}
+					}
+				}
+			}
+		default:
+			panic(fmt.Sprintf("merger not found for type:%s", tf))
+		}
+		mi.fields = append(mi.fields, mfi)
+	}
+
+	mi.unrecognized = invalidField
+	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+		if f.Type != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		mi.unrecognized = toField(&f)
+	}
+
+	atomic.StoreInt32(&mi.initialized, 1)
+}
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
new file mode 100644
index 0000000..acee2fc
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -0,0 +1,2053 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+	// Load the unmarshal information for this message type.
+	// The atomic load ensures memory consistency.
+	u := atomicLoadUnmarshalInfo(&a.unmarshal)
+	if u == nil {
+		// Slow path: find unmarshal info for msg, update a with it.
+		u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+		atomicStoreUnmarshalInfo(&a.unmarshal, u)
+	}
+	// Then do the unmarshaling.
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
+
+type unmarshalInfo struct {
+	typ reflect.Type // type of the protobuf struct
+
+	// 0 = only typ field is initialized
+	// 1 = completely initialized
+	initialized     int32
+	lock            sync.Mutex                    // prevents double initialization
+	dense           []unmarshalFieldInfo          // fields indexed by tag #
+	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
+	reqFields       []string                      // names of required fields
+	reqMask         uint64                        // 1<<len(reqFields)-1
+	unrecognized    field                         // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+	extensions      field                         // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+	oldExtensions   field                         // offset of old-form extensions field (of type map[int]Extension)
+	extensionRanges []ExtensionRange              // if non-nil, implies extensions field is valid
+	isMessageSet    bool                          // if true, implies extensions field is valid
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+	// location of the field in the proto message structure.
+	field field
+
+	// function to unmarshal the data for the field.
+	unmarshal unmarshaler
+
+	// if a required field, contains a single set bit at this field's index in the required field list.
+	reqMask uint64
+
+	name string // name of the field, for error reporting
+}
+
+var (
+	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
+	unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// subsequently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+	// It would be correct to return a new unmarshalInfo
+	// unconditionally. We would end up allocating one
+	// per occurrence of that type as a message or submessage.
+	// We use a cache here just to reduce memory usage.
+	unmarshalInfoLock.Lock()
+	defer unmarshalInfoLock.Unlock()
+	u := unmarshalInfoMap[t]
+	if u == nil {
+		u = &unmarshalInfo{typ: t}
+		// Note: we just set the type here. The rest of the fields
+		// will be initialized on first use.
+		unmarshalInfoMap[t] = u
+	}
+	return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeUnmarshalInfo()
+	}
+	if u.isMessageSet {
+		return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+	}
+	var reqMask uint64 // bitmask of required fields we've seen.
+	var errLater error
+	for len(b) > 0 {
+		// Read tag and wire type.
+		// Special case 1 and 2 byte varints.
+		var x uint64
+		if b[0] < 128 {
+			x = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+		}
+		tag := x >> 3
+		wire := int(x) & 7
+
+		// Dispatch on the tag to one of the unmarshal* functions below.
+		var f unmarshalFieldInfo
+		if tag < uint64(len(u.dense)) {
+			f = u.dense[tag]
+		} else {
+			f = u.sparse[tag]
+		}
+		if fn := f.unmarshal; fn != nil {
+			var err error
+			b, err = fn(b, m.offset(f.field), wire)
+			if err == nil {
+				reqMask |= f.reqMask
+				continue
+			}
+			if r, ok := err.(*RequiredNotSetError); ok {
+				// Remember this error, but keep parsing. We need to produce
+				// a full parse even if a required field is missing.
+				if errLater == nil {
+					errLater = r
+				}
+				reqMask |= f.reqMask
+				continue
+			}
+			if err != errInternalBadWireType {
+				if err == errInvalidUTF8 {
+					if errLater == nil {
+						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+						errLater = &invalidUTF8Error{fullName}
+					}
+					continue
+				}
+				return err
+			}
+			// Fragments with bad wire type are treated as unknown fields.
+		}
+
+		// Unknown tag.
+		if !u.unrecognized.IsValid() {
+			// Don't keep unrecognized data; just skip it.
+			var err error
+			b, err = skipField(b, wire)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		// Keep unrecognized data around,
+		// maybe in extensions, maybe in the unrecognized field.
+		z := m.offset(u.unrecognized).toBytes()
+		var emap map[int32]Extension
+		var e Extension
+		for _, r := range u.extensionRanges {
+			if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+				if u.extensions.IsValid() {
+					mp := m.offset(u.extensions).toExtensions()
+					emap = mp.extensionsWrite()
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.oldExtensions.IsValid() {
+					p := m.offset(u.oldExtensions).toOldExtensions()
+					emap = *p
+					if emap == nil {
+						emap = map[int32]Extension{}
+						*p = emap
+					}
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				panic("no extensions field available")
+			}
+		}
+
+		// Use wire type to skip data.
+		var err error
+		b0 := b
+		b, err = skipField(b, wire)
+		if err != nil {
+			return err
+		}
+		*z = encodeVarint(*z, tag<<3|uint64(wire))
+		*z = append(*z, b0[:len(b0)-len(b)]...)
+
+		if emap != nil {
+			emap[int32(tag)] = e
+		}
+	}
+	if reqMask != u.reqMask && errLater == nil {
+		// A required field of this message is missing.
+		for _, n := range u.reqFields {
+			if reqMask&1 == 0 {
+				errLater = &RequiredNotSetError{n}
+			}
+			reqMask >>= 1
+		}
+	}
+	return errLater
+}
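The top of the loop above special-cases one- and two-byte varints before falling back to decodeVarint, since field tags are almost always that short. A standalone sketch of the same fast path, including the tag/wire-type split `x>>3` and `x&7`:

```go
package main

import "fmt"

// fastVarint mirrors the loop above: one- and two-byte varints are decoded
// inline; longer ones fall through to the generic 7-bits-per-byte
// accumulation (the role decodeVarint plays in the real code).
func fastVarint(b []byte) (x uint64, rest []byte) {
	if len(b) > 0 && b[0] < 128 {
		return uint64(b[0]), b[1:] // one byte
	}
	if len(b) >= 2 && b[1] < 128 {
		return uint64(b[0]&0x7f) | uint64(b[1])<<7, b[2:] // two bytes
	}
	var shift uint
	for i, c := range b {
		x |= uint64(c&0x7f) << shift
		if c < 128 {
			return x, b[i+1:]
		}
		shift += 7
	}
	return 0, b // truncated varint
}

func main() {
	x, _ := fastVarint([]byte{0xa2, 0x01})    // two-byte varint: 162
	fmt.Printf("tag=%d wire=%d\n", x>>3, x&7) // tag=20 wire=2 (length-delimited)
}
```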
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+	u.lock.Lock()
+	defer u.lock.Unlock()
+	if u.initialized != 0 {
+		return
+	}
+	t := u.typ
+	n := t.NumField()
+
+	// Set up the "not found" value for the unrecognized byte buffer.
+	// This is the default for proto3.
+	u.unrecognized = invalidField
+	u.extensions = invalidField
+	u.oldExtensions = invalidField
+
+	// List of the generated type and offset for each oneof field.
+	type oneofField struct {
+		ityp  reflect.Type // interface type of oneof field
+		field field        // offset in containing message
+	}
+	var oneofFields []oneofField
+
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if f.Name == "XXX_unrecognized" {
+			// The byte slice used to hold unrecognized input is special.
+			if f.Type != reflect.TypeOf(([]byte)(nil)) {
+				panic("bad type for XXX_unrecognized field: " + f.Type.Name())
+			}
+			u.unrecognized = toField(&f)
+			continue
+		}
+		if f.Name == "XXX_InternalExtensions" {
+			// Ditto here.
+			if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
+				panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
+			}
+			u.extensions = toField(&f)
+			if f.Tag.Get("protobuf_messageset") == "1" {
+				u.isMessageSet = true
+			}
+			continue
+		}
+		if f.Name == "XXX_extensions" {
+			// An older form of the extensions field.
+			if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
+				panic("bad type for XXX_extensions field: " + f.Type.Name())
+			}
+			u.oldExtensions = toField(&f)
+			continue
+		}
+		if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
+			continue
+		}
+
+		oneof := f.Tag.Get("protobuf_oneof")
+		if oneof != "" {
+			oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
+			// The rest of oneof processing happens below.
+			continue
+		}
+
+		tags := f.Tag.Get("protobuf")
+		tagArray := strings.Split(tags, ",")
+		if len(tagArray) < 2 {
+			panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
+		}
+		tag, err := strconv.Atoi(tagArray[1])
+		if err != nil {
+			panic("protobuf tag field not an integer: " + tagArray[1])
+		}
+
+		name := ""
+		for _, tag := range tagArray[3:] {
+			if strings.HasPrefix(tag, "name=") {
+				name = tag[5:]
+			}
+		}
+
+		// Extract unmarshaling function from the field (its type and tags).
+		unmarshal := fieldUnmarshaler(&f)
+
+		// Required field?
+		var reqMask uint64
+		if tagArray[2] == "req" {
+			bit := len(u.reqFields)
+			u.reqFields = append(u.reqFields, name)
+			reqMask = uint64(1) << uint(bit)
+			// TODO: if we have more than 64 required fields, we end up
+			// not verifying that all required fields are present.
+			// Fix this, perhaps using a count of required fields?
+		}
+
+		// Store the info in the correct slot in the message.
+		u.setTag(tag, toField(&f), unmarshal, reqMask, name)
+	}
+
+	// Find any types associated with oneof fields.
+	var oneofImplementers []interface{}
+	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+	case oneofFuncsIface:
+		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+	case oneofWrappersIface:
+		oneofImplementers = m.XXX_OneofWrappers()
+	}
+	for _, v := range oneofImplementers {
+		tptr := reflect.TypeOf(v) // *Msg_X
+		typ := tptr.Elem()        // Msg_X
+
+		f := typ.Field(0) // oneof implementers have one field
+		baseUnmarshal := fieldUnmarshaler(&f)
+		tags := strings.Split(f.Tag.Get("protobuf"), ",")
+		fieldNum, err := strconv.Atoi(tags[1])
+		if err != nil {
+			panic("protobuf tag field not an integer: " + tags[1])
+		}
+		var name string
+		for _, tag := range tags {
+			if strings.HasPrefix(tag, "name=") {
+				name = strings.TrimPrefix(tag, "name=")
+				break
+			}
+		}
+
+		// Find the oneof field that this struct implements.
+		// Might take O(n^2) to process all of the oneofs, but who cares.
+		for _, of := range oneofFields {
+			if tptr.Implements(of.ityp) {
+				// We have found the corresponding interface for this struct.
+				// That lets us know where this struct should be stored
+				// when we encounter it during unmarshaling.
+				unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+				u.setTag(fieldNum, of.field, unmarshal, 0, name)
+			}
+		}
+
+	}
+
+	// Get extension ranges, if any.
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	if fn.IsValid() {
+		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+			panic("a message with extensions, but no extensions field in " + t.Name())
+		}
+		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+	}
+
+	// Explicitly disallow tag 0. This will ensure we flag an error
+	// when decoding a buffer of all zeros. Without this code, we
+	// would decode and skip an all-zero buffer of even length.
+	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+	}, 0, "")
+
+	// Set mask for required field check.
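+	// For example, with three required fields reqMask is 0b111.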
+	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
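+// Small tags (below 16 or 2*NumField) are stored in a dense slice for O(1)
+// lookup; larger tags fall back to the sparse map.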
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+	n := u.typ.NumField()
+	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+		for len(u.dense) <= tag {
+			u.dense = append(u.dense, unmarshalFieldInfo{})
+		}
+		u.dense[tag] = i
+		return
+	}
+	if u.sparse == nil {
+		u.sparse = map[uint64]unmarshalFieldInfo{}
+	}
+	u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+	if f.Type.Kind() == reflect.Map {
+		return makeUnmarshalMap(f)
+	}
+	return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+	tagArray := strings.Split(tags, ",")
+	encoding := tagArray[0]
+	name := "unknown"
+	proto3 := false
+	validateUTF8 := true
+	for _, tag := range tagArray[3:] {
+		if strings.HasPrefix(tag, "name=") {
+			name = tag[5:]
+		}
+		if tag == "proto3" {
+			proto3 = true
+		}
+	}
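+	// Only proto3 string fields are validated as UTF-8.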
+	validateUTF8 = validateUTF8 && proto3
+
+	// Figure out packaging (pointer, slice, or both)
+	slice := false
+	pointer := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	// We'll never have both pointer and slice for basic types.
+	if pointer && slice && t.Kind() != reflect.Struct {
+		panic("both pointer and slice for basic type in " + t.Name())
+	}
+
+	switch t.Kind() {
+	case reflect.Bool:
+		if pointer {
+			return unmarshalBoolPtr
+		}
+		if slice {
+			return unmarshalBoolSlice
+		}
+		return unmarshalBoolValue
+	case reflect.Int32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return unmarshalFixedS32Ptr
+			}
+			if slice {
+				return unmarshalFixedS32Slice
+			}
+			return unmarshalFixedS32Value
+		case "varint":
+			// this could be int32 or enum
+			if pointer {
+				return unmarshalInt32Ptr
+			}
+			if slice {
+				return unmarshalInt32Slice
+			}
+			return unmarshalInt32Value
+		case "zigzag32":
+			if pointer {
+				return unmarshalSint32Ptr
+			}
+			if slice {
+				return unmarshalSint32Slice
+			}
+			return unmarshalSint32Value
+		}
+	case reflect.Int64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return unmarshalFixedS64Ptr
+			}
+			if slice {
+				return unmarshalFixedS64Slice
+			}
+			return unmarshalFixedS64Value
+		case "varint":
+			if pointer {
+				return unmarshalInt64Ptr
+			}
+			if slice {
+				return unmarshalInt64Slice
+			}
+			return unmarshalInt64Value
+		case "zigzag64":
+			if pointer {
+				return unmarshalSint64Ptr
+			}
+			if slice {
+				return unmarshalSint64Slice
+			}
+			return unmarshalSint64Value
+		}
+	case reflect.Uint32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return unmarshalFixed32Ptr
+			}
+			if slice {
+				return unmarshalFixed32Slice
+			}
+			return unmarshalFixed32Value
+		case "varint":
+			if pointer {
+				return unmarshalUint32Ptr
+			}
+			if slice {
+				return unmarshalUint32Slice
+			}
+			return unmarshalUint32Value
+		}
+	case reflect.Uint64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return unmarshalFixed64Ptr
+			}
+			if slice {
+				return unmarshalFixed64Slice
+			}
+			return unmarshalFixed64Value
+		case "varint":
+			if pointer {
+				return unmarshalUint64Ptr
+			}
+			if slice {
+				return unmarshalUint64Slice
+			}
+			return unmarshalUint64Value
+		}
+	case reflect.Float32:
+		if pointer {
+			return unmarshalFloat32Ptr
+		}
+		if slice {
+			return unmarshalFloat32Slice
+		}
+		return unmarshalFloat32Value
+	case reflect.Float64:
+		if pointer {
+			return unmarshalFloat64Ptr
+		}
+		if slice {
+			return unmarshalFloat64Slice
+		}
+		return unmarshalFloat64Value
+	case reflect.Map:
+		panic("map type in typeUnmarshaler in " + t.Name())
+	case reflect.Slice:
+		if pointer {
+			panic("bad pointer in slice case in " + t.Name())
+		}
+		if slice {
+			return unmarshalBytesSlice
+		}
+		return unmarshalBytesValue
+	case reflect.String:
+		if validateUTF8 {
+			if pointer {
+				return unmarshalUTF8StringPtr
+			}
+			if slice {
+				return unmarshalUTF8StringSlice
+			}
+			return unmarshalUTF8StringValue
+		}
+		if pointer {
+			return unmarshalStringPtr
+		}
+		if slice {
+			return unmarshalStringSlice
+		}
+		return unmarshalStringValue
+	case reflect.Struct:
+		// message or group field
+		if !pointer {
+			panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
+		}
+		switch encoding {
+		case "bytes":
+			if slice {
+				return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
+		case "group":
+			if slice {
+				return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
+		}
+	}
+	panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
+}
+
+// Below are all the unmarshalers for individual fields of various types.
+
+func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	*f.toInt64() = v
+	return b, nil
+}
+
+func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	*f.toInt64Ptr() = &v
+	return b, nil
+}
+
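+// unmarshalInt64Slice appends int64 values to a repeated field. Repeated
+// varints may arrive packed (a single WireBytes element containing many
+// varints) or unpacked (one WireVarint element per value); both are accepted.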
+func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int64(x)
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
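+	// Zigzag decode: int64(x)<<63>>63 arithmetic-shifts the low bit into
+	// -(x&1), so v = (x>>1) ^ -(x&1), mapping 0,1,2,3,... to 0,-1,1,-2,...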
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	*f.toInt64() = v
+	return b, nil
+}
+
+func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	*f.toInt64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int64(x>>1) ^ int64(x)<<63>>63
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	*f.toUint64() = v
+	return b, nil
+}
+
+func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	*f.toUint64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := uint64(x)
+			s := f.toUint64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	s := f.toUint64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	*f.toInt32() = v
+	return b, nil
+}
+
+func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	f.setInt32Ptr(v)
+	return b, nil
+}
+
+func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int32(x)
+			f.appendInt32Slice(v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	f.appendInt32Slice(v)
+	return b, nil
+}
+
+func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
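+	// 32-bit zigzag decode; see the 64-bit variant above for the bit trick.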
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	*f.toInt32() = v
+	return b, nil
+}
+
+func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	f.setInt32Ptr(v)
+	return b, nil
+}
+
+func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int32(x>>1) ^ int32(x)<<31>>31
+			f.appendInt32Slice(v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	f.appendInt32Slice(v)
+	return b, nil
+}
+
+func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	*f.toUint32() = v
+	return b, nil
+}
+
+func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	*f.toUint32Ptr() = &v
+	return b, nil
+}
+
+func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := uint32(x)
+			s := f.toUint32Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	s := f.toUint32Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
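+	// Assemble the fixed64 value from 8 little-endian bytes.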
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	*f.toUint64() = v
+	return b[8:], nil
+}
+
+func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	*f.toUint64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+			s := f.toUint64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	s := f.toUint64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	*f.toInt64() = v
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	*f.toInt64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	*f.toUint32() = v
+	return b[4:], nil
+}
+
+func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	*f.toUint32Ptr() = &v
+	return b[4:], nil
+}
+
+func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+			s := f.toUint32Slice()
+			*s = append(*s, v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	s := f.toUint32Slice()
+	*s = append(*s, v)
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	*f.toInt32() = v
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	f.setInt32Ptr(v)
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+			f.appendInt32Slice(v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	f.appendInt32Slice(v)
+	return b[4:], nil
+}
+
+func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	// Note: any length varint is allowed, even though any sane
+	// encoder will use one byte.
+	// See https://github.com/golang/protobuf/issues/76
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	// TODO: check if x>1? Tests seem to indicate no.
+	v := x != 0
+	*f.toBool() = v
+	return b[n:], nil
+}
+
+func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := x != 0
+	*f.toBoolPtr() = &v
+	return b[n:], nil
+}
+
+func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := x != 0
+			s := f.toBoolSlice()
+			*s = append(*s, v)
+			b = b[n:]
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := x != 0
+	s := f.toBoolSlice()
+	*s = append(*s, v)
+	return b[n:], nil
+}
+
+func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	*f.toFloat64() = v
+	return b[8:], nil
+}
+
+func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	*f.toFloat64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+			s := f.toFloat64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	s := f.toFloat64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	*f.toFloat32() = v
+	return b[4:], nil
+}
+
+func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	*f.toFloat32Ptr() = &v
+	return b[4:], nil
+}
+
+func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+			s := f.toFloat32Slice()
+			*s = append(*s, v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	s := f.toFloat32Slice()
+	*s = append(*s, v)
+	return b[4:], nil
+}
+
+func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toString() = v
+	return b[x:], nil
+}
+
+func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toStringPtr() = &v
+	return b[x:], nil
+}
+
+func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	s := f.toStringSlice()
+	*s = append(*s, v)
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toString() = v
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toStringPtr() = &v
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	s := f.toStringSlice()
+	*s = append(*s, v)
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+var emptyBuf [0]byte
+
+func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	// The use of append here is a trick which avoids the zeroing
+	// that would be required if we used a make/copy pair.
+	// We append to emptyBuf instead of nil because we want
+	// a non-nil result even when the length is 0.
+	v := append(emptyBuf[:], b[:x]...)
+	*f.toBytes() = v
+	return b[x:], nil
+}
+
+func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := append(emptyBuf[:], b[:x]...)
+	s := f.toBytesSlice()
+	*s = append(*s, v)
+	return b[x:], nil
+}
+
+func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return b, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		// First read the message field to see if something is there.
+		// The semantics of multiple submessages are weird.  Instead of
+		// the last one winning (as it is for all other fields), multiple
+		// submessages are merged.
+		v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return b, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendPointer(v)
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireStartGroup {
+			return b, errInternalBadWireType
+		}
+		x, y := findEndGroup(b)
+		if x < 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[y:], err
+	}
+}
+
+func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireStartGroup {
+			return b, errInternalBadWireType
+		}
+		x, y := findEndGroup(b)
+		if x < 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendPointer(v)
+		return b[y:], err
+	}
+}
+
+func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
+	t := f.Type
+	kt := t.Key()
+	vt := t.Elem()
+	unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
+	unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		// The map entry is a submessage. Figure out how big it is.
+		if w != WireBytes {
+			return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		r := b[x:] // unused data to return
+		b = b[:x]  // data for map entry
+
+		// Note: we could use #keys * #values ~= 200 functions
+		// to do map decoding without reflection. Probably not worth it.
+		// Maps will be somewhat slow. Oh well.
+
+		// Read key and value from data.
+		var nerr nonFatal
+		k := reflect.New(kt)
+		v := reflect.New(vt)
+		for len(b) > 0 {
+			x, n := decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			wire := int(x) & 7
+			b = b[n:]
+
+			var err error
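+			// A map entry is a nested message whose field 1 is the key
+			// and field 2 is the value.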
+			switch x >> 3 {
+			case 1:
+				b, err = unmarshalKey(b, valToPointer(k), wire)
+			case 2:
+				b, err = unmarshalVal(b, valToPointer(v), wire)
+			default:
+				err = errInternalBadWireType // skip unknown tag
+			}
+
+			if nerr.Merge(err) {
+				continue
+			}
+			if err != errInternalBadWireType {
+				return nil, err
+			}
+
+			// Skip past unknown fields.
+			b, err = skipField(b, wire)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		// Get map, allocate if needed.
+		m := f.asPointerTo(t).Elem() // an addressable map[K]T
+		if m.IsNil() {
+			m.Set(reflect.MakeMap(t))
+		}
+
+		// Insert into map.
+		m.SetMapIndex(k.Elem(), v.Elem())
+
+		return r, nerr.E
+	}
+}
+
+// makeUnmarshalOneof makes an unmarshaler for oneof fields.
+// For example, given:
+// message Msg {
+//   oneof F {
+//     int64 X = 1;
+//     double Y = 2;
+//   }
+// }
+// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
+// ityp is the interface type of the oneof field (e.g. isMsg_F).
+// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
+// Note that this function will be called once for each case in the oneof.
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
+	sf := typ.Field(0)
+	field0 := toField(&sf)
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		// Allocate holder for value.
+		v := reflect.New(typ)
+
+		// Unmarshal data into holder.
+		// We unmarshal into the first field of the holder object.
+		var err error
+		var nerr nonFatal
+		b, err = unmarshal(b, valToPointer(v).offset(field0), w)
+		if !nerr.Merge(err) {
+			return nil, err
+		}
+
+		// Write pointer to holder into target field.
+		f.asPointerTo(ityp).Elem().Set(v)
+
+		return b, nerr.E
+	}
+}
+
+// Error used by decode internally.
+var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
+
+// skipField skips past a field of type wire and returns the remaining bytes.
+func skipField(b []byte, wire int) ([]byte, error) {
+	switch wire {
+	case WireVarint:
+		_, k := decodeVarint(b)
+		if k == 0 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[k:]
+	case WireFixed32:
+		if len(b) < 4 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[4:]
+	case WireFixed64:
+		if len(b) < 8 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[8:]
+	case WireBytes:
+		m, k := decodeVarint(b)
+		if k == 0 || uint64(len(b)-k) < m {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[uint64(k)+m:]
+	case WireStartGroup:
+		_, i := findEndGroup(b)
+		if i == -1 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[i:]
+	default:
+		return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
+	}
+	return b, nil
+}
+
+// findEndGroup finds the index of the next EndGroup tag.
+// Groups may be nested, so the "next" EndGroup tag is the first
+// unpaired EndGroup.
+// findEndGroup returns the indexes of the start and end of the EndGroup tag.
+// Returns (-1,-1) if it can't find one.
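+// The StartGroup tag that opened the group is assumed to have already been
+// consumed by the caller, which is why depth starts at one.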
+func findEndGroup(b []byte) (int, int) {
+	depth := 1
+	i := 0
+	for {
+		x, n := decodeVarint(b[i:])
+		if n == 0 {
+			return -1, -1
+		}
+		j := i
+		i += n
+		switch x & 7 {
+		case WireVarint:
+			_, k := decodeVarint(b[i:])
+			if k == 0 {
+				return -1, -1
+			}
+			i += k
+		case WireFixed32:
+			if len(b)-4 < i {
+				return -1, -1
+			}
+			i += 4
+		case WireFixed64:
+			if len(b)-8 < i {
+				return -1, -1
+			}
+			i += 8
+		case WireBytes:
+			m, k := decodeVarint(b[i:])
+			if k == 0 {
+				return -1, -1
+			}
+			i += k
+			if uint64(len(b)-i) < m {
+				return -1, -1
+			}
+			i += int(m)
+		case WireStartGroup:
+			depth++
+		case WireEndGroup:
+			depth--
+			if depth == 0 {
+				return j, i
+			}
+		default:
+			return -1, -1
+		}
+	}
+}
+
+// encodeVarint appends a varint-encoded integer to b and returns the result.
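+// Each byte carries seven bits of x, least-significant group first, with the
+// high bit set on every byte except the last (e.g. 300 encodes as 0xAC 0x02).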
+func encodeVarint(b []byte, x uint64) []byte {
+	for x >= 1<<7 {
+		b = append(b, byte(x&0x7f|0x80))
+		x >>= 7
+	}
+	return append(b, byte(x))
+}
+
+// decodeVarint reads a varint-encoded integer from b.
+// Returns the decoded integer and the number of bytes read.
+// If there is an error, it returns 0,0.
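+// For example, 0xAC 0x02 decodes to (300, 2): 0x2C + (0x02 << 7) = 300.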
+func decodeVarint(b []byte) (uint64, int) {
+	var x, y uint64
+	if len(b) == 0 {
+		goto bad
+	}
+	x = uint64(b[0])
+	if x < 0x80 {
+		return x, 1
+	}
+	x -= 0x80
+
+	if len(b) <= 1 {
+		goto bad
+	}
+	y = uint64(b[1])
+	x += y << 7
+	if y < 0x80 {
+		return x, 2
+	}
+	x -= 0x80 << 7
+
+	if len(b) <= 2 {
+		goto bad
+	}
+	y = uint64(b[2])
+	x += y << 14
+	if y < 0x80 {
+		return x, 3
+	}
+	x -= 0x80 << 14
+
+	if len(b) <= 3 {
+		goto bad
+	}
+	y = uint64(b[3])
+	x += y << 21
+	if y < 0x80 {
+		return x, 4
+	}
+	x -= 0x80 << 21
+
+	if len(b) <= 4 {
+		goto bad
+	}
+	y = uint64(b[4])
+	x += y << 28
+	if y < 0x80 {
+		return x, 5
+	}
+	x -= 0x80 << 28
+
+	if len(b) <= 5 {
+		goto bad
+	}
+	y = uint64(b[5])
+	x += y << 35
+	if y < 0x80 {
+		return x, 6
+	}
+	x -= 0x80 << 35
+
+	if len(b) <= 6 {
+		goto bad
+	}
+	y = uint64(b[6])
+	x += y << 42
+	if y < 0x80 {
+		return x, 7
+	}
+	x -= 0x80 << 42
+
+	if len(b) <= 7 {
+		goto bad
+	}
+	y = uint64(b[7])
+	x += y << 49
+	if y < 0x80 {
+		return x, 8
+	}
+	x -= 0x80 << 49
+
+	if len(b) <= 8 {
+		goto bad
+	}
+	y = uint64(b[8])
+	x += y << 56
+	if y < 0x80 {
+		return x, 9
+	}
+	x -= 0x80 << 56
+
+	if len(b) <= 9 {
+		goto bad
+	}
+	y = uint64(b[9])
+	x += y << 63
+	if y < 2 {
+		return x, 10
+	}
+
+bad:
+	return 0, 0
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 0000000..1aaee72
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,843 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+	"bufio"
+	"bytes"
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"math"
+	"reflect"
+	"sort"
+	"strings"
+)
+
+var (
+	newline         = []byte("\n")
+	spaces          = []byte("                                        ")
+	endBraceNewline = []byte("}\n")
+	backslashN      = []byte{'\\', 'n'}
+	backslashR      = []byte{'\\', 'r'}
+	backslashT      = []byte{'\\', 't'}
+	backslashDQ     = []byte{'\\', '"'}
+	backslashBS     = []byte{'\\', '\\'}
+	posInf          = []byte("inf")
+	negInf          = []byte("-inf")
+	nan             = []byte("nan")
+)
+
+type writer interface {
+	io.Writer
+	WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+	ind      int
+	complete bool // if the current position is a complete line
+	compact  bool // whether to write out as a one-liner
+	w        writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+	if !strings.Contains(s, "\n") {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		w.complete = false
+		return io.WriteString(w.w, s)
+	}
+	// WriteString is typically called without newlines, so this
+	// codepath and its copy are rare.  We copy to avoid
+	// duplicating all of Write's logic here.
+	return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+	newlines := bytes.Count(p, newline)
+	if newlines == 0 {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		n, err = w.w.Write(p)
+		w.complete = false
+		return n, err
+	}
+
+	frags := bytes.SplitN(p, newline, newlines+1)
+	if w.compact {
+		for i, frag := range frags {
+			if i > 0 {
+				if err := w.w.WriteByte(' '); err != nil {
+					return n, err
+				}
+				n++
+			}
+			nn, err := w.w.Write(frag)
+			n += nn
+			if err != nil {
+				return n, err
+			}
+		}
+		return n, nil
+	}
+
+	for i, frag := range frags {
+		if w.complete {
+			w.writeIndent()
+		}
+		nn, err := w.w.Write(frag)
+		n += nn
+		if err != nil {
+			return n, err
+		}
+		if i+1 < len(frags) {
+			if err := w.w.WriteByte('\n'); err != nil {
+				return n, err
+			}
+			n++
+		}
+	}
+	w.complete = len(frags[len(frags)-1]) == 0
+	return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+	if w.compact && c == '\n' {
+		c = ' '
+	}
+	if !w.compact && w.complete {
+		w.writeIndent()
+	}
+	err := w.w.WriteByte(c)
+	w.complete = c == '\n'
+	return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+	if w.ind == 0 {
+		log.Print("proto: textWriter unindented too far")
+		return
+	}
+	w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+	if _, err := w.WriteString(props.OrigName); err != nil {
+		return err
+	}
+	if props.Wire != "group" {
+		return w.WriteByte(':')
+	}
+	return nil
+}
+
+func requiresQuotes(u string) bool {
+	// When the type URL contains any character outside [0-9A-Za-z._/], it must be quoted.
+	for _, ch := range u {
+		switch {
+		case ch == '.' || ch == '/' || ch == '_':
+			continue
+		case '0' <= ch && ch <= '9':
+			continue
+		case 'A' <= ch && ch <= 'Z':
+			continue
+		case 'a' <= ch && ch <= 'z':
+			continue
+		default:
+			return true
+		}
+	}
+	return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message.
+func isAny(sv reflect.Value) bool {
+	type wkt interface {
+		XXX_WellKnownType() string
+	}
+	t, ok := sv.Addr().Interface().(wkt)
+	return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if the sv value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+	turl := sv.FieldByName("TypeUrl")
+	val := sv.FieldByName("Value")
+	if !turl.IsValid() || !val.IsValid() {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	b, ok := val.Interface().([]byte)
+	if !ok {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	parts := strings.Split(turl.String(), "/")
+	mt := MessageType(parts[len(parts)-1])
+	if mt == nil {
+		return false, nil
+	}
+	m := reflect.New(mt.Elem())
+	if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+		return false, nil
+	}
+	w.Write([]byte("["))
+	u := turl.String()
+	if requiresQuotes(u) {
+		writeString(w, u)
+	} else {
+		w.Write([]byte(u))
+	}
+	if w.compact {
+		w.Write([]byte("]:<"))
+	} else {
+		w.Write([]byte("]: <\n"))
+		w.ind++
+	}
+	if err := tm.writeStruct(w, m.Elem()); err != nil {
+		return true, err
+	}
+	if w.compact {
+		w.Write([]byte("> "))
+	} else {
+		w.ind--
+		w.Write([]byte(">\n"))
+	}
+	return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+	if tm.ExpandAny && isAny(sv) {
+		if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+			return err
+		}
+	}
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < sv.NumField(); i++ {
+		fv := sv.Field(i)
+		props := sprops.Prop[i]
+		name := st.Field(i).Name
+
+		if name == "XXX_NoUnkeyedLiteral" {
+			continue
+		}
+
+		if strings.HasPrefix(name, "XXX_") {
+			// There are two XXX_ fields:
+			//   XXX_unrecognized []byte
+			//   XXX_extensions   map[int32]proto.Extension
+			// The first is handled here;
+			// the second is handled at the bottom of this function.
+			if name == "XXX_unrecognized" && !fv.IsNil() {
+				if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Ptr && fv.IsNil() {
+			// Field not filled in. This could be an optional field or
+			// a required field that wasn't filled in. Either way, there
+			// isn't anything we can show for it.
+			continue
+		}
+		if fv.Kind() == reflect.Slice && fv.IsNil() {
+			// Repeated field that is empty, or a bytes field that is unused.
+			continue
+		}
+
+		if props.Repeated && fv.Kind() == reflect.Slice {
+			// Repeated field.
+			for j := 0; j < fv.Len(); j++ {
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				v := fv.Index(j)
+				if v.Kind() == reflect.Ptr && v.IsNil() {
+					// A nil message in a repeated field is not valid,
+					// but we can handle that more gracefully than panicking.
+					if _, err := w.Write([]byte("<nil>\n")); err != nil {
+						return err
+					}
+					continue
+				}
+				if err := tm.writeAny(w, v, props); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Map {
+			// Map fields are rendered as a repeated struct with key/value fields.
+			keys := fv.MapKeys()
+			sort.Sort(mapKeys(keys))
+			for _, key := range keys {
+				val := fv.MapIndex(key)
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				// open struct
+				if err := w.WriteByte('<'); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				w.indent()
+				// key
+				if _, err := w.WriteString("key:"); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+				// nil values aren't legal, but we can avoid panicking because of them.
+				if val.Kind() != reflect.Ptr || !val.IsNil() {
+					// value
+					if _, err := w.WriteString("value:"); err != nil {
+						return err
+					}
+					if !w.compact {
+						if err := w.WriteByte(' '); err != nil {
+							return err
+						}
+					}
+					if err := tm.writeAny(w, val, props.MapValProp); err != nil {
+						return err
+					}
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				// close struct
+				w.unindent()
+				if err := w.WriteByte('>'); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+			// empty bytes field
+			continue
+		}
+		if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+			// proto3 non-repeated scalar field; skip if zero value
+			if isProto3Zero(fv) {
+				continue
+			}
+		}
+
+		if fv.Kind() == reflect.Interface {
+			// Check if it is a oneof.
+			if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+				// fv is nil, or holds a pointer to generated struct.
+				// That generated struct has exactly one field,
+				// which has a protobuf struct tag.
+				if fv.IsNil() {
+					continue
+				}
+				inner := fv.Elem().Elem() // interface -> *T -> T
+				tag := inner.Type().Field(0).Tag.Get("protobuf")
+				props = new(Properties) // Overwrite the outer props var, but not its pointee.
+				props.Parse(tag)
+				// Write the value in the oneof, not the oneof itself.
+				fv = inner.Field(0)
+
+				// Special case to cope with malformed messages gracefully:
+				// If the value in the oneof is a nil pointer, don't panic
+				// in writeAny.
+				if fv.Kind() == reflect.Ptr && fv.IsNil() {
+					// Use errors.New so writeAny won't render quotes.
+					msg := errors.New("/* nil */")
+					fv = reflect.ValueOf(&msg).Elem()
+				}
+			}
+		}
+
+		if err := writeName(w, props); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+
+		// Enums have a String method, so writeAny will work fine.
+		if err := tm.writeAny(w, fv, props); err != nil {
+			return err
+		}
+
+		if err := w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+
+	// Extensions (the XXX_extensions field).
+	pv := sv.Addr()
+	if _, err := extendable(pv.Interface()); err == nil {
+		if err := tm.writeExtensions(w, pv); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+	v = reflect.Indirect(v)
+
+	// Floats have special cases.
+	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+		x := v.Float()
+		var b []byte
+		switch {
+		case math.IsInf(x, 1):
+			b = posInf
+		case math.IsInf(x, -1):
+			b = negInf
+		case math.IsNaN(x):
+			b = nan
+		}
+		if b != nil {
+			_, err := w.Write(b)
+			return err
+		}
+		// Other values are handled below.
+	}
+
+	// We don't attempt to serialise every possible value type; only those
+	// that can occur in protocol buffers.
+	switch v.Kind() {
+	case reflect.Slice:
+		// Should only be a []byte; repeated fields are handled in writeStruct.
+		if err := writeString(w, string(v.Bytes())); err != nil {
+			return err
+		}
+	case reflect.String:
+		if err := writeString(w, v.String()); err != nil {
+			return err
+		}
+	case reflect.Struct:
+		// Required/optional group/message.
+		var bra, ket byte = '<', '>'
+		if props != nil && props.Wire == "group" {
+			bra, ket = '{', '}'
+		}
+		if err := w.WriteByte(bra); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte('\n'); err != nil {
+				return err
+			}
+		}
+		w.indent()
+		if v.CanAddr() {
+			// Calling v.Interface on a struct causes the reflect package to
+			// copy the entire struct. This is racy with the new Marshaler
+			// since we atomically update the XXX_sizecache.
+			//
+			// Thus, we retrieve a pointer to the struct if possible to avoid
+			// a race since v.Interface on the pointer doesn't copy the struct.
+			//
+			// If v is not addressable, then we are not worried about a race
+			// since it implies that the binary Marshaler cannot possibly be
+			// mutating this value.
+			v = v.Addr()
+		}
+		if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+			text, err := etm.MarshalText()
+			if err != nil {
+				return err
+			}
+			if _, err = w.Write(text); err != nil {
+				return err
+			}
+		} else {
+			if v.Kind() == reflect.Ptr {
+				v = v.Elem()
+			}
+			if err := tm.writeStruct(w, v); err != nil {
+				return err
+			}
+		}
+		w.unindent()
+		if err := w.WriteByte(ket); err != nil {
+			return err
+		}
+	default:
+		_, err := fmt.Fprint(w, v.Interface())
+		return err
+	}
+	return nil
+}
+
+// isprint reports whether c is a printable ASCII character, equivalent to C's isprint.
+func isprint(c byte) bool {
+	return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+	// use WriteByte here to get any needed indent
+	if err := w.WriteByte('"'); err != nil {
+		return err
+	}
+	// Loop over the bytes, not the runes.
+	for i := 0; i < len(s); i++ {
+		var err error
+		// Divergence from C++: we don't escape apostrophes.
+		// There's no need to escape them, and the C++ parser
+		// copes with a naked apostrophe.
+		switch c := s[i]; c {
+		case '\n':
+			_, err = w.w.Write(backslashN)
+		case '\r':
+			_, err = w.w.Write(backslashR)
+		case '\t':
+			_, err = w.w.Write(backslashT)
+		case '"':
+			_, err = w.w.Write(backslashDQ)
+		case '\\':
+			_, err = w.w.Write(backslashBS)
+		default:
+			if isprint(c) {
+				err = w.w.WriteByte(c)
+			} else {
+				_, err = fmt.Fprintf(w.w, "\\%03o", c)
+			}
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+	if !w.compact {
+		if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+			return err
+		}
+	}
+	b := NewBuffer(data)
+	for b.index < len(b.buf) {
+		x, err := b.DecodeVarint()
+		if err != nil {
+			_, err := fmt.Fprintf(w, "/* %v */\n", err)
+			return err
+		}
+		wire, tag := x&7, x>>3
+		if wire == WireEndGroup {
+			w.unindent()
+			if _, err := w.Write(endBraceNewline); err != nil {
+				return err
+			}
+			continue
+		}
+		if _, err := fmt.Fprint(w, tag); err != nil {
+			return err
+		}
+		if wire != WireStartGroup {
+			if err := w.WriteByte(':'); err != nil {
+				return err
+			}
+		}
+		if !w.compact || wire == WireStartGroup {
+			if err := w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+		switch wire {
+		case WireBytes:
+			buf, e := b.DecodeRawBytes(false)
+			if e == nil {
+				_, err = fmt.Fprintf(w, "%q", buf)
+			} else {
+				_, err = fmt.Fprintf(w, "/* %v */", e)
+			}
+		case WireFixed32:
+			x, err = b.DecodeFixed32()
+			err = writeUnknownInt(w, x, err)
+		case WireFixed64:
+			x, err = b.DecodeFixed64()
+			err = writeUnknownInt(w, x, err)
+		case WireStartGroup:
+			err = w.WriteByte('{')
+			w.indent()
+		case WireVarint:
+			x, err = b.DecodeVarint()
+			err = writeUnknownInt(w, x, err)
+		default:
+			_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+		}
+		if err != nil {
+			return err
+		}
+		if err = w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+	return nil
+}
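+
+// For example (illustrative), the unknown bytes {0x28, 0x07}, i.e. field
+// number 5 holding varint value 7, render in non-compact mode as:
+//
+//	/* 2 unknown bytes */
+//	5: 7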
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+	if err == nil {
+		_, err = fmt.Fprint(w, x)
+	} else {
+		_, err = fmt.Fprintf(w, "/* %v */", err)
+	}
+	return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int           { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+	emap := extensionMaps[pv.Type().Elem()]
+	ep, _ := extendable(pv.Interface())
+
+	// Order the extensions by ID.
+	// This isn't strictly necessary, but it will give us
+	// canonical output, which will also make testing easier.
+	m, mu := ep.extensionsRead()
+	if m == nil {
+		return nil
+	}
+	mu.Lock()
+	ids := make([]int32, 0, len(m))
+	for id := range m {
+		ids = append(ids, id)
+	}
+	sort.Sort(int32Slice(ids))
+	mu.Unlock()
+
+	for _, extNum := range ids {
+		ext := m[extNum]
+		var desc *ExtensionDesc
+		if emap != nil {
+			desc = emap[extNum]
+		}
+		if desc == nil {
+			// Unknown extension.
+			if err := writeUnknownStruct(w, ext.enc); err != nil {
+				return err
+			}
+			continue
+		}
+
+		pb, err := GetExtension(ep, desc)
+		if err != nil {
+			return fmt.Errorf("failed getting extension: %v", err)
+		}
+
+		// Repeated extensions will appear as a slice.
+		if !desc.repeated() {
+			if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+				return err
+			}
+		} else {
+			v := reflect.ValueOf(pb)
+			for i := 0; i < v.Len(); i++ {
+				if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+	if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+		return err
+	}
+	if !w.compact {
+		if err := w.WriteByte(' '); err != nil {
+			return err
+		}
+	}
+	if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+		return err
+	}
+	if err := w.WriteByte('\n'); err != nil {
+		return err
+	}
+	return nil
+}
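+
+// With a non-compact writer the extension renders as, e.g. (illustrative name
+// and value):
+//
+//	[my.pkg.my_ext]: 3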
+
+func (w *textWriter) writeIndent() {
+	if !w.complete {
+		return
+	}
+	remain := w.ind * 2
+	for remain > 0 {
+		n := remain
+		if n > len(spaces) {
+			n = len(spaces)
+		}
+		w.w.Write(spaces[:n])
+		remain -= n
+	}
+	w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+	Compact   bool // use compact text format (one line).
+	ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+	val := reflect.ValueOf(pb)
+	if pb == nil || val.IsNil() {
+		w.Write([]byte("<nil>"))
+		return nil
+	}
+	var bw *bufio.Writer
+	ww, ok := w.(writer)
+	if !ok {
+		bw = bufio.NewWriter(w)
+		ww = bw
+	}
+	aw := &textWriter{
+		w:        ww,
+		complete: true,
+		compact:  tm.Compact,
+	}
+
+	if etm, ok := pb.(encoding.TextMarshaler); ok {
+		text, err := etm.MarshalText()
+		if err != nil {
+			return err
+		}
+		if _, err = aw.Write(text); err != nil {
+			return err
+		}
+		if bw != nil {
+			return bw.Flush()
+		}
+		return nil
+	}
+	// Dereference the received pointer so we don't have outer < and >.
+	v := reflect.Indirect(val)
+	if err := tm.writeStruct(aw, v); err != nil {
+		return err
+	}
+	if bw != nil {
+		return bw.Flush()
+	}
+	return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+	var buf bytes.Buffer
+	tm.Marshal(&buf, pb)
+	return buf.String()
+}
+
+var (
+	defaultTextMarshaler = TextMarshaler{}
+	compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
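+
+// A minimal usage sketch (pb.Person is a hypothetical generated message type
+// with a Name field; client code would import this package as proto):
+//
+//	msg := &pb.Person{Name: proto.String("alice")}
+//	fmt.Print(proto.MarshalTextString(msg)) // name: "alice"
+//	fmt.Print(proto.CompactTextString(msg)) // name:"alice"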
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..bb55a3a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,880 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+	Message string
+	Line    int // 1-based line number
+	Offset  int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+	if p.Line == 1 {
+		// show offset only for first line
+		return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+	}
+	return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
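+
+// For example, a failure on the first line renders as
+// "line 1.12: unmatched quote", while a failure on a later line renders as
+// "line 3: unmatched quote".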
+
+type token struct {
+	value    string
+	err      *ParseError
+	line     int    // line number
+	offset   int    // byte number from start of input, not start of line
+	unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+	if t.err == nil {
+		return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+	}
+	return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+	s            string // remaining input
+	done         bool   // whether the parsing is finished (success or error)
+	backed       bool   // whether back() was called
+	offset, line int
+	cur          token
+}
+
+func newTextParser(s string) *textParser {
+	p := new(textParser)
+	p.s = s
+	p.line = 1
+	p.cur.line = 1
+	return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+	pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+	p.cur.err = pe
+	p.done = true
+	return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+	switch {
+	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+		return true
+	case '0' <= c && c <= '9':
+		return true
+	}
+	switch c {
+	case '-', '+', '.', '_':
+		return true
+	}
+	return false
+}
+
+func isWhitespace(c byte) bool {
+	switch c {
+	case ' ', '\t', '\n', '\r':
+		return true
+	}
+	return false
+}
+
+func isQuote(c byte) bool {
+	switch c {
+	case '"', '\'':
+		return true
+	}
+	return false
+}
+
+func (p *textParser) skipWhitespace() {
+	i := 0
+	for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+		if p.s[i] == '#' {
+			// comment; skip to end of line or input
+			for i < len(p.s) && p.s[i] != '\n' {
+				i++
+			}
+			if i == len(p.s) {
+				break
+			}
+		}
+		if p.s[i] == '\n' {
+			p.line++
+		}
+		i++
+	}
+	p.offset += i
+	p.s = p.s[i:len(p.s)]
+	if len(p.s) == 0 {
+		p.done = true
+	}
+}
+
+func (p *textParser) advance() {
+	// Skip whitespace
+	p.skipWhitespace()
+	if p.done {
+		return
+	}
+
+	// Start of non-whitespace
+	p.cur.err = nil
+	p.cur.offset, p.cur.line = p.offset, p.line
+	p.cur.unquoted = ""
+	switch p.s[0] {
+	case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+		// Single symbol
+		p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+	case '"', '\'':
+		// Quoted string
+		i := 1
+		for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+			if p.s[i] == '\\' && i+1 < len(p.s) {
+				// skip escaped char
+				i++
+			}
+			i++
+		}
+		if i >= len(p.s) || p.s[i] != p.s[0] {
+			p.errorf("unmatched quote")
+			return
+		}
+		unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+		if err != nil {
+			p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+			return
+		}
+		p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+		p.cur.unquoted = unq
+	default:
+		i := 0
+		for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+			i++
+		}
+		if i == 0 {
+			p.errorf("unexpected byte %#x", p.s[0])
+			return
+		}
+		p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+	}
+	p.offset += len(p.cur.value)
+}
+
+var (
+	errBadUTF8 = errors.New("proto: bad UTF-8")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+	// This is based on C++'s tokenizer.cc.
+	// Despite its name, this is *not* parsing C syntax.
+	// For instance, "\0" is an invalid quoted string.
+
+	// Avoid allocation in trivial cases.
+	simple := true
+	for _, r := range s {
+		if r == '\\' || r == quote {
+			simple = false
+			break
+		}
+	}
+	if simple {
+		return s, nil
+	}
+
+	buf := make([]byte, 0, 3*len(s)/2)
+	for len(s) > 0 {
+		r, n := utf8.DecodeRuneInString(s)
+		if r == utf8.RuneError && n == 1 {
+			return "", errBadUTF8
+		}
+		s = s[n:]
+		if r != '\\' {
+			if r < utf8.RuneSelf {
+				buf = append(buf, byte(r))
+			} else {
+				buf = append(buf, string(r)...)
+			}
+			continue
+		}
+
+		ch, tail, err := unescape(s)
+		if err != nil {
+			return "", err
+		}
+		buf = append(buf, ch...)
+		s = tail
+	}
+	return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+	r, n := utf8.DecodeRuneInString(s)
+	if r == utf8.RuneError && n == 1 {
+		return "", "", errBadUTF8
+	}
+	s = s[n:]
+	switch r {
+	case 'a':
+		return "\a", s, nil
+	case 'b':
+		return "\b", s, nil
+	case 'f':
+		return "\f", s, nil
+	case 'n':
+		return "\n", s, nil
+	case 'r':
+		return "\r", s, nil
+	case 't':
+		return "\t", s, nil
+	case 'v':
+		return "\v", s, nil
+	case '?':
+		return "?", s, nil // trigraph workaround
+	case '\'', '"', '\\':
+		return string(r), s, nil
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		if len(s) < 2 {
+			return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+		}
+		ss := string(r) + s[:2]
+		s = s[2:]
+		i, err := strconv.ParseUint(ss, 8, 8)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+		}
+		return string([]byte{byte(i)}), s, nil
+	case 'x', 'X', 'u', 'U':
+		var n int
+		switch r {
+		case 'x', 'X':
+			n = 2
+		case 'u':
+			n = 4
+		case 'U':
+			n = 8
+		}
+		if len(s) < n {
+			return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+		}
+		ss := s[:n]
+		s = s[n:]
+		i, err := strconv.ParseUint(ss, 16, 64)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+		}
+		if r == 'x' || r == 'X' {
+			return string([]byte{byte(i)}), s, nil
+		}
+		if i > utf8.MaxRune {
+			return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+		}
+		return string(i), s, nil
+	}
+	return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
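+
+// Illustrative examples, in a double-quoted context:
+//
+//	unquoteC(`\n\101`, '"')     // "\nA" (octal escape)
+//	unquoteC(`\x41\u00e9`, '"') // "Aé" (hex and Unicode escapes)
+//	unquoteC(`\0`, '"')         // error: \0 requires 2 following digits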
+
+// back backs the parser off by one token. It may only be called between calls
+// to next(), and it makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// next advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+	if p.backed || p.done {
+		p.backed = false
+		return &p.cur
+	}
+	p.advance()
+	if p.done {
+		p.cur.value = ""
+	} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+		// Look for multiple quoted strings separated by whitespace,
+		// and concatenate them.
+		cat := p.cur
+		for {
+			p.skipWhitespace()
+			if p.done || !isQuote(p.s[0]) {
+				break
+			}
+			p.advance()
+			if p.cur.err != nil {
+				return &p.cur
+			}
+			cat.value += " " + p.cur.value
+			cat.unquoted += p.cur.unquoted
+		}
+		p.done = false // parser may have seen EOF, but we want to return cat
+		p.cur = cat
+	}
+	return &p.cur
+}
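+
+// As in C, adjacent quoted strings concatenate, so the input
+//
+//	name: "foo" "bar"
+//
+// yields a single string token whose unquoted value is "foobar".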
+
+func (p *textParser) consumeToken(s string) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != s {
+		p.back()
+		return p.errorf("expected %q, found %q", s, tok.value)
+	}
+	return nil
+}
+
+// missingRequiredFieldError returns a RequiredNotSetError indicating which
+// required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < st.NumField(); i++ {
+		if !isNil(sv.Field(i)) {
+			continue
+		}
+
+		props := sprops.Prop[i]
+		if props.Required {
+			return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+		}
+	}
+	return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// structFieldByName returns the index in the struct for the named field, as
+// well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+	i, ok := sprops.decoderOrigNames[name]
+	if ok {
+		return i, sprops.Prop[i], true
+	}
+	return -1, nil, false
+}
+
+// checkForColon consumes a ':' from the input stream (if the next token is a
+// colon), returning an error if a colon is required but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ":" {
+		// Colon is optional when the field is a group or message.
+		needColon := true
+		switch props.Wire {
+		case "group":
+			needColon = false
+		case "bytes":
+			// A "bytes" field is either a message, a string, or a repeated field;
+			// those three become *T, *string and []T respectively, so we can check for
+			// this field being a pointer to a non-string.
+			if typ.Kind() == reflect.Ptr {
+				// *T or *string
+				if typ.Elem().Kind() == reflect.String {
+					break
+				}
+			} else if typ.Kind() == reflect.Slice {
+				// []T or []*T
+				if typ.Elem().Kind() != reflect.Ptr {
+					break
+				}
+			} else if typ.Kind() == reflect.String {
+				// The proto3 exception is for a string field,
+				// which requires a colon.
+				break
+			}
+			needColon = false
+		}
+		if needColon {
+			return p.errorf("expected ':', found %q", tok.value)
+		}
+		p.back()
+	}
+	return nil
+}
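+
+// In practice the colon is required for scalar fields but optional before a
+// message or group, so both of the following parse (illustrative field names):
+//
+//	count: 3
+//	nested { value: 1 }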
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	reqCount := sprops.reqCount
+	var reqFieldErr error
+	fieldSet := make(map[string]bool)
+	// A struct is a sequence of "name: value", terminated by one of
+	// '>' or '}', or the end of the input.  A name may also be
+	// "[extension]" or "[type/url]".
+	//
+	// The whole struct can also be an expanded Any message, like:
+	// [type/url] < ... struct contents ... >
+	for {
+		tok := p.next()
+		if tok.err != nil {
+			return tok.err
+		}
+		if tok.value == terminator {
+			break
+		}
+		if tok.value == "[" {
+			// Looks like an extension or an Any.
+			//
+			// TODO: Check whether we need to handle
+			// namespace rooted names (e.g. ".something.Foo").
+			extName, err := p.consumeExtName()
+			if err != nil {
+				return err
+			}
+
+			if s := strings.LastIndex(extName, "/"); s >= 0 {
+				// If it contains a slash, it's an Any type URL.
+				messageName := extName[s+1:]
+				mt := MessageType(messageName)
+				if mt == nil {
+					return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+				}
+				tok = p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				// consume an optional colon
+				if tok.value == ":" {
+					tok = p.next()
+					if tok.err != nil {
+						return tok.err
+					}
+				}
+				var terminator string
+				switch tok.value {
+				case "<":
+					terminator = ">"
+				case "{":
+					terminator = "}"
+				default:
+					return p.errorf("expected '{' or '<', found %q", tok.value)
+				}
+				v := reflect.New(mt.Elem())
+				if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+					return pe
+				}
+				b, err := Marshal(v.Interface().(Message))
+				if err != nil {
+					return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+				}
+				if fieldSet["type_url"] {
+					return p.errorf(anyRepeatedlyUnpacked, "type_url")
+				}
+				if fieldSet["value"] {
+					return p.errorf(anyRepeatedlyUnpacked, "value")
+				}
+				sv.FieldByName("TypeUrl").SetString(extName)
+				sv.FieldByName("Value").SetBytes(b)
+				fieldSet["type_url"] = true
+				fieldSet["value"] = true
+				continue
+			}
+
+			var desc *ExtensionDesc
+			// This could be faster, but it's functional.
+			// TODO: Do something smarter than a linear scan.
+			for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+				if d.Name == extName {
+					desc = d
+					break
+				}
+			}
+			if desc == nil {
+				return p.errorf("unrecognized extension %q", extName)
+			}
+
+			props := &Properties{}
+			props.Parse(desc.Tag)
+
+			typ := reflect.TypeOf(desc.ExtensionType)
+			if err := p.checkForColon(props, typ); err != nil {
+				return err
+			}
+
+			rep := desc.repeated()
+
+			// Read the extension structure, and set it in
+			// the value we're constructing.
+			var ext reflect.Value
+			if !rep {
+				ext = reflect.New(typ).Elem()
+			} else {
+				ext = reflect.New(typ.Elem()).Elem()
+			}
+			if err := p.readAny(ext, props); err != nil {
+				if _, ok := err.(*RequiredNotSetError); !ok {
+					return err
+				}
+				reqFieldErr = err
+			}
+			ep := sv.Addr().Interface().(Message)
+			if !rep {
+				SetExtension(ep, desc, ext.Interface())
+			} else {
+				old, err := GetExtension(ep, desc)
+				var sl reflect.Value
+				if err == nil {
+					sl = reflect.ValueOf(old) // existing slice
+				} else {
+					sl = reflect.MakeSlice(typ, 0, 1)
+				}
+				sl = reflect.Append(sl, ext)
+				SetExtension(ep, desc, sl.Interface())
+			}
+			if err := p.consumeOptionalSeparator(); err != nil {
+				return err
+			}
+			continue
+		}
+
+		// This is a normal, non-extension field.
+		name := tok.value
+		var dst reflect.Value
+		fi, props, ok := structFieldByName(sprops, name)
+		if ok {
+			dst = sv.Field(fi)
+		} else if oop, ok := sprops.OneofTypes[name]; ok {
+			// It is a oneof.
+			props = oop.Prop
+			nv := reflect.New(oop.Type.Elem())
+			dst = nv.Elem().Field(0)
+			field := sv.Field(oop.Field)
+			if !field.IsNil() {
+				return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+			}
+			field.Set(nv)
+		}
+		if !dst.IsValid() {
+			return p.errorf("unknown field name %q in %v", name, st)
+		}
+
+		if dst.Kind() == reflect.Map {
+			// Consume any colon.
+			if err := p.checkForColon(props, dst.Type()); err != nil {
+				return err
+			}
+
+			// Construct the map if it doesn't already exist.
+			if dst.IsNil() {
+				dst.Set(reflect.MakeMap(dst.Type()))
+			}
+			key := reflect.New(dst.Type().Key()).Elem()
+			val := reflect.New(dst.Type().Elem()).Elem()
+
+			// The map entry should be this sequence of tokens:
+			//	< key : KEY value : VALUE >
+			// However, implementations may omit key or value, and technically
+			// we should support them in any order.  See b/28924776 for a time
+			// this went wrong.
+
+			tok := p.next()
+			var terminator string
+			switch tok.value {
+			case "<":
+				terminator = ">"
+			case "{":
+				terminator = "}"
+			default:
+				return p.errorf("expected '{' or '<', found %q", tok.value)
+			}
+			for {
+				tok := p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				if tok.value == terminator {
+					break
+				}
+				switch tok.value {
+				case "key":
+					if err := p.consumeToken(":"); err != nil {
+						return err
+					}
+					if err := p.readAny(key, props.MapKeyProp); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				case "value":
+					if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
+						return err
+					}
+					if err := p.readAny(val, props.MapValProp); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				default:
+					p.back()
+					return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+				}
+			}
+
+			dst.SetMapIndex(key, val)
+			continue
+		}
+
+		// Check that it's not already set if it's not a repeated field.
+		if !props.Repeated && fieldSet[name] {
+			return p.errorf("non-repeated field %q was repeated", name)
+		}
+
+		if err := p.checkForColon(props, dst.Type()); err != nil {
+			return err
+		}
+
+		// Parse into the field.
+		fieldSet[name] = true
+		if err := p.readAny(dst, props); err != nil {
+			if _, ok := err.(*RequiredNotSetError); !ok {
+				return err
+			}
+			reqFieldErr = err
+		}
+		if props.Required {
+			reqCount--
+		}
+
+		if err := p.consumeOptionalSeparator(); err != nil {
+			return err
+		}
+
+	}
+
+	if reqCount > 0 {
+		return p.missingRequiredFieldError(sv)
+	}
+	return reqFieldErr
+}
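+
+// An expanded Any appears in text form as (sketch; the type URL and contents
+// are illustrative):
+//
+//	[type.googleapis.com/example.Inner] { value: 1 }
+//
+// readStruct re-marshals the nested message and stores the result in the
+// enclosing Any's TypeUrl and Value fields.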
+
+// consumeExtName consumes an extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+	tok := p.next()
+	if tok.err != nil {
+		return "", tok.err
+	}
+
+	// If extension name or type url is quoted, it's a single token.
+	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+		if err != nil {
+			return "", err
+		}
+		return name, p.consumeToken("]")
+	}
+
+	// Consume everything up to "]"
+	var parts []string
+	for tok.value != "]" {
+		parts = append(parts, tok.value)
+		tok = p.next()
+		if tok.err != nil {
+			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+		}
+		if p.done && tok.value != "]" {
+			return "", p.errorf("unclosed type_url or extension name")
+		}
+	}
+	return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ";" && tok.value != "," {
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value == "" {
+		return p.errorf("unexpected EOF")
+	}
+
+	switch fv := v; fv.Kind() {
+	case reflect.Slice:
+		at := v.Type()
+		if at.Elem().Kind() == reflect.Uint8 {
+			// Special case for []byte
+			if tok.value[0] != '"' && tok.value[0] != '\'' {
+				// Deliberately written out here, as the error after
+				// this switch statement would write "invalid []byte: ...",
+				// which is not as user-friendly.
+				return p.errorf("invalid string: %v", tok.value)
+			}
+			bytes := []byte(tok.unquoted)
+			fv.Set(reflect.ValueOf(bytes))
+			return nil
+		}
+		// Repeated field.
+		if tok.value == "[" {
+			// Repeated field with list notation, like [1,2,3].
+			for {
+				fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+				err := p.readAny(fv.Index(fv.Len()-1), props)
+				if err != nil {
+					return err
+				}
+				tok := p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				if tok.value == "]" {
+					break
+				}
+				if tok.value != "," {
+					return p.errorf("Expected ']' or ',' found %q", tok.value)
+				}
+			}
+			return nil
+		}
+		// One value of the repeated field.
+		p.back()
+		fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+		return p.readAny(fv.Index(fv.Len()-1), props)
+	case reflect.Bool:
+		// true/1/t/True or false/f/0/False.
+		switch tok.value {
+		case "true", "1", "t", "True":
+			fv.SetBool(true)
+			return nil
+		case "false", "0", "f", "False":
+			fv.SetBool(false)
+			return nil
+		}
+	case reflect.Float32, reflect.Float64:
+		v := tok.value
+		// Ignore 'f' for compatibility with output generated by C++, but don't
+		// remove 'f' when the value is "-inf" or "inf".
+		if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+			v = v[:len(v)-1]
+		}
+		if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+			fv.SetFloat(f)
+			return nil
+		}
+	case reflect.Int32:
+		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+		if len(props.Enum) == 0 {
+			break
+		}
+		m, ok := enumValueMaps[props.Enum]
+		if !ok {
+			break
+		}
+		x, ok := m[tok.value]
+		if !ok {
+			break
+		}
+		fv.SetInt(int64(x))
+		return nil
+	case reflect.Int64:
+		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+	case reflect.Ptr:
+		// A basic field (indirected through pointer), or a repeated message/group
+		p.back()
+		fv.Set(reflect.New(fv.Type().Elem()))
+		return p.readAny(fv.Elem(), props)
+	case reflect.String:
+		if tok.value[0] == '"' || tok.value[0] == '\'' {
+			fv.SetString(tok.unquoted)
+			return nil
+		}
+	case reflect.Struct:
+		var terminator string
+		switch tok.value {
+		case "{":
+			terminator = "}"
+		case "<":
+			terminator = ">"
+		default:
+			return p.errorf("expected '{' or '<', found %q", tok.value)
+		}
+		// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+		return p.readStruct(fv, terminator)
+	case reflect.Uint32:
+		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+			fv.SetUint(uint64(x))
+			return nil
+		}
+	case reflect.Uint64:
+		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+			fv.SetUint(x)
+			return nil
+		}
+	}
+	return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+	if um, ok := pb.(encoding.TextUnmarshaler); ok {
+		return um.UnmarshalText([]byte(s))
+	}
+	pb.Reset()
+	v := reflect.ValueOf(pb)
+	return newTextParser(s).readStruct(v.Elem(), "")
+}
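+
+// A minimal round-trip sketch (pb.Person is a hypothetical generated message
+// type; client code would import this package as proto):
+//
+//	var p pb.Person
+//	if err := proto.UnmarshalText(`name: "alice"`, &p); err != nil {
+//		// handle the parse error
+//	}
+//	// p.GetName() == "alice"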
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
new file mode 100644
index 0000000..1ded05b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
@@ -0,0 +1,2887 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/descriptor.proto
+
+package descriptor
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type FieldDescriptorProto_Type int32
+
+const (
+	// 0 is reserved for errors.
+	// Order is weird for historical reasons.
+	FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1
+	FieldDescriptorProto_TYPE_FLOAT  FieldDescriptorProto_Type = 2
+	// Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if
+	// negative values are likely.
+	FieldDescriptorProto_TYPE_INT64  FieldDescriptorProto_Type = 3
+	FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4
+	// Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if
+	// negative values are likely.
+	FieldDescriptorProto_TYPE_INT32   FieldDescriptorProto_Type = 5
+	FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6
+	FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7
+	FieldDescriptorProto_TYPE_BOOL    FieldDescriptorProto_Type = 8
+	FieldDescriptorProto_TYPE_STRING  FieldDescriptorProto_Type = 9
+	// Tag-delimited aggregate.
+	// Group type is deprecated and not supported in proto3. However, Proto3
+	// implementations should still be able to parse the group wire format and
+	// treat group fields as unknown fields.
+	FieldDescriptorProto_TYPE_GROUP   FieldDescriptorProto_Type = 10
+	FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11
+	// New in version 2.
+	FieldDescriptorProto_TYPE_BYTES    FieldDescriptorProto_Type = 12
+	FieldDescriptorProto_TYPE_UINT32   FieldDescriptorProto_Type = 13
+	FieldDescriptorProto_TYPE_ENUM     FieldDescriptorProto_Type = 14
+	FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15
+	FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16
+	FieldDescriptorProto_TYPE_SINT32   FieldDescriptorProto_Type = 17
+	FieldDescriptorProto_TYPE_SINT64   FieldDescriptorProto_Type = 18
+)
+
+var FieldDescriptorProto_Type_name = map[int32]string{
+	1:  "TYPE_DOUBLE",
+	2:  "TYPE_FLOAT",
+	3:  "TYPE_INT64",
+	4:  "TYPE_UINT64",
+	5:  "TYPE_INT32",
+	6:  "TYPE_FIXED64",
+	7:  "TYPE_FIXED32",
+	8:  "TYPE_BOOL",
+	9:  "TYPE_STRING",
+	10: "TYPE_GROUP",
+	11: "TYPE_MESSAGE",
+	12: "TYPE_BYTES",
+	13: "TYPE_UINT32",
+	14: "TYPE_ENUM",
+	15: "TYPE_SFIXED32",
+	16: "TYPE_SFIXED64",
+	17: "TYPE_SINT32",
+	18: "TYPE_SINT64",
+}
+
+var FieldDescriptorProto_Type_value = map[string]int32{
+	"TYPE_DOUBLE":   1,
+	"TYPE_FLOAT":    2,
+	"TYPE_INT64":    3,
+	"TYPE_UINT64":   4,
+	"TYPE_INT32":    5,
+	"TYPE_FIXED64":  6,
+	"TYPE_FIXED32":  7,
+	"TYPE_BOOL":     8,
+	"TYPE_STRING":   9,
+	"TYPE_GROUP":    10,
+	"TYPE_MESSAGE":  11,
+	"TYPE_BYTES":    12,
+	"TYPE_UINT32":   13,
+	"TYPE_ENUM":     14,
+	"TYPE_SFIXED32": 15,
+	"TYPE_SFIXED64": 16,
+	"TYPE_SINT32":   17,
+	"TYPE_SINT64":   18,
+}
+
+func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type {
+	p := new(FieldDescriptorProto_Type)
+	*p = x
+	return p
+}
+
+func (x FieldDescriptorProto_Type) String() string {
+	return proto.EnumName(FieldDescriptorProto_Type_name, int32(x))
+}
+
+func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type")
+	if err != nil {
+		return err
+	}
+	*x = FieldDescriptorProto_Type(value)
+	return nil
+}
+
+func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{4, 0}
+}
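+
+// The generated helpers round-trip between enum names and values; for example
+// (illustrative):
+//
+//	t := FieldDescriptorProto_TYPE_STRING
+//	_ = t.String()                                     // "TYPE_STRING"
+//	_ = FieldDescriptorProto_Type_value["TYPE_STRING"] // 9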
+
+type FieldDescriptorProto_Label int32
+
+const (
+	// 0 is reserved for errors
+	FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
+	FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
+	FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
+)
+
+var FieldDescriptorProto_Label_name = map[int32]string{
+	1: "LABEL_OPTIONAL",
+	2: "LABEL_REQUIRED",
+	3: "LABEL_REPEATED",
+}
+
+var FieldDescriptorProto_Label_value = map[string]int32{
+	"LABEL_OPTIONAL": 1,
+	"LABEL_REQUIRED": 2,
+	"LABEL_REPEATED": 3,
+}
+
+func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label {
+	p := new(FieldDescriptorProto_Label)
+	*p = x
+	return p
+}
+
+func (x FieldDescriptorProto_Label) String() string {
+	return proto.EnumName(FieldDescriptorProto_Label_name, int32(x))
+}
+
+func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label")
+	if err != nil {
+		return err
+	}
+	*x = FieldDescriptorProto_Label(value)
+	return nil
+}
+
+func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{4, 1}
+}
+
+// Generated classes can be optimized for speed or code size.
+type FileOptions_OptimizeMode int32
+
+const (
+	FileOptions_SPEED FileOptions_OptimizeMode = 1
+	// etc.
+	FileOptions_CODE_SIZE    FileOptions_OptimizeMode = 2
+	FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3
+)
+
+var FileOptions_OptimizeMode_name = map[int32]string{
+	1: "SPEED",
+	2: "CODE_SIZE",
+	3: "LITE_RUNTIME",
+}
+
+var FileOptions_OptimizeMode_value = map[string]int32{
+	"SPEED":        1,
+	"CODE_SIZE":    2,
+	"LITE_RUNTIME": 3,
+}
+
+func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode {
+	p := new(FileOptions_OptimizeMode)
+	*p = x
+	return p
+}
+
+func (x FileOptions_OptimizeMode) String() string {
+	return proto.EnumName(FileOptions_OptimizeMode_name, int32(x))
+}
+
+func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode")
+	if err != nil {
+		return err
+	}
+	*x = FileOptions_OptimizeMode(value)
+	return nil
+}
+
+func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{10, 0}
+}
+
+type FieldOptions_CType int32
+
+const (
+	// Default mode.
+	FieldOptions_STRING       FieldOptions_CType = 0
+	FieldOptions_CORD         FieldOptions_CType = 1
+	FieldOptions_STRING_PIECE FieldOptions_CType = 2
+)
+
+var FieldOptions_CType_name = map[int32]string{
+	0: "STRING",
+	1: "CORD",
+	2: "STRING_PIECE",
+}
+
+var FieldOptions_CType_value = map[string]int32{
+	"STRING":       0,
+	"CORD":         1,
+	"STRING_PIECE": 2,
+}
+
+func (x FieldOptions_CType) Enum() *FieldOptions_CType {
+	p := new(FieldOptions_CType)
+	*p = x
+	return p
+}
+
+func (x FieldOptions_CType) String() string {
+	return proto.EnumName(FieldOptions_CType_name, int32(x))
+}
+
+func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType")
+	if err != nil {
+		return err
+	}
+	*x = FieldOptions_CType(value)
+	return nil
+}
+
+func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{12, 0}
+}
+
+type FieldOptions_JSType int32
+
+const (
+	// Use the default type.
+	FieldOptions_JS_NORMAL FieldOptions_JSType = 0
+	// Use JavaScript strings.
+	FieldOptions_JS_STRING FieldOptions_JSType = 1
+	// Use JavaScript numbers.
+	FieldOptions_JS_NUMBER FieldOptions_JSType = 2
+)
+
+var FieldOptions_JSType_name = map[int32]string{
+	0: "JS_NORMAL",
+	1: "JS_STRING",
+	2: "JS_NUMBER",
+}
+
+var FieldOptions_JSType_value = map[string]int32{
+	"JS_NORMAL": 0,
+	"JS_STRING": 1,
+	"JS_NUMBER": 2,
+}
+
+func (x FieldOptions_JSType) Enum() *FieldOptions_JSType {
+	p := new(FieldOptions_JSType)
+	*p = x
+	return p
+}
+
+func (x FieldOptions_JSType) String() string {
+	return proto.EnumName(FieldOptions_JSType_name, int32(x))
+}
+
+func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType")
+	if err != nil {
+		return err
+	}
+	*x = FieldOptions_JSType(value)
+	return nil
+}
+
+func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{12, 1}
+}
+
+// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+// or neither? An HTTP-based RPC implementation may choose the GET verb for
+// safe methods, and the PUT verb for idempotent methods, instead of the
+// default POST.
+type MethodOptions_IdempotencyLevel int32
+
+const (
+	MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0
+	MethodOptions_NO_SIDE_EFFECTS     MethodOptions_IdempotencyLevel = 1
+	MethodOptions_IDEMPOTENT          MethodOptions_IdempotencyLevel = 2
+)
+
+var MethodOptions_IdempotencyLevel_name = map[int32]string{
+	0: "IDEMPOTENCY_UNKNOWN",
+	1: "NO_SIDE_EFFECTS",
+	2: "IDEMPOTENT",
+}
+
+var MethodOptions_IdempotencyLevel_value = map[string]int32{
+	"IDEMPOTENCY_UNKNOWN": 0,
+	"NO_SIDE_EFFECTS":     1,
+	"IDEMPOTENT":          2,
+}
+
+func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel {
+	p := new(MethodOptions_IdempotencyLevel)
+	*p = x
+	return p
+}
+
+func (x MethodOptions_IdempotencyLevel) String() string {
+	return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x))
+}
+
+func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel")
+	if err != nil {
+		return err
+	}
+	*x = MethodOptions_IdempotencyLevel(value)
+	return nil
+}
+
+func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{17, 0}
+}
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+type FileDescriptorSet struct {
+	File                 []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
+	XXX_unrecognized     []byte                 `json:"-"`
+	XXX_sizecache        int32                  `json:"-"`
+}
+
+func (m *FileDescriptorSet) Reset()         { *m = FileDescriptorSet{} }
+func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) }
+func (*FileDescriptorSet) ProtoMessage()    {}
+func (*FileDescriptorSet) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{0}
+}
+
+func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b)
+}
+func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic)
+}
+func (m *FileDescriptorSet) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileDescriptorSet.Merge(m, src)
+}
+func (m *FileDescriptorSet) XXX_Size() int {
+	return xxx_messageInfo_FileDescriptorSet.Size(m)
+}
+func (m *FileDescriptorSet) XXX_DiscardUnknown() {
+	xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo
+
+func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto {
+	if m != nil {
+		return m.File
+	}
+	return nil
+}
+
+// Describes a complete .proto file.
+type FileDescriptorProto struct {
+	Name    *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"`
+	// Names of files imported by this file.
+	Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
+	// Indexes of the public imported files in the dependency list above.
+	PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"`
+	// Indexes of the weak imported files in the dependency list.
+	// For Google-internal migration only. Do not use.
+	WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
+	// All top-level definitions in this file.
+	MessageType []*DescriptorProto        `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
+	EnumType    []*EnumDescriptorProto    `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+	Service     []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"`
+	Extension   []*FieldDescriptorProto   `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"`
+	Options     *FileOptions              `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+	// This field contains optional information about the original source code.
+	// You may safely remove this entire field without harming runtime
+	// functionality of the descriptors -- the information is needed only by
+	// development tools.
+	SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"`
+	// The syntax of the proto file.
+	// The supported values are "proto2" and "proto3".
+	Syntax               *string  `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *FileDescriptorProto) Reset()         { *m = FileDescriptorProto{} }
+func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*FileDescriptorProto) ProtoMessage()    {}
+func (*FileDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{1}
+}
+
+func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b)
+}
+func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *FileDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileDescriptorProto.Merge(m, src)
+}
+func (m *FileDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_FileDescriptorProto.Size(m)
+}
+func (m *FileDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo
+
+func (m *FileDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
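+
+// Generated getters are nil-safe: calling them on a nil message returns the
+// zero value instead of panicking. For example (illustrative):
+//
+//	var fd *FileDescriptorProto
+//	_ = fd.GetName() // "" (no nil-pointer dereference)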
+
+func (m *FileDescriptorProto) GetPackage() string {
+	if m != nil && m.Package != nil {
+		return *m.Package
+	}
+	return ""
+}
+
+func (m *FileDescriptorProto) GetDependency() []string {
+	if m != nil {
+		return m.Dependency
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetPublicDependency() []int32 {
+	if m != nil {
+		return m.PublicDependency
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetWeakDependency() []int32 {
+	if m != nil {
+		return m.WeakDependency
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto {
+	if m != nil {
+		return m.MessageType
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto {
+	if m != nil {
+		return m.EnumType
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto {
+	if m != nil {
+		return m.Service
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto {
+	if m != nil {
+		return m.Extension
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetOptions() *FileOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo {
+	if m != nil {
+		return m.SourceCodeInfo
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetSyntax() string {
+	if m != nil && m.Syntax != nil {
+		return *m.Syntax
+	}
+	return ""
+}
+
+// Describes a message type.
+type DescriptorProto struct {
+	Name           *string                           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Field          []*FieldDescriptorProto           `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
+	Extension      []*FieldDescriptorProto           `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
+	NestedType     []*DescriptorProto                `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"`
+	EnumType       []*EnumDescriptorProto            `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+	ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"`
+	OneofDecl      []*OneofDescriptorProto           `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"`
+	Options        *MessageOptions                   `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"`
+	ReservedRange  []*DescriptorProto_ReservedRange  `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
+	// Reserved field names, which may not be used by fields in the same message.
+	// A given name may only be reserved once.
+	ReservedName         []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DescriptorProto) Reset()         { *m = DescriptorProto{} }
+func (m *DescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto) ProtoMessage()    {}
+func (*DescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{2}
+}
+
+func (m *DescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DescriptorProto.Unmarshal(m, b)
+}
+func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *DescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto.Merge(m, src)
+}
+func (m *DescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_DescriptorProto.Size(m)
+}
+func (m *DescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_DescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo
+
+func (m *DescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *DescriptorProto) GetField() []*FieldDescriptorProto {
+	if m != nil {
+		return m.Field
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto {
+	if m != nil {
+		return m.Extension
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetNestedType() []*DescriptorProto {
+	if m != nil {
+		return m.NestedType
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto {
+	if m != nil {
+		return m.EnumType
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange {
+	if m != nil {
+		return m.ExtensionRange
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto {
+	if m != nil {
+		return m.OneofDecl
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetOptions() *MessageOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange {
+	if m != nil {
+		return m.ReservedRange
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetReservedName() []string {
+	if m != nil {
+		return m.ReservedName
+	}
+	return nil
+}
+
+type DescriptorProto_ExtensionRange struct {
+	Start                *int32                 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End                  *int32                 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	Options              *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
+	XXX_unrecognized     []byte                 `json:"-"`
+	XXX_sizecache        int32                  `json:"-"`
+}
+
+func (m *DescriptorProto_ExtensionRange) Reset()         { *m = DescriptorProto_ExtensionRange{} }
+func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ExtensionRange) ProtoMessage()    {}
+func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{2, 0}
+}
+
+func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_Size() int {
+	return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo
+
+func (m *DescriptorProto_ExtensionRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *DescriptorProto_ExtensionRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Range of reserved tag numbers. Reserved tag numbers may not be used by
+// fields or extension ranges in the same message. Reserved ranges may
+// not overlap.
+type DescriptorProto_ReservedRange struct {
+	Start                *int32   `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End                  *int32   `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DescriptorProto_ReservedRange) Reset()         { *m = DescriptorProto_ReservedRange{} }
+func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ReservedRange) ProtoMessage()    {}
+func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{2, 1}
+}
+
+func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Size() int {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m)
+}
+func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo
+
+func (m *DescriptorProto_ReservedRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *DescriptorProto_ReservedRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+type ExtensionRangeOptions struct {
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *ExtensionRangeOptions) Reset()         { *m = ExtensionRangeOptions{} }
+func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) }
+func (*ExtensionRangeOptions) ProtoMessage()    {}
+func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{3}
+}
+
+var extRange_ExtensionRangeOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_ExtensionRangeOptions
+}
+
+func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b)
+}
+func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic)
+}
+func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExtensionRangeOptions.Merge(m, src)
+}
+func (m *ExtensionRangeOptions) XXX_Size() int {
+	return xxx_messageInfo_ExtensionRangeOptions.Size(m)
+}
+func (m *ExtensionRangeOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo
+
+func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+// Describes a field within a message.
+type FieldDescriptorProto struct {
+	Name   *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Number *int32                      `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
+	Label  *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
+	// If type_name is set, this need not be set.  If both this and type_name
+	// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+	Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"`
+	// For message and enum types, this is the name of the type.  If the name
+	// starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping
+	// rules are used to find the type (i.e. first the nested types within this
+	// message are searched, then within the parent, on up to the root
+	// namespace).
+	TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"`
+	// For extensions, this is the name of the type being extended.  It is
+	// resolved in the same manner as type_name.
+	Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"`
+	// For numeric types, contains the original text representation of the value.
+	// For booleans, "true" or "false".
+	// For strings, contains the default text contents (not escaped in any way).
+	// For bytes, contains the C escaped value.  All bytes >= 128 are escaped.
+	// TODO(kenton):  Base-64 encode?
+	DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
+	// If set, gives the index of a oneof in the containing type's oneof_decl
+	// list.  This field is a member of that oneof.
+	OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"`
+	// JSON name of this field. The value is set by protocol compiler. If the
+	// user has set a "json_name" option on this field, that option's value
+	// will be used. Otherwise, it's deduced from the field's name by converting
+	// it to camelCase.
+	JsonName             *string       `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
+	Options              *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *FieldDescriptorProto) Reset()         { *m = FieldDescriptorProto{} }
+func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*FieldDescriptorProto) ProtoMessage()    {}
+func (*FieldDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{4}
+}
+
+func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b)
+}
+func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FieldDescriptorProto.Merge(m, src)
+}
+func (m *FieldDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_FieldDescriptorProto.Size(m)
+}
+func (m *FieldDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo
+
+func (m *FieldDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetNumber() int32 {
+	if m != nil && m.Number != nil {
+		return *m.Number
+	}
+	return 0
+}
+
+func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label {
+	if m != nil && m.Label != nil {
+		return *m.Label
+	}
+	return FieldDescriptorProto_LABEL_OPTIONAL
+}
+
+func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type {
+	if m != nil && m.Type != nil {
+		return *m.Type
+	}
+	return FieldDescriptorProto_TYPE_DOUBLE
+}
+
+func (m *FieldDescriptorProto) GetTypeName() string {
+	if m != nil && m.TypeName != nil {
+		return *m.TypeName
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetExtendee() string {
+	if m != nil && m.Extendee != nil {
+		return *m.Extendee
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetDefaultValue() string {
+	if m != nil && m.DefaultValue != nil {
+		return *m.DefaultValue
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetOneofIndex() int32 {
+	if m != nil && m.OneofIndex != nil {
+		return *m.OneofIndex
+	}
+	return 0
+}
+
+func (m *FieldDescriptorProto) GetJsonName() string {
+	if m != nil && m.JsonName != nil {
+		return *m.JsonName
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetOptions() *FieldOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
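+
+// Editor's sketch (hypothetical values, not generated output): building a
+// field descriptor by hand shows how the comments on FieldDescriptorProto
+// fit together; a message-typed field sets both Type and a fully-qualified
+// TypeName with a leading '.'.
+func exampleMessageField() *FieldDescriptorProto {
+	return &FieldDescriptorProto{
+		Name:     proto.String("options"),
+		Number:   proto.Int32(8),
+		Label:    FieldDescriptorProto_LABEL_OPTIONAL.Enum(),
+		Type:     FieldDescriptorProto_TYPE_MESSAGE.Enum(),
+		TypeName: proto.String(".google.protobuf.FileOptions"),
+	}
+}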
+
+// Describes a oneof.
+type OneofDescriptorProto struct {
+	Name                 *string       `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Options              *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *OneofDescriptorProto) Reset()         { *m = OneofDescriptorProto{} }
+func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*OneofDescriptorProto) ProtoMessage()    {}
+func (*OneofDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{5}
+}
+
+func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b)
+}
+func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OneofDescriptorProto.Merge(m, src)
+}
+func (m *OneofDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_OneofDescriptorProto.Size(m)
+}
+func (m *OneofDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo
+
+func (m *OneofDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *OneofDescriptorProto) GetOptions() *OneofOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes an enum type.
+type EnumDescriptorProto struct {
+	Name    *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Value   []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+	Options *EnumOptions                `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	// Range of reserved numeric values. Reserved numeric values may not be used
+	// by enum values in the same enum declaration. Reserved ranges may not
+	// overlap.
+	ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
+	// Reserved enum value names, which may not be reused. A given name may only
+	// be reserved once.
+	ReservedName         []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EnumDescriptorProto) Reset()         { *m = EnumDescriptorProto{} }
+func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*EnumDescriptorProto) ProtoMessage()    {}
+func (*EnumDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{6}
+}
+
+func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b)
+}
+func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumDescriptorProto.Merge(m, src)
+}
+func (m *EnumDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_EnumDescriptorProto.Size(m)
+}
+func (m *EnumDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo
+
+func (m *EnumDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *EnumDescriptorProto) GetOptions() *EnumOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange {
+	if m != nil {
+		return m.ReservedRange
+	}
+	return nil
+}
+
+func (m *EnumDescriptorProto) GetReservedName() []string {
+	if m != nil {
+		return m.ReservedName
+	}
+	return nil
+}
+
+// Range of reserved numeric values. Reserved values may not be used by
+// entries in the same enum. Reserved ranges may not overlap.
+//
+// Note that this is distinct from DescriptorProto.ReservedRange in that it
+// is inclusive such that it can appropriately represent the entire int32
+// domain.
+type EnumDescriptorProto_EnumReservedRange struct {
+	Start                *int32   `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End                  *int32   `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EnumDescriptorProto_EnumReservedRange) Reset()         { *m = EnumDescriptorProto_EnumReservedRange{} }
+func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) }
+func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage()    {}
+func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{6, 0}
+}
+
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo
+
+func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
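+
+// Editor's sketch (hypothetical helpers): the inclusive/exclusive distinction
+// called out in the comment above, in code form. DescriptorProto.ReservedRange
+// excludes End; EnumReservedRange includes it, so it can cover the entire
+// int32 domain.
+func exampleMessageRangeContains(r *DescriptorProto_ReservedRange, n int32) bool {
+	return n >= r.GetStart() && n < r.GetEnd() // end-exclusive
+}
+
+func exampleEnumRangeContains(r *EnumDescriptorProto_EnumReservedRange, n int32) bool {
+	return n >= r.GetStart() && n <= r.GetEnd() // end-inclusive
+}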
+
+// Describes a value within an enum.
+type EnumValueDescriptorProto struct {
+	Name                 *string           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Number               *int32            `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
+	Options              *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *EnumValueDescriptorProto) Reset()         { *m = EnumValueDescriptorProto{} }
+func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*EnumValueDescriptorProto) ProtoMessage()    {}
+func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{7}
+}
+
+func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b)
+}
+func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src)
+}
+func (m *EnumValueDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_EnumValueDescriptorProto.Size(m)
+}
+func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo
+
+func (m *EnumValueDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *EnumValueDescriptorProto) GetNumber() int32 {
+	if m != nil && m.Number != nil {
+		return *m.Number
+	}
+	return 0
+}
+
+func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes a service.
+type ServiceDescriptorProto struct {
+	Name                 *string                  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Method               []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
+	Options              *ServiceOptions          `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
+	XXX_unrecognized     []byte                   `json:"-"`
+	XXX_sizecache        int32                    `json:"-"`
+}
+
+func (m *ServiceDescriptorProto) Reset()         { *m = ServiceDescriptorProto{} }
+func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*ServiceDescriptorProto) ProtoMessage()    {}
+func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{8}
+}
+
+func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b)
+}
+func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceDescriptorProto.Merge(m, src)
+}
+func (m *ServiceDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_ServiceDescriptorProto.Size(m)
+}
+func (m *ServiceDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo
+
+func (m *ServiceDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto {
+	if m != nil {
+		return m.Method
+	}
+	return nil
+}
+
+func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes a method of a service.
+type MethodDescriptorProto struct {
+	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Input and output type names.  These are resolved in the same way as
+	// FieldDescriptorProto.type_name, but must refer to a message type.
+	InputType  *string        `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
+	OutputType *string        `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"`
+	Options    *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
+	// Identifies whether the client streams multiple client messages.
+	ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
+	// Identifies whether the server streams multiple server messages.
+	ServerStreaming      *bool    `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MethodDescriptorProto) Reset()         { *m = MethodDescriptorProto{} }
+func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*MethodDescriptorProto) ProtoMessage()    {}
+func (*MethodDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{9}
+}
+
+func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b)
+}
+func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MethodDescriptorProto.Merge(m, src)
+}
+func (m *MethodDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_MethodDescriptorProto.Size(m)
+}
+func (m *MethodDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo
+
+const Default_MethodDescriptorProto_ClientStreaming bool = false
+const Default_MethodDescriptorProto_ServerStreaming bool = false
+
+func (m *MethodDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *MethodDescriptorProto) GetInputType() string {
+	if m != nil && m.InputType != nil {
+		return *m.InputType
+	}
+	return ""
+}
+
+func (m *MethodDescriptorProto) GetOutputType() string {
+	if m != nil && m.OutputType != nil {
+		return *m.OutputType
+	}
+	return ""
+}
+
+func (m *MethodDescriptorProto) GetOptions() *MethodOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *MethodDescriptorProto) GetClientStreaming() bool {
+	if m != nil && m.ClientStreaming != nil {
+		return *m.ClientStreaming
+	}
+	return Default_MethodDescriptorProto_ClientStreaming
+}
+
+func (m *MethodDescriptorProto) GetServerStreaming() bool {
+	if m != nil && m.ServerStreaming != nil {
+		return *m.ServerStreaming
+	}
+	return Default_MethodDescriptorProto_ServerStreaming
+}
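+
+// Editor's sketch (hypothetical helper): the four RPC shapes follow directly
+// from the two streaming flags, which default to false via the constants
+// above.
+func exampleRPCShape(m *MethodDescriptorProto) string {
+	switch {
+	case m.GetClientStreaming() && m.GetServerStreaming():
+		return "bidirectional streaming"
+	case m.GetClientStreaming():
+		return "client streaming"
+	case m.GetServerStreaming():
+		return "server streaming"
+	default:
+		return "unary" // both flags unset or explicitly false
+	}
+}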
+
+type FileOptions struct {
+	// Sets the Java package where classes generated from this .proto will be
+	// placed.  By default, the proto package is used, but this is often
+	// inappropriate because proto packages do not normally start with backwards
+	// domain names.
+	JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"`
+	// If set, all the classes from the .proto file are wrapped in a single
+	// outer class with the given name.  This applies to both Proto1
+	// (equivalent to the old "--one_java_file" option) and Proto2 (where
+	// a .proto always translates to a single class, but you may want to
+	// explicitly choose the class name).
+	JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
+	// If set true, then the Java code generator will generate a separate .java
+	// file for each top-level message, enum, and service defined in the .proto
+	// file.  Thus, these types will *not* be nested inside the outer class
+	// named by java_outer_classname.  However, the outer class will still be
+	// generated to contain the file's getDescriptor() method as well as any
+	// top-level extensions defined in the file.
+	JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
+	// This option does nothing.
+	JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use.
+	// If set true, then the Java2 code generator will generate code that
+	// throws an exception whenever an attempt is made to assign a non-UTF-8
+	// byte sequence to a string field.
+	// Message reflection will do the same.
+	// However, an extension field still accepts non-UTF-8 byte sequences.
+	// This option has no effect when used with the lite runtime.
+	JavaStringCheckUtf8 *bool                     `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
+	OptimizeFor         *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
+	// Sets the Go package where structs generated from this .proto will be
+	// placed. If omitted, the Go package will be derived from the following:
+	//   - The basename of the package import path, if provided.
+	//   - Otherwise, the package statement in the .proto file, if present.
+	//   - Otherwise, the basename of the .proto file, without extension.
+	GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
+	// Should generic services be generated in each language?  "Generic" services
+	// are not specific to any particular RPC system.  They are generated by the
+	// main code generators in each language (without additional plugins).
+	// Generic services were the only kind of service generation supported by
+	// early versions of google.protobuf.
+	//
+	// Generic services are now considered deprecated in favor of using plugins
+	// that generate code specific to your particular RPC system.  Therefore,
+	// these default to false.  Old code which depends on generic services should
+	// explicitly set them to true.
+	CcGenericServices   *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
+	JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
+	PyGenericServices   *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
+	PhpGenericServices  *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
+	// Is this file deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for everything in the file, or it will be completely ignored; at the
+	// very least, this is a formalization for deprecating files.
+	Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Enables the use of arenas for the proto messages in this file. This applies
+	// only to generated classes for C++.
+	CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
+	// Sets the objective c class prefix which is prepended to all objective c
+	// generated classes from this .proto. There is no default.
+	ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
+	// Namespace for generated classes; defaults to the package.
+	CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
+	// By default, Swift generators take the proto package, CamelCase it
+	// (replacing '.' with underscore), and use the result to prefix the
+	// types/symbols defined. When this option is provided, its value is used
+	// as the prefix instead.
+	SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"`
+	// Sets the php class prefix which is prepended to all php generated classes
+	// from this .proto. Default is empty.
+	PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"`
+	// Use this option to change the namespace of php generated classes. Default
+	// is empty. When this option is empty, the package name will be used for
+	// determining the namespace.
+	PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
+	// Use this option to change the namespace of php generated metadata classes.
+	// Default is empty. When this option is empty, the proto file name will be used
+	// for determining the namespace.
+	PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"`
+	// Use this option to change the package of ruby generated classes. Default
+	// is empty. When this option is not set, the package name will be used for
+	// determining the ruby package.
+	RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
+	// The parser stores options it doesn't recognize here.
+	// See the documentation for the "Options" section above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *FileOptions) Reset()         { *m = FileOptions{} }
+func (m *FileOptions) String() string { return proto.CompactTextString(m) }
+func (*FileOptions) ProtoMessage()    {}
+func (*FileOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{10}
+}
+
+var extRange_FileOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_FileOptions
+}
+
+func (m *FileOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FileOptions.Unmarshal(m, b)
+}
+func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic)
+}
+func (m *FileOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileOptions.Merge(m, src)
+}
+func (m *FileOptions) XXX_Size() int {
+	return xxx_messageInfo_FileOptions.Size(m)
+}
+func (m *FileOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_FileOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FileOptions proto.InternalMessageInfo
+
+const Default_FileOptions_JavaMultipleFiles bool = false
+const Default_FileOptions_JavaStringCheckUtf8 bool = false
+const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED
+const Default_FileOptions_CcGenericServices bool = false
+const Default_FileOptions_JavaGenericServices bool = false
+const Default_FileOptions_PyGenericServices bool = false
+const Default_FileOptions_PhpGenericServices bool = false
+const Default_FileOptions_Deprecated bool = false
+const Default_FileOptions_CcEnableArenas bool = false
+
+func (m *FileOptions) GetJavaPackage() string {
+	if m != nil && m.JavaPackage != nil {
+		return *m.JavaPackage
+	}
+	return ""
+}
+
+func (m *FileOptions) GetJavaOuterClassname() string {
+	if m != nil && m.JavaOuterClassname != nil {
+		return *m.JavaOuterClassname
+	}
+	return ""
+}
+
+func (m *FileOptions) GetJavaMultipleFiles() bool {
+	if m != nil && m.JavaMultipleFiles != nil {
+		return *m.JavaMultipleFiles
+	}
+	return Default_FileOptions_JavaMultipleFiles
+}
+
+// Deprecated: Do not use.
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool {
+	if m != nil && m.JavaGenerateEqualsAndHash != nil {
+		return *m.JavaGenerateEqualsAndHash
+	}
+	return false
+}
+
+func (m *FileOptions) GetJavaStringCheckUtf8() bool {
+	if m != nil && m.JavaStringCheckUtf8 != nil {
+		return *m.JavaStringCheckUtf8
+	}
+	return Default_FileOptions_JavaStringCheckUtf8
+}
+
+func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode {
+	if m != nil && m.OptimizeFor != nil {
+		return *m.OptimizeFor
+	}
+	return Default_FileOptions_OptimizeFor
+}
+
+func (m *FileOptions) GetGoPackage() string {
+	if m != nil && m.GoPackage != nil {
+		return *m.GoPackage
+	}
+	return ""
+}
+
+func (m *FileOptions) GetCcGenericServices() bool {
+	if m != nil && m.CcGenericServices != nil {
+		return *m.CcGenericServices
+	}
+	return Default_FileOptions_CcGenericServices
+}
+
+func (m *FileOptions) GetJavaGenericServices() bool {
+	if m != nil && m.JavaGenericServices != nil {
+		return *m.JavaGenericServices
+	}
+	return Default_FileOptions_JavaGenericServices
+}
+
+func (m *FileOptions) GetPyGenericServices() bool {
+	if m != nil && m.PyGenericServices != nil {
+		return *m.PyGenericServices
+	}
+	return Default_FileOptions_PyGenericServices
+}
+
+func (m *FileOptions) GetPhpGenericServices() bool {
+	if m != nil && m.PhpGenericServices != nil {
+		return *m.PhpGenericServices
+	}
+	return Default_FileOptions_PhpGenericServices
+}
+
+func (m *FileOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_FileOptions_Deprecated
+}
+
+func (m *FileOptions) GetCcEnableArenas() bool {
+	if m != nil && m.CcEnableArenas != nil {
+		return *m.CcEnableArenas
+	}
+	return Default_FileOptions_CcEnableArenas
+}
+
+func (m *FileOptions) GetObjcClassPrefix() string {
+	if m != nil && m.ObjcClassPrefix != nil {
+		return *m.ObjcClassPrefix
+	}
+	return ""
+}
+
+func (m *FileOptions) GetCsharpNamespace() string {
+	if m != nil && m.CsharpNamespace != nil {
+		return *m.CsharpNamespace
+	}
+	return ""
+}
+
+func (m *FileOptions) GetSwiftPrefix() string {
+	if m != nil && m.SwiftPrefix != nil {
+		return *m.SwiftPrefix
+	}
+	return ""
+}
+
+func (m *FileOptions) GetPhpClassPrefix() string {
+	if m != nil && m.PhpClassPrefix != nil {
+		return *m.PhpClassPrefix
+	}
+	return ""
+}
+
+func (m *FileOptions) GetPhpNamespace() string {
+	if m != nil && m.PhpNamespace != nil {
+		return *m.PhpNamespace
+	}
+	return ""
+}
+
+func (m *FileOptions) GetPhpMetadataNamespace() string {
+	if m != nil && m.PhpMetadataNamespace != nil {
+		return *m.PhpMetadataNamespace
+	}
+	return ""
+}
+
+func (m *FileOptions) GetRubyPackage() string {
+	if m != nil && m.RubyPackage != nil {
+		return *m.RubyPackage
+	}
+	return ""
+}
+
+func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
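+
+// Editor's sketch (illustrative): on an empty FileOptions the getters fall
+// back to the Default_FileOptions_* constants above, so OptimizeFor reports
+// SPEED even though the field is unset.
+func exampleFileOptionsDefaults() (FileOptions_OptimizeMode, bool) {
+	opts := &FileOptions{}
+	return opts.GetOptimizeFor(), opts.GetJavaMultipleFiles() // SPEED, false
+}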
+
+type MessageOptions struct {
+	// Set true to use the old proto1 MessageSet wire format for extensions.
+	// This is provided for backwards-compatibility with the MessageSet wire
+	// format.  You should not use this for any other reason:  It's less
+	// efficient, has fewer features, and is more complicated.
+	//
+	// The message must be defined exactly as follows:
+	//   message Foo {
+	//     option message_set_wire_format = true;
+	//     extensions 4 to max;
+	//   }
+	// Note that the message cannot have any defined fields; MessageSets only
+	// have extensions.
+	//
+	// All extensions of your type must be singular messages; e.g. they cannot
+	// be int32s, enums, or repeated messages.
+	//
+	// Because this is an option, the above two restrictions are not enforced by
+	// the protocol compiler.
+	MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
+	// Disables the generation of the standard "descriptor()" accessor, which can
+	// conflict with a field of the same name.  This is meant to make migration
+	// from proto1 easier; new code should avoid fields named "descriptor".
+	NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
+	// Is this message deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the message, or it will be completely ignored; at the very least,
+	// this is a formalization for deprecating messages.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Whether the message is an automatically generated map entry type for a
+	// map field.
+	//
+	// For maps fields:
+	//     map<KeyType, ValueType> map_field = 1;
+	// The parsed descriptor looks like:
+	//     message MapFieldEntry {
+	//         option map_entry = true;
+	//         optional KeyType key = 1;
+	//         optional ValueType value = 2;
+	//     }
+	//     repeated MapFieldEntry map_field = 1;
+	//
+	// Implementations may choose not to generate the map_entry=true message, but
+	// use a native map in the target language to hold the keys and values.
+	// The reflection APIs in such implementations still need to work as
+	// if the field is a repeated message field.
+	//
+	// NOTE: Do not set the option in .proto files. Always use the maps syntax
+	// instead. The option should only be implicitly set by the proto compiler
+	// parser.
+	MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *MessageOptions) Reset()         { *m = MessageOptions{} }
+func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
+func (*MessageOptions) ProtoMessage()    {}
+func (*MessageOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{11}
+}
+
+var extRange_MessageOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_MessageOptions
+}
+
+func (m *MessageOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MessageOptions.Unmarshal(m, b)
+}
+func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic)
+}
+func (m *MessageOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MessageOptions.Merge(m, src)
+}
+func (m *MessageOptions) XXX_Size() int {
+	return xxx_messageInfo_MessageOptions.Size(m)
+}
+func (m *MessageOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_MessageOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MessageOptions proto.InternalMessageInfo
+
+const Default_MessageOptions_MessageSetWireFormat bool = false
+const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
+const Default_MessageOptions_Deprecated bool = false
+
+func (m *MessageOptions) GetMessageSetWireFormat() bool {
+	if m != nil && m.MessageSetWireFormat != nil {
+		return *m.MessageSetWireFormat
+	}
+	return Default_MessageOptions_MessageSetWireFormat
+}
+
+func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
+	if m != nil && m.NoStandardDescriptorAccessor != nil {
+		return *m.NoStandardDescriptorAccessor
+	}
+	return Default_MessageOptions_NoStandardDescriptorAccessor
+}
+
+func (m *MessageOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_MessageOptions_Deprecated
+}
+
+func (m *MessageOptions) GetMapEntry() bool {
+	if m != nil && m.MapEntry != nil {
+		return *m.MapEntry
+	}
+	return false
+}
+
+func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
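+
+// Editor's sketch (assumed compiler behavior, hypothetical names): for
+// "map<string, int32> kv = 1;" the parser synthesizes roughly the descriptor
+// below (key/value fields omitted for brevity); map_entry is what
+// distinguishes it from a user-written repeated message field.
+func exampleMapEntryDescriptor() *DescriptorProto {
+	return &DescriptorProto{
+		Name: proto.String("KvEntry"),
+		Options: &MessageOptions{
+			MapEntry: proto.Bool(true), // set only by the proto compiler
+		},
+	}
+}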
+
+type FieldOptions struct {
+	// The ctype option instructs the C++ code generator to use a different
+	// representation of the field than it normally would.  See the specific
+	// options below.  This option is not yet implemented in the open source
+	// release -- sorry, we'll try to include it in a future version!
+	Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
+	// The packed option can be enabled for repeated primitive fields to enable
+	// a more efficient representation on the wire. Rather than repeatedly
+	// writing the tag and type for each element, the entire array is encoded as
+	// a single length-delimited blob. In proto3, only explicitly setting it
+	// to false avoids packed encoding.
+	Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
+	// The jstype option determines the JavaScript type used for values of the
+	// field.  The option is permitted only for 64 bit integral and fixed types
+	// (int64, uint64, sint64, fixed64, sfixed64).  A field with jstype JS_STRING
+	// is represented as a JavaScript string, which avoids the loss of precision
+	// that can happen when a large value is converted to a floating point
+	// JavaScript number.
+	// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
+	// use the JavaScript "number" type.  The behavior of the default option
+	// JS_NORMAL is implementation dependent.
+	//
+	// This option is an enum to permit additional types to be added, e.g.
+	// goog.math.Integer.
+	Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
+	// Should this field be parsed lazily?  Lazy applies only to message-type
+	// fields.  It means that when the outer message is initially parsed, the
+	// inner message's contents will not be parsed but instead stored in encoded
+	// form.  The inner message will actually be parsed when it is first accessed.
+	//
+	// This is only a hint.  Implementations are free to choose whether to use
+	// eager or lazy parsing regardless of the value of this option.  However,
+	// setting this option true suggests that the protocol author believes that
+	// using lazy parsing on this field is worth the additional bookkeeping
+	// overhead typically needed to implement it.
+	//
+	// This option does not affect the public interface of any generated code;
+	// all method signatures remain the same.  Furthermore, thread-safety of the
+	// interface is not affected by this option; const methods remain safe to
+	// call from multiple threads concurrently, while non-const methods continue
+	// to require exclusive access.
+	//
+	// Note that implementations may choose not to check required fields within
+	// a lazy sub-message.  That is, calling IsInitialized() on the outer message
+	// may return true even if the inner message has missing required fields.
+	// This is necessary because otherwise the inner message would have to be
+	// parsed in order to perform the check, defeating the purpose of lazy
+	// parsing.  An implementation which chooses not to check required fields
+	// must be consistent about it.  That is, for any particular sub-message, the
+	// implementation must either *always* check its required fields, or *never*
+	// check its required fields, regardless of whether or not the message has
+	// been parsed.
+	Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
+	// Is this field deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for accessors, or it will be completely ignored; at the very least, this
+	// is a formalization for deprecating fields.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// For Google-internal migration only. Do not use.
+	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *FieldOptions) Reset()         { *m = FieldOptions{} }
+func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
+func (*FieldOptions) ProtoMessage()    {}
+func (*FieldOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{12}
+}
+
+var extRange_FieldOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_FieldOptions
+}
+
+func (m *FieldOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FieldOptions.Unmarshal(m, b)
+}
+func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic)
+}
+func (m *FieldOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FieldOptions.Merge(m, src)
+}
+func (m *FieldOptions) XXX_Size() int {
+	return xxx_messageInfo_FieldOptions.Size(m)
+}
+func (m *FieldOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_FieldOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FieldOptions proto.InternalMessageInfo
+
+const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING
+const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL
+const Default_FieldOptions_Lazy bool = false
+const Default_FieldOptions_Deprecated bool = false
+const Default_FieldOptions_Weak bool = false
+
+func (m *FieldOptions) GetCtype() FieldOptions_CType {
+	if m != nil && m.Ctype != nil {
+		return *m.Ctype
+	}
+	return Default_FieldOptions_Ctype
+}
+
+func (m *FieldOptions) GetPacked() bool {
+	if m != nil && m.Packed != nil {
+		return *m.Packed
+	}
+	return false
+}
+
+func (m *FieldOptions) GetJstype() FieldOptions_JSType {
+	if m != nil && m.Jstype != nil {
+		return *m.Jstype
+	}
+	return Default_FieldOptions_Jstype
+}
+
+func (m *FieldOptions) GetLazy() bool {
+	if m != nil && m.Lazy != nil {
+		return *m.Lazy
+	}
+	return Default_FieldOptions_Lazy
+}
+
+func (m *FieldOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_FieldOptions_Deprecated
+}
+
+func (m *FieldOptions) GetWeak() bool {
+	if m != nil && m.Weak != nil {
+		return *m.Weak
+	}
+	return Default_FieldOptions_Weak
+}
+
+func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
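+
+// Editor's sketch (illustrative): the ctype and jstype getters above fall
+// back to typed enum defaults rather than plain zero values, so an unset
+// FieldOptions reports STRING and JS_NORMAL.
+func exampleFieldOptionsDefaults() (FieldOptions_CType, FieldOptions_JSType) {
+	opts := &FieldOptions{}
+	return opts.GetCtype(), opts.GetJstype() // FieldOptions_STRING, FieldOptions_JS_NORMAL
+}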
+
+type OneofOptions struct {
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *OneofOptions) Reset()         { *m = OneofOptions{} }
+func (m *OneofOptions) String() string { return proto.CompactTextString(m) }
+func (*OneofOptions) ProtoMessage()    {}
+func (*OneofOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{13}
+}
+
+var extRange_OneofOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_OneofOptions
+}
+
+func (m *OneofOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OneofOptions.Unmarshal(m, b)
+}
+func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic)
+}
+func (m *OneofOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OneofOptions.Merge(m, src)
+}
+func (m *OneofOptions) XXX_Size() int {
+	return xxx_messageInfo_OneofOptions.Size(m)
+}
+func (m *OneofOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_OneofOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OneofOptions proto.InternalMessageInfo
+
+func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type EnumOptions struct {
+	// Set this option to true to allow mapping different tag names to the same
+	// value.
+	AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
+	// Is this enum deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the enum, or it will be completely ignored; at the very least, this
+	// is a formalization for deprecating enums.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *EnumOptions) Reset()         { *m = EnumOptions{} }
+func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
+func (*EnumOptions) ProtoMessage()    {}
+func (*EnumOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{14}
+}
+
+var extRange_EnumOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_EnumOptions
+}
+
+func (m *EnumOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumOptions.Unmarshal(m, b)
+}
+func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic)
+}
+func (m *EnumOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumOptions.Merge(m, src)
+}
+func (m *EnumOptions) XXX_Size() int {
+	return xxx_messageInfo_EnumOptions.Size(m)
+}
+func (m *EnumOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumOptions proto.InternalMessageInfo
+
+const Default_EnumOptions_Deprecated bool = false
+
+func (m *EnumOptions) GetAllowAlias() bool {
+	if m != nil && m.AllowAlias != nil {
+		return *m.AllowAlias
+	}
+	return false
+}
+
+func (m *EnumOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_EnumOptions_Deprecated
+}
+
+func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
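+
+// Editor's sketch (hypothetical values): allow_alias permits two enum value
+// descriptors to share a number, which the proto compiler otherwise rejects.
+func exampleAliasedEnumValues() []*EnumValueDescriptorProto {
+	return []*EnumValueDescriptorProto{
+		{Name: proto.String("STARTED"), Number: proto.Int32(1)},
+		{Name: proto.String("RUNNING"), Number: proto.Int32(1)}, // alias of STARTED
+	}
+}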
+
+type EnumValueOptions struct {
+	// Is this enum value deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the enum value, or it will be completely ignored; at the very least,
+	// this is a formalization for deprecating enum values.
+	Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *EnumValueOptions) Reset()         { *m = EnumValueOptions{} }
+func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
+func (*EnumValueOptions) ProtoMessage()    {}
+func (*EnumValueOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{15}
+}
+
+var extRange_EnumValueOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_EnumValueOptions
+}
+
+func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b)
+}
+func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic)
+}
+func (m *EnumValueOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumValueOptions.Merge(m, src)
+}
+func (m *EnumValueOptions) XXX_Size() int {
+	return xxx_messageInfo_EnumValueOptions.Size(m)
+}
+func (m *EnumValueOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumValueOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo
+
+const Default_EnumValueOptions_Deprecated bool = false
+
+func (m *EnumValueOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_EnumValueOptions_Deprecated
+}
+
+func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type ServiceOptions struct {
+	// Is this service deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the service, or it will be completely ignored; at the very least,
+	// this is a formalization for deprecating services.
+	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *ServiceOptions) Reset()         { *m = ServiceOptions{} }
+func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
+func (*ServiceOptions) ProtoMessage()    {}
+func (*ServiceOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{16}
+}
+
+var extRange_ServiceOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_ServiceOptions
+}
+
+func (m *ServiceOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ServiceOptions.Unmarshal(m, b)
+}
+func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic)
+}
+func (m *ServiceOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceOptions.Merge(m, src)
+}
+func (m *ServiceOptions) XXX_Size() int {
+	return xxx_messageInfo_ServiceOptions.Size(m)
+}
+func (m *ServiceOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServiceOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo
+
+const Default_ServiceOptions_Deprecated bool = false
+
+func (m *ServiceOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_ServiceOptions_Deprecated
+}
+
+func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type MethodOptions struct {
+	// Is this method deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the method, or it will be completely ignored; at the very least,
+	// this is a formalization for deprecating methods.
+	Deprecated       *bool                           `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *MethodOptions) Reset()         { *m = MethodOptions{} }
+func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
+func (*MethodOptions) ProtoMessage()    {}
+func (*MethodOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{17}
+}
+
+var extRange_MethodOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_MethodOptions
+}
+
+func (m *MethodOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MethodOptions.Unmarshal(m, b)
+}
+func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic)
+}
+func (m *MethodOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MethodOptions.Merge(m, src)
+}
+func (m *MethodOptions) XXX_Size() int {
+	return xxx_messageInfo_MethodOptions.Size(m)
+}
+func (m *MethodOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_MethodOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MethodOptions proto.InternalMessageInfo
+
+const Default_MethodOptions_Deprecated bool = false
+const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN
+
+func (m *MethodOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_MethodOptions_Deprecated
+}
+
+func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
+	if m != nil && m.IdempotencyLevel != nil {
+		return *m.IdempotencyLevel
+	}
+	return Default_MethodOptions_IdempotencyLevel
+}
+
+func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+type UninterpretedOption struct {
+	Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
+	// The value of the uninterpreted option, in whatever type the tokenizer
+	// identified it as during parsing. Exactly one of these should be set.
+	IdentifierValue      *string  `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
+	PositiveIntValue     *uint64  `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"`
+	NegativeIntValue     *int64   `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"`
+	DoubleValue          *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
+	StringValue          []byte   `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
+	AggregateValue       *string  `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UninterpretedOption) Reset()         { *m = UninterpretedOption{} }
+func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption) ProtoMessage()    {}
+func (*UninterpretedOption) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{18}
+}
+
+func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b)
+}
+func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic)
+}
+func (m *UninterpretedOption) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UninterpretedOption.Merge(m, src)
+}
+func (m *UninterpretedOption) XXX_Size() int {
+	return xxx_messageInfo_UninterpretedOption.Size(m)
+}
+func (m *UninterpretedOption) XXX_DiscardUnknown() {
+	xxx_messageInfo_UninterpretedOption.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo
+
+func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
+	if m != nil {
+		return m.Name
+	}
+	return nil
+}
+
+func (m *UninterpretedOption) GetIdentifierValue() string {
+	if m != nil && m.IdentifierValue != nil {
+		return *m.IdentifierValue
+	}
+	return ""
+}
+
+func (m *UninterpretedOption) GetPositiveIntValue() uint64 {
+	if m != nil && m.PositiveIntValue != nil {
+		return *m.PositiveIntValue
+	}
+	return 0
+}
+
+func (m *UninterpretedOption) GetNegativeIntValue() int64 {
+	if m != nil && m.NegativeIntValue != nil {
+		return *m.NegativeIntValue
+	}
+	return 0
+}
+
+func (m *UninterpretedOption) GetDoubleValue() float64 {
+	if m != nil && m.DoubleValue != nil {
+		return *m.DoubleValue
+	}
+	return 0
+}
+
+func (m *UninterpretedOption) GetStringValue() []byte {
+	if m != nil {
+		return m.StringValue
+	}
+	return nil
+}
+
+func (m *UninterpretedOption) GetAggregateValue() string {
+	if m != nil && m.AggregateValue != nil {
+		return *m.AggregateValue
+	}
+	return ""
+}
+
+// The name of the uninterpreted option.  Each string represents a segment in
+// a dot-separated name.  is_extension is true iff a segment represents an
+// extension (denoted with parentheses in options specs in .proto files).
+// E.g., { ["foo", false], ["bar.baz", true], ["qux", false] } represents
+// "foo.(bar.baz).qux".
+type UninterpretedOption_NamePart struct {
+	NamePart             *string  `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
+	IsExtension          *bool    `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UninterpretedOption_NamePart) Reset()         { *m = UninterpretedOption_NamePart{} }
+func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption_NamePart) ProtoMessage()    {}
+func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{18, 0}
+}
+
+func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b)
+}
+func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic)
+}
+func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src)
+}
+func (m *UninterpretedOption_NamePart) XXX_Size() int {
+	return xxx_messageInfo_UninterpretedOption_NamePart.Size(m)
+}
+func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() {
+	xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo
+
+func (m *UninterpretedOption_NamePart) GetNamePart() string {
+	if m != nil && m.NamePart != nil {
+		return *m.NamePart
+	}
+	return ""
+}
+
+func (m *UninterpretedOption_NamePart) GetIsExtension() bool {
+	if m != nil && m.IsExtension != nil {
+		return *m.IsExtension
+	}
+	return false
+}
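+
+// formatOptionName is an illustrative sketch, not part of the generated API:
+// it reassembles the dotted option name described above from its NameParts,
+// wrapping extension segments in parentheses, e.g. "foo.(bar.baz).qux".
+func formatOptionName(parts []*UninterpretedOption_NamePart) string {
+	out := ""
+	for i, p := range parts {
+		if i > 0 {
+			out += "."
+		}
+		if p.GetIsExtension() {
+			// Extension segments are parenthesized, as in .proto option specs.
+			out += "(" + p.GetNamePart() + ")"
+		} else {
+			out += p.GetNamePart()
+		}
+	}
+	return out
+}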
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+type SourceCodeInfo struct {
+	// A Location identifies a piece of source code in a .proto file which
+	// corresponds to a particular definition.  This information is intended
+	// to be useful to IDEs, code indexers, documentation generators, and similar
+	// tools.
+	//
+	// For example, say we have a file like:
+	//   message Foo {
+	//     optional string foo = 1;
+	//   }
+	// Let's look at just the field definition:
+	//   optional string foo = 1;
+	//   ^       ^^     ^^  ^  ^^^
+	//   a       bc     de  f  ghi
+	// We have the following locations:
+	//   span   path               represents
+	//   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
+	//   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
+	//   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
+	//   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
+	//   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
+	//
+	// Notes:
+	// - A location may refer to a repeated field itself (i.e. not to any
+	//   particular index within it).  This is used whenever a set of elements are
+	//   logically enclosed in a single code segment.  For example, an entire
+	//   extend block (possibly containing multiple extension definitions) will
+	//   have an outer location whose path refers to the "extensions" repeated
+	//   field without an index.
+	// - Multiple locations may have the same path.  This happens when a single
+	//   logical declaration is spread out across multiple places.  The most
+	//   obvious example is the "extend" block again -- there may be multiple
+	//   extend blocks in the same scope, each of which will have the same path.
+	// - A location's span is not always a subset of its parent's span.  For
+	//   example, the "extendee" of an extension declaration appears at the
+	//   beginning of the "extend" block and is shared by all extensions within
+	//   the block.
+	// - Just because a location's span is a subset of some other location's span
+	//   does not mean that it is a descendant.  For example, a "group" defines
+	//   both a type and a field in a single declaration.  Thus, the locations
+	//   corresponding to the type and field and their components will overlap.
+	// - Code which tries to interpret locations should probably be designed to
+	//   ignore those that it doesn't understand, as more types of locations could
+	//   be recorded in the future.
+	Location             []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
+	XXX_unrecognized     []byte                     `json:"-"`
+	XXX_sizecache        int32                      `json:"-"`
+}
+
+func (m *SourceCodeInfo) Reset()         { *m = SourceCodeInfo{} }
+func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo) ProtoMessage()    {}
+func (*SourceCodeInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{19}
+}
+
+func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b)
+}
+func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic)
+}
+func (m *SourceCodeInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceCodeInfo.Merge(m, src)
+}
+func (m *SourceCodeInfo) XXX_Size() int {
+	return xxx_messageInfo_SourceCodeInfo.Size(m)
+}
+func (m *SourceCodeInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo
+
+func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
+	if m != nil {
+		return m.Location
+	}
+	return nil
+}
+
+type SourceCodeInfo_Location struct {
+	// Identifies which part of the FileDescriptorProto was defined at this
+	// location.
+	//
+	// Each element is a field number or an index.  They form a path from
+	// the root FileDescriptorProto to the place where the definition appears.  For
+	// example, this path:
+	//   [ 4, 3, 2, 7, 1 ]
+	// refers to:
+	//   file.message_type(3)  // 4, 3
+	//       .field(7)         // 2, 7
+	//       .name()           // 1
+	// This is because FileDescriptorProto.message_type has field number 4:
+	//   repeated DescriptorProto message_type = 4;
+	// and DescriptorProto.field has field number 2:
+	//   repeated FieldDescriptorProto field = 2;
+	// and FieldDescriptorProto.name has field number 1:
+	//   optional string name = 1;
+	//
+	// Thus, the above path gives the location of a field name.  If we removed
+	// the last element:
+	//   [ 4, 3, 2, 7 ]
+	// this path refers to the whole field declaration (from the beginning
+	// of the label to the terminating semicolon).
+	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+	// Always has exactly three or four elements: start line, start column,
+	// end line (optional, otherwise assumed same as start line), end column.
+	// These are packed into a single field for efficiency.  Note that line
+	// and column numbers are zero-based -- typically you will want to add
+	// 1 to each before displaying to a user.
+	Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"`
+	// If this SourceCodeInfo represents a complete declaration, these are any
+	// comments appearing before and after the declaration which appear to be
+	// attached to the declaration.
+	//
+	// A series of line comments appearing on consecutive lines, with no other
+	// tokens appearing on those lines, will be treated as a single comment.
+	//
+	// leading_detached_comments will keep paragraphs of comments that appear
+	// before (but not connected to) the current element. Each paragraph,
+	// separated by empty lines, will be one comment element in the repeated
+	// field.
+	//
+	// Only the comment content is provided; comment markers (e.g. //) are
+	// stripped out.  For block comments, leading whitespace and an asterisk
+	// will be stripped from the beginning of each line other than the first.
+	// Newlines are included in the output.
+	//
+	// Examples:
+	//
+	//   optional int32 foo = 1;  // Comment attached to foo.
+	//   // Comment attached to bar.
+	//   optional int32 bar = 2;
+	//
+	//   optional string baz = 3;
+	//   // Comment attached to baz.
+	//   // Another line attached to baz.
+	//
+	//   // Comment attached to qux.
+	//   //
+	//   // Another line attached to qux.
+	//   optional double qux = 4;
+	//
+	//   // Detached comment for corge. This is not leading or trailing comments
+	//   // to qux or corge because there are blank lines separating it from
+	//   // both.
+	//
+	//   // Detached comment for corge paragraph 2.
+	//
+	//   optional string corge = 5;
+	//   /* Block comment attached
+	//    * to corge.  Leading asterisks
+	//    * will be removed. */
+	//   /* Block comment attached to
+	//    * grault. */
+	//   optional int32 grault = 6;
+	//
+	//   // ignored detached comments.
+	LeadingComments         *string  `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
+	TrailingComments        *string  `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
+	LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
+	XXX_NoUnkeyedLiteral    struct{} `json:"-"`
+	XXX_unrecognized        []byte   `json:"-"`
+	XXX_sizecache           int32    `json:"-"`
+}
+
+func (m *SourceCodeInfo_Location) Reset()         { *m = SourceCodeInfo_Location{} }
+func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo_Location) ProtoMessage()    {}
+func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{19, 0}
+}
+
+func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b)
+}
+func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic)
+}
+func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src)
+}
+func (m *SourceCodeInfo_Location) XXX_Size() int {
+	return xxx_messageInfo_SourceCodeInfo_Location.Size(m)
+}
+func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo
+
+func (m *SourceCodeInfo_Location) GetPath() []int32 {
+	if m != nil {
+		return m.Path
+	}
+	return nil
+}
+
+func (m *SourceCodeInfo_Location) GetSpan() []int32 {
+	if m != nil {
+		return m.Span
+	}
+	return nil
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingComments() string {
+	if m != nil && m.LeadingComments != nil {
+		return *m.LeadingComments
+	}
+	return ""
+}
+
+func (m *SourceCodeInfo_Location) GetTrailingComments() string {
+	if m != nil && m.TrailingComments != nil {
+		return *m.TrailingComments
+	}
+	return ""
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
+	if m != nil {
+		return m.LeadingDetachedComments
+	}
+	return nil
+}
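+
+// spanToDisplay is an illustrative sketch, not part of the generated API: it
+// converts a zero-based Location span -- either [start line, start column,
+// end column] or [start line, start column, end line, end column] -- into
+// the one-based line/column values a user would expect to see.
+func spanToDisplay(loc *SourceCodeInfo_Location) (startLine, startCol, endLine, endCol int32) {
+	s := loc.GetSpan()
+	switch len(s) {
+	case 3:
+		// End line omitted: assumed to equal the start line.
+		return s[0] + 1, s[1] + 1, s[0] + 1, s[2] + 1
+	case 4:
+		return s[0] + 1, s[1] + 1, s[2] + 1, s[3] + 1
+	default:
+		return 0, 0, 0, 0
+	}
+}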
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+type GeneratedCodeInfo struct {
+	// An Annotation connects some span of text in generated code to an element
+	// of its generating .proto file.
+	Annotation           []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                        `json:"-"`
+	XXX_unrecognized     []byte                          `json:"-"`
+	XXX_sizecache        int32                           `json:"-"`
+}
+
+func (m *GeneratedCodeInfo) Reset()         { *m = GeneratedCodeInfo{} }
+func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*GeneratedCodeInfo) ProtoMessage()    {}
+func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{20}
+}
+
+func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b)
+}
+func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic)
+}
+func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GeneratedCodeInfo.Merge(m, src)
+}
+func (m *GeneratedCodeInfo) XXX_Size() int {
+	return xxx_messageInfo_GeneratedCodeInfo.Size(m)
+}
+func (m *GeneratedCodeInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo
+
+func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
+	if m != nil {
+		return m.Annotation
+	}
+	return nil
+}
+
+type GeneratedCodeInfo_Annotation struct {
+	// Identifies the element in the original source .proto file. This field
+	// is formatted the same as SourceCodeInfo.Location.path.
+	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+	// Identifies the filesystem path to the original source .proto.
+	SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"`
+	// Identifies the starting offset in bytes in the generated code
+	// that relates to the identified object.
+	Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"`
+	// Identifies the ending offset in bytes in the generated code that
+	// relates to the identified object. The end offset should be one past
+	// the last relevant byte (so the length of the text = end - begin).
+	End                  *int32   `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GeneratedCodeInfo_Annotation) Reset()         { *m = GeneratedCodeInfo_Annotation{} }
+func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) }
+func (*GeneratedCodeInfo_Annotation) ProtoMessage()    {}
+func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{20, 0}
+}
+
+func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_Size() int {
+	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() {
+	xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo
+
+func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 {
+	if m != nil {
+		return m.Path
+	}
+	return nil
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string {
+	if m != nil && m.SourceFile != nil {
+		return *m.SourceFile
+	}
+	return ""
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 {
+	if m != nil && m.Begin != nil {
+		return *m.Begin
+	}
+	return 0
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
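+
+// annotatedText is an illustrative sketch, not part of the generated API: it
+// extracts the generated-source bytes an Annotation covers, using the
+// half-open [begin, end) offsets described above (length = end - begin).
+func annotatedText(generated []byte, a *GeneratedCodeInfo_Annotation) []byte {
+	begin, end := a.GetBegin(), a.GetEnd()
+	if begin < 0 || end < begin || int(end) > len(generated) {
+		return nil
+	}
+	return generated[begin:end]
+}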
+
+func init() {
+	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value)
+	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
+	proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
+	proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
+	proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
+	proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value)
+	proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet")
+	proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto")
+	proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
+	proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange")
+	proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange")
+	proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions")
+	proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto")
+	proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto")
+	proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto")
+	proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange")
+	proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto")
+	proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto")
+	proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto")
+	proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions")
+	proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions")
+	proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions")
+	proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions")
+	proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions")
+	proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions")
+	proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions")
+	proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions")
+	proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption")
+	proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart")
+	proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo")
+	proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location")
+	proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo")
+	proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation")
+}
+
+func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_e5baabe45344a177) }
+
+var fileDescriptor_e5baabe45344a177 = []byte{
+	// 2589 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x8e, 0xdb, 0xc6,
+	0x15, 0x0e, 0xf5, 0xb7, 0xd2, 0x91, 0x56, 0x3b, 0x3b, 0xbb, 0xb1, 0xe9, 0xcd, 0x8f, 0xd7, 0xca,
+	0x8f, 0xd7, 0x4e, 0xac, 0x0d, 0x1c, 0xdb, 0x71, 0xd6, 0x45, 0x5a, 0xad, 0x44, 0x6f, 0xe4, 0xee,
+	0x4a, 0x2a, 0xa5, 0x6d, 0x7e, 0x80, 0x82, 0x98, 0x25, 0x47, 0x12, 0x6d, 0x8a, 0x64, 0x48, 0xca,
+	0xf6, 0x06, 0xbd, 0x30, 0xd0, 0xab, 0x5e, 0x15, 0xe8, 0x55, 0x51, 0x14, 0xbd, 0xe8, 0x4d, 0x80,
+	0x3e, 0x40, 0x81, 0xde, 0xf5, 0x09, 0x0a, 0xe4, 0x0d, 0x8a, 0xb6, 0x40, 0xfb, 0x08, 0xbd, 0x2c,
+	0x66, 0x86, 0xa4, 0x48, 0x49, 0x1b, 0x6f, 0x02, 0xc4, 0xb9, 0x92, 0xe6, 0x3b, 0xdf, 0x39, 0x73,
+	0xe6, 0xcc, 0x99, 0x99, 0x33, 0x43, 0xd8, 0x1e, 0x39, 0xce, 0xc8, 0xa2, 0xbb, 0xae, 0xe7, 0x04,
+	0xce, 0xc9, 0x74, 0xb8, 0x6b, 0x50, 0x5f, 0xf7, 0x4c, 0x37, 0x70, 0xbc, 0x3a, 0xc7, 0xf0, 0x9a,
+	0x60, 0xd4, 0x23, 0x46, 0xed, 0x08, 0xd6, 0xef, 0x9b, 0x16, 0x6d, 0xc5, 0xc4, 0x3e, 0x0d, 0xf0,
+	0x5d, 0xc8, 0x0d, 0x4d, 0x8b, 0xca, 0xd2, 0x76, 0x76, 0xa7, 0x7c, 0xf3, 0xcd, 0xfa, 0x9c, 0x52,
+	0x3d, 0xad, 0xd1, 0x63, 0xb0, 0xca, 0x35, 0x6a, 0xff, 0xce, 0xc1, 0xc6, 0x12, 0x29, 0xc6, 0x90,
+	0xb3, 0xc9, 0x84, 0x59, 0x94, 0x76, 0x4a, 0x2a, 0xff, 0x8f, 0x65, 0x58, 0x71, 0x89, 0xfe, 0x88,
+	0x8c, 0xa8, 0x9c, 0xe1, 0x70, 0xd4, 0xc4, 0xaf, 0x03, 0x18, 0xd4, 0xa5, 0xb6, 0x41, 0x6d, 0xfd,
+	0x54, 0xce, 0x6e, 0x67, 0x77, 0x4a, 0x6a, 0x02, 0xc1, 0xef, 0xc0, 0xba, 0x3b, 0x3d, 0xb1, 0x4c,
+	0x5d, 0x4b, 0xd0, 0x60, 0x3b, 0xbb, 0x93, 0x57, 0x91, 0x10, 0xb4, 0x66, 0xe4, 0xab, 0xb0, 0xf6,
+	0x84, 0x92, 0x47, 0x49, 0x6a, 0x99, 0x53, 0xab, 0x0c, 0x4e, 0x10, 0x9b, 0x50, 0x99, 0x50, 0xdf,
+	0x27, 0x23, 0xaa, 0x05, 0xa7, 0x2e, 0x95, 0x73, 0x7c, 0xf4, 0xdb, 0x0b, 0xa3, 0x9f, 0x1f, 0x79,
+	0x39, 0xd4, 0x1a, 0x9c, 0xba, 0x14, 0x37, 0xa0, 0x44, 0xed, 0xe9, 0x44, 0x58, 0xc8, 0x9f, 0x11,
+	0x3f, 0xc5, 0x9e, 0x4e, 0xe6, 0xad, 0x14, 0x99, 0x5a, 0x68, 0x62, 0xc5, 0xa7, 0xde, 0x63, 0x53,
+	0xa7, 0x72, 0x81, 0x1b, 0xb8, 0xba, 0x60, 0xa0, 0x2f, 0xe4, 0xf3, 0x36, 0x22, 0x3d, 0xdc, 0x84,
+	0x12, 0x7d, 0x1a, 0x50, 0xdb, 0x37, 0x1d, 0x5b, 0x5e, 0xe1, 0x46, 0xde, 0x5a, 0x32, 0x8b, 0xd4,
+	0x32, 0xe6, 0x4d, 0xcc, 0xf4, 0xf0, 0x1d, 0x58, 0x71, 0xdc, 0xc0, 0x74, 0x6c, 0x5f, 0x2e, 0x6e,
+	0x4b, 0x3b, 0xe5, 0x9b, 0xaf, 0x2e, 0x4d, 0x84, 0xae, 0xe0, 0xa8, 0x11, 0x19, 0xb7, 0x01, 0xf9,
+	0xce, 0xd4, 0xd3, 0xa9, 0xa6, 0x3b, 0x06, 0xd5, 0x4c, 0x7b, 0xe8, 0xc8, 0x25, 0x6e, 0xe0, 0xf2,
+	0xe2, 0x40, 0x38, 0xb1, 0xe9, 0x18, 0xb4, 0x6d, 0x0f, 0x1d, 0xb5, 0xea, 0xa7, 0xda, 0xf8, 0x02,
+	0x14, 0xfc, 0x53, 0x3b, 0x20, 0x4f, 0xe5, 0x0a, 0xcf, 0x90, 0xb0, 0x55, 0xfb, 0x6b, 0x01, 0xd6,
+	0xce, 0x93, 0x62, 0xf7, 0x20, 0x3f, 0x64, 0xa3, 0x94, 0x33, 0xdf, 0x26, 0x06, 0x42, 0x27, 0x1d,
+	0xc4, 0xc2, 0x77, 0x0c, 0x62, 0x03, 0xca, 0x36, 0xf5, 0x03, 0x6a, 0x88, 0x8c, 0xc8, 0x9e, 0x33,
+	0xa7, 0x40, 0x28, 0x2d, 0xa6, 0x54, 0xee, 0x3b, 0xa5, 0xd4, 0xa7, 0xb0, 0x16, 0xbb, 0xa4, 0x79,
+	0xc4, 0x1e, 0x45, 0xb9, 0xb9, 0xfb, 0x3c, 0x4f, 0xea, 0x4a, 0xa4, 0xa7, 0x32, 0x35, 0xb5, 0x4a,
+	0x53, 0x6d, 0xdc, 0x02, 0x70, 0x6c, 0xea, 0x0c, 0x35, 0x83, 0xea, 0x96, 0x5c, 0x3c, 0x23, 0x4a,
+	0x5d, 0x46, 0x59, 0x88, 0x92, 0x23, 0x50, 0xdd, 0xc2, 0x1f, 0xce, 0x52, 0x6d, 0xe5, 0x8c, 0x4c,
+	0x39, 0x12, 0x8b, 0x6c, 0x21, 0xdb, 0x8e, 0xa1, 0xea, 0x51, 0x96, 0xf7, 0xd4, 0x08, 0x47, 0x56,
+	0xe2, 0x4e, 0xd4, 0x9f, 0x3b, 0x32, 0x35, 0x54, 0x13, 0x03, 0x5b, 0xf5, 0x92, 0x4d, 0xfc, 0x06,
+	0xc4, 0x80, 0xc6, 0xd3, 0x0a, 0xf8, 0x2e, 0x54, 0x89, 0xc0, 0x0e, 0x99, 0xd0, 0xad, 0x2f, 0xa1,
+	0x9a, 0x0e, 0x0f, 0xde, 0x84, 0xbc, 0x1f, 0x10, 0x2f, 0xe0, 0x59, 0x98, 0x57, 0x45, 0x03, 0x23,
+	0xc8, 0x52, 0xdb, 0xe0, 0xbb, 0x5c, 0x5e, 0x65, 0x7f, 0xf1, 0x4f, 0x66, 0x03, 0xce, 0xf2, 0x01,
+	0xbf, 0xbd, 0x38, 0xa3, 0x29, 0xcb, 0xf3, 0xe3, 0xde, 0xfa, 0x00, 0x56, 0x53, 0x03, 0x38, 0x6f,
+	0xd7, 0xb5, 0x5f, 0xc2, 0xcb, 0x4b, 0x4d, 0xe3, 0x4f, 0x61, 0x73, 0x6a, 0x9b, 0x76, 0x40, 0x3d,
+	0xd7, 0xa3, 0x2c, 0x63, 0x45, 0x57, 0xf2, 0x7f, 0x56, 0xce, 0xc8, 0xb9, 0xe3, 0x24, 0x5b, 0x58,
+	0x51, 0x37, 0xa6, 0x8b, 0xe0, 0xf5, 0x52, 0xf1, 0xbf, 0x2b, 0xe8, 0xd9, 0xb3, 0x67, 0xcf, 0x32,
+	0xb5, 0xdf, 0x15, 0x60, 0x73, 0xd9, 0x9a, 0x59, 0xba, 0x7c, 0x2f, 0x40, 0xc1, 0x9e, 0x4e, 0x4e,
+	0xa8, 0xc7, 0x83, 0x94, 0x57, 0xc3, 0x16, 0x6e, 0x40, 0xde, 0x22, 0x27, 0xd4, 0x92, 0x73, 0xdb,
+	0xd2, 0x4e, 0xf5, 0xe6, 0x3b, 0xe7, 0x5a, 0x95, 0xf5, 0x43, 0xa6, 0xa2, 0x0a, 0x4d, 0xfc, 0x11,
+	0xe4, 0xc2, 0x2d, 0x9a, 0x59, 0xb8, 0x7e, 0x3e, 0x0b, 0x6c, 0x2d, 0xa9, 0x5c, 0x0f, 0xbf, 0x02,
+	0x25, 0xf6, 0x2b, 0x72, 0xa3, 0xc0, 0x7d, 0x2e, 0x32, 0x80, 0xe5, 0x05, 0xde, 0x82, 0x22, 0x5f,
+	0x26, 0x06, 0x8d, 0x8e, 0xb6, 0xb8, 0xcd, 0x12, 0xcb, 0xa0, 0x43, 0x32, 0xb5, 0x02, 0xed, 0x31,
+	0xb1, 0xa6, 0x94, 0x27, 0x7c, 0x49, 0xad, 0x84, 0xe0, 0xcf, 0x19, 0x86, 0x2f, 0x43, 0x59, 0xac,
+	0x2a, 0xd3, 0x36, 0xe8, 0x53, 0xbe, 0x7b, 0xe6, 0x55, 0xb1, 0xd0, 0xda, 0x0c, 0x61, 0xdd, 0x3f,
+	0xf4, 0x1d, 0x3b, 0x4a, 0x4d, 0xde, 0x05, 0x03, 0x78, 0xf7, 0x1f, 0xcc, 0x6f, 0xdc, 0xaf, 0x2d,
+	0x1f, 0xde, 0x7c, 0x4e, 0xd5, 0xfe, 0x92, 0x81, 0x1c, 0xdf, 0x2f, 0xd6, 0xa0, 0x3c, 0xf8, 0xac,
+	0xa7, 0x68, 0xad, 0xee, 0xf1, 0xfe, 0xa1, 0x82, 0x24, 0x5c, 0x05, 0xe0, 0xc0, 0xfd, 0xc3, 0x6e,
+	0x63, 0x80, 0x32, 0x71, 0xbb, 0xdd, 0x19, 0xdc, 0xb9, 0x85, 0xb2, 0xb1, 0xc2, 0xb1, 0x00, 0x72,
+	0x49, 0xc2, 0xfb, 0x37, 0x51, 0x1e, 0x23, 0xa8, 0x08, 0x03, 0xed, 0x4f, 0x95, 0xd6, 0x9d, 0x5b,
+	0xa8, 0x90, 0x46, 0xde, 0xbf, 0x89, 0x56, 0xf0, 0x2a, 0x94, 0x38, 0xb2, 0xdf, 0xed, 0x1e, 0xa2,
+	0x62, 0x6c, 0xb3, 0x3f, 0x50, 0xdb, 0x9d, 0x03, 0x54, 0x8a, 0x6d, 0x1e, 0xa8, 0xdd, 0xe3, 0x1e,
+	0x82, 0xd8, 0xc2, 0x91, 0xd2, 0xef, 0x37, 0x0e, 0x14, 0x54, 0x8e, 0x19, 0xfb, 0x9f, 0x0d, 0x94,
+	0x3e, 0xaa, 0xa4, 0xdc, 0x7a, 0xff, 0x26, 0x5a, 0x8d, 0xbb, 0x50, 0x3a, 0xc7, 0x47, 0xa8, 0x8a,
+	0xd7, 0x61, 0x55, 0x74, 0x11, 0x39, 0xb1, 0x36, 0x07, 0xdd, 0xb9, 0x85, 0xd0, 0xcc, 0x11, 0x61,
+	0x65, 0x3d, 0x05, 0xdc, 0xb9, 0x85, 0x70, 0xad, 0x09, 0x79, 0x9e, 0x5d, 0x18, 0x43, 0xf5, 0xb0,
+	0xb1, 0xaf, 0x1c, 0x6a, 0xdd, 0xde, 0xa0, 0xdd, 0xed, 0x34, 0x0e, 0x91, 0x34, 0xc3, 0x54, 0xe5,
+	0x67, 0xc7, 0x6d, 0x55, 0x69, 0xa1, 0x4c, 0x12, 0xeb, 0x29, 0x8d, 0x81, 0xd2, 0x42, 0xd9, 0x9a,
+	0x0e, 0x9b, 0xcb, 0xf6, 0xc9, 0xa5, 0x2b, 0x23, 0x31, 0xc5, 0x99, 0x33, 0xa6, 0x98, 0xdb, 0x5a,
+	0x98, 0xe2, 0x7f, 0x65, 0x60, 0x63, 0xc9, 0x59, 0xb1, 0xb4, 0x93, 0x1f, 0x43, 0x5e, 0xa4, 0xa8,
+	0x38, 0x3d, 0xaf, 0x2d, 0x3d, 0x74, 0x78, 0xc2, 0x2e, 0x9c, 0xa0, 0x5c, 0x2f, 0x59, 0x41, 0x64,
+	0xcf, 0xa8, 0x20, 0x98, 0x89, 0x85, 0x3d, 0xfd, 0x17, 0x0b, 0x7b, 0xba, 0x38, 0xf6, 0xee, 0x9c,
+	0xe7, 0xd8, 0xe3, 0xd8, 0xb7, 0xdb, 0xdb, 0xf3, 0x4b, 0xf6, 0xf6, 0x7b, 0xb0, 0xbe, 0x60, 0xe8,
+	0xdc, 0x7b, 0xec, 0xaf, 0x24, 0x90, 0xcf, 0x0a, 0xce, 0x73, 0x76, 0xba, 0x4c, 0x6a, 0xa7, 0xbb,
+	0x37, 0x1f, 0xc1, 0x2b, 0x67, 0x4f, 0xc2, 0xc2, 0x5c, 0x7f, 0x25, 0xc1, 0x85, 0xe5, 0x95, 0xe2,
+	0x52, 0x1f, 0x3e, 0x82, 0xc2, 0x84, 0x06, 0x63, 0x27, 0xaa, 0x96, 0xde, 0x5e, 0x72, 0x06, 0x33,
+	0xf1, 0xfc, 0x64, 0x87, 0x5a, 0xc9, 0x43, 0x3c, 0x7b, 0x56, 0xb9, 0x27, 0xbc, 0x59, 0xf0, 0xf4,
+	0xd7, 0x19, 0x78, 0x79, 0xa9, 0xf1, 0xa5, 0x8e, 0xbe, 0x06, 0x60, 0xda, 0xee, 0x34, 0x10, 0x15,
+	0x91, 0xd8, 0x60, 0x4b, 0x1c, 0xe1, 0x9b, 0x17, 0xdb, 0x3c, 0xa7, 0x41, 0x2c, 0xcf, 0x72, 0x39,
+	0x08, 0x88, 0x13, 0xee, 0xce, 0x1c, 0xcd, 0x71, 0x47, 0x5f, 0x3f, 0x63, 0xa4, 0x0b, 0x89, 0xf9,
+	0x1e, 0x20, 0xdd, 0x32, 0xa9, 0x1d, 0x68, 0x7e, 0xe0, 0x51, 0x32, 0x31, 0xed, 0x11, 0x3f, 0x41,
+	0x8a, 0x7b, 0xf9, 0x21, 0xb1, 0x7c, 0xaa, 0xae, 0x09, 0x71, 0x3f, 0x92, 0x32, 0x0d, 0x9e, 0x40,
+	0x5e, 0x42, 0xa3, 0x90, 0xd2, 0x10, 0xe2, 0x58, 0xa3, 0xf6, 0xdb, 0x12, 0x94, 0x13, 0x75, 0x35,
+	0xbe, 0x02, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0x88, 0x44, 0x99, 0x61, 0xbd, 0xf0,
+	0xbe, 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0x1e, 0xb4,
+	0x22, 0xa7, 0x62, 0x26, 0xeb, 0x32, 0x51, 0x33, 0x92, 0xe0, 0xdb, 0xb0, 0xc1, 0x35, 0x26, 0x53,
+	0x2b, 0x30, 0x5d, 0x8b, 0x6a, 0xec, 0xf6, 0xe6, 0xf3, 0x93, 0x24, 0xf6, 0x6c, 0x9d, 0x31, 0x8e,
+	0x42, 0x02, 0xf3, 0xc8, 0xc7, 0x2d, 0x78, 0x8d, 0xab, 0x8d, 0xa8, 0x4d, 0x3d, 0x12, 0x50, 0x8d,
+	0x7e, 0x31, 0x25, 0x96, 0xaf, 0x11, 0xdb, 0xd0, 0xc6, 0xc4, 0x1f, 0xcb, 0x9b, 0xcc, 0xc0, 0x7e,
+	0x46, 0x96, 0xd4, 0x4b, 0x8c, 0x78, 0x10, 0xf2, 0x14, 0x4e, 0x6b, 0xd8, 0xc6, 0xc7, 0xc4, 0x1f,
+	0xe3, 0x3d, 0xb8, 0xc0, 0xad, 0xf8, 0x81, 0x67, 0xda, 0x23, 0x4d, 0x1f, 0x53, 0xfd, 0x91, 0x36,
+	0x0d, 0x86, 0x77, 0xe5, 0x57, 0x92, 0xfd, 0x73, 0x0f, 0xfb, 0x9c, 0xd3, 0x64, 0x94, 0xe3, 0x60,
+	0x78, 0x17, 0xf7, 0xa1, 0xc2, 0x26, 0x63, 0x62, 0x7e, 0x49, 0xb5, 0xa1, 0xe3, 0xf1, 0xa3, 0xb1,
+	0xba, 0x64, 0x6b, 0x4a, 0x44, 0xb0, 0xde, 0x0d, 0x15, 0x8e, 0x1c, 0x83, 0xee, 0xe5, 0xfb, 0x3d,
+	0x45, 0x69, 0xa9, 0xe5, 0xc8, 0xca, 0x7d, 0xc7, 0x63, 0x09, 0x35, 0x72, 0xe2, 0x00, 0x97, 0x45,
+	0x42, 0x8d, 0x9c, 0x28, 0xbc, 0xb7, 0x61, 0x43, 0xd7, 0xc5, 0x98, 0x4d, 0x5d, 0x0b, 0xef, 0x58,
+	0xbe, 0x8c, 0x52, 0xc1, 0xd2, 0xf5, 0x03, 0x41, 0x08, 0x73, 0xdc, 0xc7, 0x1f, 0xc2, 0xcb, 0xb3,
+	0x60, 0x25, 0x15, 0xd7, 0x17, 0x46, 0x39, 0xaf, 0x7a, 0x1b, 0x36, 0xdc, 0xd3, 0x45, 0x45, 0x9c,
+	0xea, 0xd1, 0x3d, 0x9d, 0x57, 0xfb, 0x00, 0x36, 0xdd, 0xb1, 0xbb, 0xa8, 0x77, 0x3d, 0xa9, 0x87,
+	0xdd, 0xb1, 0x3b, 0xaf, 0xf8, 0x16, 0xbf, 0x70, 0x7b, 0x54, 0x27, 0x01, 0x35, 0xe4, 0x8b, 0x49,
+	0x7a, 0x42, 0x80, 0x77, 0x01, 0xe9, 0xba, 0x46, 0x6d, 0x72, 0x62, 0x51, 0x8d, 0x78, 0xd4, 0x26,
+	0xbe, 0x7c, 0x39, 0x49, 0xae, 0xea, 0xba, 0xc2, 0xa5, 0x0d, 0x2e, 0xc4, 0xd7, 0x61, 0xdd, 0x39,
+	0x79, 0xa8, 0x8b, 0x94, 0xd4, 0x5c, 0x8f, 0x0e, 0xcd, 0xa7, 0xf2, 0x9b, 0x3c, 0xbe, 0x6b, 0x4c,
+	0xc0, 0x13, 0xb2, 0xc7, 0x61, 0x7c, 0x0d, 0x90, 0xee, 0x8f, 0x89, 0xe7, 0xf2, 0x3d, 0xd9, 0x77,
+	0x89, 0x4e, 0xe5, 0xb7, 0x04, 0x55, 0xe0, 0x9d, 0x08, 0x66, 0x4b, 0xc2, 0x7f, 0x62, 0x0e, 0x83,
+	0xc8, 0xe2, 0x55, 0xb1, 0x24, 0x38, 0x16, 0x5a, 0xdb, 0x01, 0xc4, 0x42, 0x91, 0xea, 0x78, 0x87,
+	0xd3, 0xaa, 0xee, 0xd8, 0x4d, 0xf6, 0xfb, 0x06, 0xac, 0x32, 0xe6, 0xac, 0xd3, 0x6b, 0xa2, 0x20,
+	0x73, 0xc7, 0x89, 0x1e, 0x6f, 0xc1, 0x05, 0x46, 0x9a, 0xd0, 0x80, 0x18, 0x24, 0x20, 0x09, 0xf6,
+	0xbb, 0x9c, 0xcd, 0xe2, 0x7e, 0x14, 0x0a, 0x53, 0x7e, 0x7a, 0xd3, 0x93, 0xd3, 0x38, 0xb3, 0x6e,
+	0x08, 0x3f, 0x19, 0x16, 0xe5, 0xd6, 0xf7, 0x56, 0x74, 0xd7, 0xf6, 0xa0, 0x92, 0x4c, 0x7c, 0x5c,
+	0x02, 0x91, 0xfa, 0x48, 0x62, 0x55, 0x50, 0xb3, 0xdb, 0x62, 0xf5, 0xcb, 0xe7, 0x0a, 0xca, 0xb0,
+	0x3a, 0xea, 0xb0, 0x3d, 0x50, 0x34, 0xf5, 0xb8, 0x33, 0x68, 0x1f, 0x29, 0x28, 0x9b, 0x28, 0xd8,
+	0x1f, 0xe4, 0x8a, 0x6f, 0xa3, 0xab, 0xb5, 0xaf, 0x33, 0x50, 0x4d, 0xdf, 0xc0, 0xf0, 0x8f, 0xe0,
+	0x62, 0xf4, 0x5c, 0xe2, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0x45, 0x4e, 0x88, 0x38, 0x1d, 0xe3,
+	0x9c, 0xd8, 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0xbd, 0x4d, 0x48, 0x80, 0x0f, 0xe1,
+	0xb2, 0xed, 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0xa8, 0xd2, 0x88, 0xae, 0x53,
+	0xdf, 0x77, 0xc4, 0x49, 0x18, 0x5b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x76, 0x44, 0x34, 0x42,
+	0xea, 0x5c, 0xfe, 0x66, 0xcf, 0xca, 0xdf, 0x57, 0xa0, 0x34, 0x21, 0xae, 0x46, 0xed, 0xc0, 0x3b,
+	0xe5, 0x75, 0x77, 0x51, 0x2d, 0x4e, 0x88, 0xab, 0xb0, 0xf6, 0x0b, 0xb9, 0xfe, 0x3c, 0xc8, 0x15,
+	0x8b, 0xa8, 0xf4, 0x20, 0x57, 0x2c, 0x21, 0xa8, 0xfd, 0x33, 0x0b, 0x95, 0x64, 0x1d, 0xce, 0xae,
+	0x35, 0x3a, 0x3f, 0xb2, 0x24, 0xbe, 0xa9, 0xbd, 0xf1, 0x8d, 0x55, 0x7b, 0xbd, 0xc9, 0xce, 0xb2,
+	0xbd, 0x82, 0xa8, 0x8e, 0x55, 0xa1, 0xc9, 0xea, 0x08, 0x96, 0x6c, 0x54, 0x54, 0x23, 0x45, 0x35,
+	0x6c, 0xe1, 0x03, 0x28, 0x3c, 0xf4, 0xb9, 0xed, 0x02, 0xb7, 0xfd, 0xe6, 0x37, 0xdb, 0x7e, 0xd0,
+	0xe7, 0xc6, 0x4b, 0x0f, 0xfa, 0x5a, 0xa7, 0xab, 0x1e, 0x35, 0x0e, 0xd5, 0x50, 0x1d, 0x5f, 0x82,
+	0x9c, 0x45, 0xbe, 0x3c, 0x4d, 0x9f, 0x7a, 0x1c, 0x3a, 0xef, 0x24, 0x5c, 0x82, 0xdc, 0x13, 0x4a,
+	0x1e, 0xa5, 0xcf, 0x1a, 0x0e, 0x7d, 0x8f, 0x8b, 0x61, 0x17, 0xf2, 0x3c, 0x5e, 0x18, 0x20, 0x8c,
+	0x18, 0x7a, 0x09, 0x17, 0x21, 0xd7, 0xec, 0xaa, 0x6c, 0x41, 0x20, 0xa8, 0x08, 0x54, 0xeb, 0xb5,
+	0x95, 0xa6, 0x82, 0x32, 0xb5, 0xdb, 0x50, 0x10, 0x41, 0x60, 0x8b, 0x25, 0x0e, 0x03, 0x7a, 0x29,
+	0x6c, 0x86, 0x36, 0xa4, 0x48, 0x7a, 0x7c, 0xb4, 0xaf, 0xa8, 0x28, 0x93, 0x9e, 0xea, 0x1c, 0xca,
+	0xd7, 0x7c, 0xa8, 0x24, 0x0b, 0xf1, 0x17, 0x73, 0xc9, 0xfe, 0x9b, 0x04, 0xe5, 0x44, 0x61, 0xcd,
+	0x2a, 0x22, 0x62, 0x59, 0xce, 0x13, 0x8d, 0x58, 0x26, 0xf1, 0xc3, 0xd4, 0x00, 0x0e, 0x35, 0x18,
+	0x72, 0xde, 0xa9, 0x7b, 0x41, 0x4b, 0x24, 0x8f, 0x0a, 0xb5, 0x3f, 0x4a, 0x80, 0xe6, 0x2b, 0xdb,
+	0x39, 0x37, 0xa5, 0x1f, 0xd2, 0xcd, 0xda, 0x1f, 0x24, 0xa8, 0xa6, 0xcb, 0xd9, 0x39, 0xf7, 0xae,
+	0xfc, 0xa0, 0xee, 0xfd, 0x23, 0x03, 0xab, 0xa9, 0x22, 0xf6, 0xbc, 0xde, 0x7d, 0x01, 0xeb, 0xa6,
+	0x41, 0x27, 0xae, 0x13, 0x50, 0x5b, 0x3f, 0xd5, 0x2c, 0xfa, 0x98, 0x5a, 0x72, 0x8d, 0x6f, 0x1a,
+	0xbb, 0xdf, 0x5c, 0x26, 0xd7, 0xdb, 0x33, 0xbd, 0x43, 0xa6, 0xb6, 0xb7, 0xd1, 0x6e, 0x29, 0x47,
+	0xbd, 0xee, 0x40, 0xe9, 0x34, 0x3f, 0xd3, 0x8e, 0x3b, 0x3f, 0xed, 0x74, 0x3f, 0xe9, 0xa8, 0xc8,
+	0x9c, 0xa3, 0x7d, 0x8f, 0xcb, 0xbe, 0x07, 0x68, 0xde, 0x29, 0x7c, 0x11, 0x96, 0xb9, 0x85, 0x5e,
+	0xc2, 0x1b, 0xb0, 0xd6, 0xe9, 0x6a, 0xfd, 0x76, 0x4b, 0xd1, 0x94, 0xfb, 0xf7, 0x95, 0xe6, 0xa0,
+	0x2f, 0x1e, 0x3e, 0x62, 0xf6, 0x20, 0xb5, 0xc0, 0x6b, 0xbf, 0xcf, 0xc2, 0xc6, 0x12, 0x4f, 0x70,
+	0x23, 0xbc, 0xb2, 0x88, 0x5b, 0xd4, 0x8d, 0xf3, 0x78, 0x5f, 0x67, 0x35, 0x43, 0x8f, 0x78, 0x41,
+	0x78, 0xc3, 0xb9, 0x06, 0x2c, 0x4a, 0x76, 0x60, 0x0e, 0x4d, 0xea, 0x85, 0xef, 0x44, 0xe2, 0x1e,
+	0xb3, 0x36, 0xc3, 0xc5, 0x53, 0xd1, 0xbb, 0x80, 0x5d, 0xc7, 0x37, 0x03, 0xf3, 0x31, 0xd5, 0x4c,
+	0x3b, 0x7a, 0x54, 0x62, 0xf7, 0x9a, 0x9c, 0x8a, 0x22, 0x49, 0xdb, 0x0e, 0x62, 0xb6, 0x4d, 0x47,
+	0x64, 0x8e, 0xcd, 0x36, 0xf3, 0xac, 0x8a, 0x22, 0x49, 0xcc, 0xbe, 0x02, 0x15, 0xc3, 0x99, 0xb2,
+	0x62, 0x4f, 0xf0, 0xd8, 0xd9, 0x21, 0xa9, 0x65, 0x81, 0xc5, 0x94, 0xb0, 0x8c, 0x9f, 0xbd, 0x66,
+	0x55, 0xd4, 0xb2, 0xc0, 0x04, 0xe5, 0x2a, 0xac, 0x91, 0xd1, 0xc8, 0x63, 0xc6, 0x23, 0x43, 0xe2,
+	0x62, 0x52, 0x8d, 0x61, 0x4e, 0xdc, 0x7a, 0x00, 0xc5, 0x28, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0xd0,
+	0x5c, 0x71, 0xdb, 0xce, 0xec, 0x94, 0xd4, 0xa2, 0x1d, 0x09, 0xaf, 0x40, 0xc5, 0xf4, 0xb5, 0xd9,
+	0xe3, 0x7c, 0x66, 0x3b, 0xb3, 0x53, 0x54, 0xcb, 0xa6, 0x1f, 0x3f, 0x6c, 0xd6, 0xbe, 0xca, 0x40,
+	0x35, 0xfd, 0x71, 0x01, 0xb7, 0xa0, 0x68, 0x39, 0x3a, 0xe1, 0xa9, 0x25, 0xbe, 0x6c, 0xed, 0x3c,
+	0xe7, 0x7b, 0x44, 0xfd, 0x30, 0xe4, 0xab, 0xb1, 0xe6, 0xd6, 0xdf, 0x25, 0x28, 0x46, 0x30, 0xbe,
+	0x00, 0x39, 0x97, 0x04, 0x63, 0x6e, 0x2e, 0xbf, 0x9f, 0x41, 0x92, 0xca, 0xdb, 0x0c, 0xf7, 0x5d,
+	0x62, 0xf3, 0x14, 0x08, 0x71, 0xd6, 0x66, 0xf3, 0x6a, 0x51, 0x62, 0xf0, 0x5b, 0x8f, 0x33, 0x99,
+	0x50, 0x3b, 0xf0, 0xa3, 0x79, 0x0d, 0xf1, 0x66, 0x08, 0xe3, 0x77, 0x60, 0x3d, 0xf0, 0x88, 0x69,
+	0xa5, 0xb8, 0x39, 0xce, 0x45, 0x91, 0x20, 0x26, 0xef, 0xc1, 0xa5, 0xc8, 0xae, 0x41, 0x03, 0xa2,
+	0x8f, 0xa9, 0x31, 0x53, 0x2a, 0xf0, 0xd7, 0x8d, 0x8b, 0x21, 0xa1, 0x15, 0xca, 0x23, 0xdd, 0xda,
+	0xd7, 0x12, 0xac, 0x47, 0xf7, 0x34, 0x23, 0x0e, 0xd6, 0x11, 0x00, 0xb1, 0x6d, 0x27, 0x48, 0x86,
+	0x6b, 0x31, 0x95, 0x17, 0xf4, 0xea, 0x8d, 0x58, 0x49, 0x4d, 0x18, 0xd8, 0x9a, 0x00, 0xcc, 0x24,
+	0x67, 0x86, 0xed, 0x32, 0x94, 0xc3, 0x2f, 0x47, 0xfc, 0xf3, 0xa3, 0xb8, 0xd9, 0x83, 0x80, 0xd8,
+	0x85, 0x0e, 0x6f, 0x42, 0xfe, 0x84, 0x8e, 0x4c, 0x3b, 0x7c, 0x0f, 0x16, 0x8d, 0xe8, 0xfd, 0x25,
+	0x17, 0xbf, 0xbf, 0xec, 0xff, 0x46, 0x82, 0x0d, 0xdd, 0x99, 0xcc, 0xfb, 0xbb, 0x8f, 0xe6, 0x9e,
+	0x17, 0xfc, 0x8f, 0xa5, 0xcf, 0x3f, 0x1a, 0x99, 0xc1, 0x78, 0x7a, 0x52, 0xd7, 0x9d, 0xc9, 0xee,
+	0xc8, 0xb1, 0x88, 0x3d, 0x9a, 0x7d, 0x3f, 0xe5, 0x7f, 0xf4, 0x1b, 0x23, 0x6a, 0xdf, 0x18, 0x39,
+	0x89, 0xaf, 0xa9, 0xf7, 0x66, 0x7f, 0xff, 0x27, 0x49, 0x7f, 0xca, 0x64, 0x0f, 0x7a, 0xfb, 0x7f,
+	0xce, 0x6c, 0x1d, 0x88, 0xee, 0x7a, 0x51, 0x78, 0x54, 0x3a, 0xb4, 0xa8, 0xce, 0x86, 0xfc, 0xff,
+	0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xe8, 0xef, 0xc4, 0x9b, 0x1d, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
new file mode 100644
index 0000000..ed08fcb
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
@@ -0,0 +1,883 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//  Based on original Protocol Buffers design by
+//  Sanjay Ghemawat, Jeff Dean, and others.
+//
+// The messages in this file describe the definitions found in .proto files.
+// A valid .proto file can be translated directly to a FileDescriptorProto
+// without any other information (e.g. without reading its imports).
+
+
+syntax = "proto2";
+
+package google.protobuf;
+option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DescriptorProtos";
+option csharp_namespace = "Google.Protobuf.Reflection";
+option objc_class_prefix = "GPB";
+option cc_enable_arenas = true;
+
+// descriptor.proto must be optimized for speed because reflection-based
+// algorithms don't work during bootstrapping.
+option optimize_for = SPEED;
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+message FileDescriptorSet {
+  repeated FileDescriptorProto file = 1;
+}
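+
+// For example (illustrative), a FileDescriptorSet for this file could be
+// produced with:
+//   protoc --include_source_info --descriptor_set_out=out.pb descriptor.proto
+// and then parsed by unmarshalling out.pb into the message above.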
+
+// Describes a complete .proto file.
+message FileDescriptorProto {
+  optional string name = 1;       // file name, relative to root of source tree
+  optional string package = 2;    // e.g. "foo", "foo.bar", etc.
+
+  // Names of files imported by this file.
+  repeated string dependency = 3;
+  // Indexes of the public imported files in the dependency list above.
+  repeated int32 public_dependency = 10;
+  // Indexes of the weak imported files in the dependency list.
+  // For Google-internal migration only. Do not use.
+  repeated int32 weak_dependency = 11;
+
+  // All top-level definitions in this file.
+  repeated DescriptorProto message_type = 4;
+  repeated EnumDescriptorProto enum_type = 5;
+  repeated ServiceDescriptorProto service = 6;
+  repeated FieldDescriptorProto extension = 7;
+
+  optional FileOptions options = 8;
+
+  // This field contains optional information about the original source code.
+  // You may safely remove this entire field without harming runtime
+  // functionality of the descriptors -- the information is needed only by
+  // development tools.
+  optional SourceCodeInfo source_code_info = 9;
+
+  // The syntax of the proto file.
+  // The supported values are "proto2" and "proto3".
+  optional string syntax = 12;
+}
+
+// Describes a message type.
+message DescriptorProto {
+  optional string name = 1;
+
+  repeated FieldDescriptorProto field = 2;
+  repeated FieldDescriptorProto extension = 6;
+
+  repeated DescriptorProto nested_type = 3;
+  repeated EnumDescriptorProto enum_type = 4;
+
+  message ExtensionRange {
+    optional int32 start = 1;
+    optional int32 end = 2;
+
+    optional ExtensionRangeOptions options = 3;
+  }
+  repeated ExtensionRange extension_range = 5;
+
+  repeated OneofDescriptorProto oneof_decl = 8;
+
+  optional MessageOptions options = 7;
+
+  // Range of reserved tag numbers. Reserved tag numbers may not be used by
+  // fields or extension ranges in the same message. Reserved ranges may
+  // not overlap.
+  message ReservedRange {
+    optional int32 start = 1; // Inclusive.
+    optional int32 end = 2;   // Exclusive.
+  }
+  repeated ReservedRange reserved_range = 9;
+  // Reserved field names, which may not be used by fields in the same message.
+  // A given name may only be reserved once.
+  repeated string reserved_name = 10;
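+
+  // For example (illustrative), a message declared as:
+  //   message Foo {
+  //     reserved 2, 9 to 11;
+  //     reserved "bar", "baz";
+  //   }
+  // is represented with reserved_range entries [2,3) and [9,12) (end is
+  // exclusive) and with reserved_name holding "bar" and "baz".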
+}
+
+message ExtensionRangeOptions {
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+// Describes a field within a message.
+message FieldDescriptorProto {
+  enum Type {
+    // 0 is reserved for errors.
+    // Order is weird for historical reasons.
+    TYPE_DOUBLE         = 1;
+    TYPE_FLOAT          = 2;
+    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if
+    // negative values are likely.
+    TYPE_INT64          = 3;
+    TYPE_UINT64         = 4;
+    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if
+    // negative values are likely.
+    TYPE_INT32          = 5;
+    TYPE_FIXED64        = 6;
+    TYPE_FIXED32        = 7;
+    TYPE_BOOL           = 8;
+    TYPE_STRING         = 9;
+    // Tag-delimited aggregate.
+    // Group type is deprecated and not supported in proto3. However, Proto3
+    // implementations should still be able to parse the group wire format and
+    // treat group fields as unknown fields.
+    TYPE_GROUP          = 10;
+    TYPE_MESSAGE        = 11;  // Length-delimited aggregate.
+
+    // New in version 2.
+    TYPE_BYTES          = 12;
+    TYPE_UINT32         = 13;
+    TYPE_ENUM           = 14;
+    TYPE_SFIXED32       = 15;
+    TYPE_SFIXED64       = 16;
+    TYPE_SINT32         = 17;  // Uses ZigZag encoding.
+    TYPE_SINT64         = 18;  // Uses ZigZag encoding.
+  };
+
+  enum Label {
+    // 0 is reserved for errors
+    LABEL_OPTIONAL      = 1;
+    LABEL_REQUIRED      = 2;
+    LABEL_REPEATED      = 3;
+  };
+
+  optional string name = 1;
+  optional int32 number = 3;
+  optional Label label = 4;
+
+  // If type_name is set, this need not be set.  If both this and type_name
+  // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+  optional Type type = 5;
+
+  // For message and enum types, this is the name of the type.  If the name
+  // starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping
+  // rules are used to find the type (i.e. first the nested types within this
+  // message are searched, then within the parent, on up to the root
+  // namespace).
+  optional string type_name = 6;
+
+  // For extensions, this is the name of the type being extended.  It is
+  // resolved in the same manner as type_name.
+  optional string extendee = 2;
+
+  // For numeric types, contains the original text representation of the value.
+  // For booleans, "true" or "false".
+  // For strings, contains the default text contents (not escaped in any way).
+  // For bytes, contains the C escaped value.  All bytes >= 128 are escaped.
+  // TODO(kenton):  Base-64 encode?
+  optional string default_value = 7;
+
+  // If set, gives the index of a oneof in the containing type's oneof_decl
+  // list.  This field is a member of that oneof.
+  optional int32 oneof_index = 9;
+
+  // JSON name of this field. The value is set by the protocol compiler. If the
+  // user has set a "json_name" option on this field, that option's value
+  // will be used. Otherwise, it's deduced from the field's name by converting
+  // it to camelCase.
+  optional string json_name = 10;
+
+  optional FieldOptions options = 8;
+}
+
+// Describes a oneof.
+message OneofDescriptorProto {
+  optional string name = 1;
+  optional OneofOptions options = 2;
+}
+
+// Describes an enum type.
+message EnumDescriptorProto {
+  optional string name = 1;
+
+  repeated EnumValueDescriptorProto value = 2;
+
+  optional EnumOptions options = 3;
+
+  // Range of reserved numeric values. Reserved values may not be used by
+  // entries in the same enum. Reserved ranges may not overlap.
+  //
+  // Note that this is distinct from DescriptorProto.ReservedRange in that it
+  // is inclusive such that it can appropriately represent the entire int32
+  // domain.
+  message EnumReservedRange {
+    optional int32 start = 1; // Inclusive.
+    optional int32 end = 2;   // Inclusive.
+  }
+
+  // Range of reserved numeric values. Reserved numeric values may not be used
+  // by enum values in the same enum declaration. Reserved ranges may not
+  // overlap.
+  repeated EnumReservedRange reserved_range = 4;
+
+  // Reserved enum value names, which may not be reused. A given name may only
+  // be reserved once.
+  repeated string reserved_name = 5;
+}
+
+// Describes a value within an enum.
+message EnumValueDescriptorProto {
+  optional string name = 1;
+  optional int32 number = 2;
+
+  optional EnumValueOptions options = 3;
+}
+
+// Describes a service.
+message ServiceDescriptorProto {
+  optional string name = 1;
+  repeated MethodDescriptorProto method = 2;
+
+  optional ServiceOptions options = 3;
+}
+
+// Describes a method of a service.
+message MethodDescriptorProto {
+  optional string name = 1;
+
+  // Input and output type names.  These are resolved in the same way as
+  // FieldDescriptorProto.type_name, but must refer to a message type.
+  optional string input_type = 2;
+  optional string output_type = 3;
+
+  optional MethodOptions options = 4;
+
+  // Identifies whether the client streams multiple client messages.
+  optional bool client_streaming = 5 [default=false];
+  // Identifies whether the server streams multiple server messages.
+  optional bool server_streaming = 6 [default=false];
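+
+  // For example (illustrative), a method declared as:
+  //   rpc Chat(stream Request) returns (stream Response);
+  // has both client_streaming and server_streaming set to true.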
+}
+
+
+// ===================================================================
+// Options
+
+// Each of the definitions above may have "options" attached.  These are
+// just annotations which may cause code to be generated slightly differently
+// or may contain hints for code that manipulates protocol messages.
+//
+// Clients may define custom options as extensions of the *Options messages.
+// These extensions may not yet be known at parsing time, so the parser cannot
+// store the values in them.  Instead it stores them in a field in the *Options
+// message called uninterpreted_option. This field must have the same name
+// across all *Options messages. We then use this field to populate the
+// extensions when we build a descriptor, at which point all protos have been
+// parsed and so all extensions are known.
+//
+// Extension numbers for custom options may be chosen as follows:
+// * For options which will only be used within a single application or
+//   organization, or for experimental options, use field numbers 50000
+//   through 99999.  It is up to you to ensure that you do not use the
+//   same number for multiple options.
+// * For options which will be published and used publicly by multiple
+//   independent entities, e-mail protobuf-global-extension-registry@google.com
+//   to reserve extension numbers. Simply provide your project name (e.g.
+//   Objective-C plugin) and your project website (if available) -- there's no
+//   need to explain how you intend to use them. Usually you only need one
+//   extension number. You can declare multiple options with only one extension
+//   number by putting them in a sub-message. See the Custom Options section of
+//   the docs for examples:
+//   https://developers.google.com/protocol-buffers/docs/proto#options
+//   If this turns out to be popular, a web service will be set up
+//   to automatically assign option numbers.
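+//
+// As an illustrative sketch (the option name and number below are
+// hypothetical), a custom field option in the experimental range could be
+// declared and applied like this:
+//
+//   import "google/protobuf/descriptor.proto";
+//
+//   extend google.protobuf.FieldOptions {
+//     optional string my_field_note = 50001;
+//   }
+//
+//   message MyMessage {
+//     optional int32 foo = 1 [(my_field_note) = "tracked manually"];
+//   }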
+
+
+message FileOptions {
+
+  // Sets the Java package where classes generated from this .proto will be
+  // placed.  By default, the proto package is used, but this is often
+  // inappropriate because proto packages do not normally start with backwards
+  // domain names.
+  optional string java_package = 1;
+
+
+  // If set, all the classes from the .proto file are wrapped in a single
+  // outer class with the given name.  This applies to both Proto1
+  // (equivalent to the old "--one_java_file" option) and Proto2 (where
+  // a .proto always translates to a single class, but you may want to
+  // explicitly choose the class name).
+  optional string java_outer_classname = 8;
+
+  // If set true, then the Java code generator will generate a separate .java
+  // file for each top-level message, enum, and service defined in the .proto
+  // file.  Thus, these types will *not* be nested inside the outer class
+  // named by java_outer_classname.  However, the outer class will still be
+  // generated to contain the file's getDescriptor() method as well as any
+  // top-level extensions defined in the file.
+  optional bool java_multiple_files = 10 [default=false];
+
+  // This option does nothing.
+  optional bool java_generate_equals_and_hash = 20 [deprecated=true];
+
+  // If set true, then the Java2 code generator will generate code that
+  // throws an exception whenever an attempt is made to assign a non-UTF-8
+  // byte sequence to a string field.
+  // Message reflection will do the same.
+  // However, an extension field still accepts non-UTF-8 byte sequences.
+  // This option has no effect when used with the lite runtime.
+  optional bool java_string_check_utf8 = 27 [default=false];
+
+
+  // Generated classes can be optimized for speed or code size.
+  enum OptimizeMode {
+    SPEED = 1;        // Generate complete code for parsing, serialization,
+                      // etc.
+    CODE_SIZE = 2;    // Use ReflectionOps to implement these methods.
+    LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
+  }
+  optional OptimizeMode optimize_for = 9 [default=SPEED];
+
+  // Sets the Go package where structs generated from this .proto will be
+  // placed. If omitted, the Go package will be derived from the following:
+  //   - The basename of the package import path, if provided.
+  //   - Otherwise, the package statement in the .proto file, if present.
+  //   - Otherwise, the basename of the .proto file, without extension.
+  optional string go_package = 11;
+
+
+
+  // Should generic services be generated in each language?  "Generic" services
+  // are not specific to any particular RPC system.  They are generated by the
+  // main code generators in each language (without additional plugins).
+  // Generic services were the only kind of service generation supported by
+  // early versions of google.protobuf.
+  //
+  // Generic services are now considered deprecated in favor of using plugins
+  // that generate code specific to your particular RPC system.  Therefore,
+  // these default to false.  Old code which depends on generic services should
+  // explicitly set them to true.
+  optional bool cc_generic_services = 16 [default=false];
+  optional bool java_generic_services = 17 [default=false];
+  optional bool py_generic_services = 18 [default=false];
+  optional bool php_generic_services = 42 [default=false];
+
+  // Is this file deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for everything in the file, or it will be completely ignored; in the very
+  // least, this is a formalization for deprecating files.
+  optional bool deprecated = 23 [default=false];
+
+  // Enables the use of arenas for the proto messages in this file. This applies
+  // only to generated classes for C++.
+  optional bool cc_enable_arenas = 31 [default=false];
+
+
+  // Sets the objective c class prefix which is prepended to all objective c
+  // generated classes from this .proto. There is no default.
+  optional string objc_class_prefix = 36;
+
+  // Namespace for generated classes; defaults to the package.
+  optional string csharp_namespace = 37;
+
+  // By default Swift generators will take the proto package and CamelCase it
+  // replacing '.' with underscore and use that to prefix the types/symbols
+  // defined. When this option is provided, they will use this value instead
+  // to prefix the types/symbols defined.
+  optional string swift_prefix = 39;
+
+  // Sets the php class prefix which is prepended to all php generated classes
+  // from this .proto. Default is empty.
+  optional string php_class_prefix = 40;
+
+  // Use this option to change the namespace of php generated classes. Default
+  // is empty. When this option is empty, the package name will be used for
+  // determining the namespace.
+  optional string php_namespace = 41;
+
+
+  // Use this option to change the namespace of php generated metadata classes.
+  // Default is empty. When this option is empty, the proto file name will be used
+  // for determining the namespace.
+  optional string php_metadata_namespace = 44;
+
+  // Use this option to change the package of ruby generated classes. Default
+  // is empty. When this option is not set, the package name will be used for
+  // determining the ruby package.
+  optional string ruby_package = 45;
+
+  // The parser stores options it doesn't recognize here.
+  // See the documentation for the "Options" section above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message.
+  // See the documentation for the "Options" section above.
+  extensions 1000 to max;
+
+  reserved 38;
+}
+
+message MessageOptions {
+  // Set true to use the old proto1 MessageSet wire format for extensions.
+  // This is provided for backwards-compatibility with the MessageSet wire
+  // format.  You should not use this for any other reason:  It's less
+  // efficient, has fewer features, and is more complicated.
+  //
+  // The message must be defined exactly as follows:
+  //   message Foo {
+  //     option message_set_wire_format = true;
+  //     extensions 4 to max;
+  //   }
+  // Note that the message cannot have any defined fields; MessageSets only
+  // have extensions.
+  //
+  // All extensions of your type must be singular messages; e.g. they cannot
+  // be int32s, enums, or repeated messages.
+  //
+  // Because this is an option, the above two restrictions are not enforced by
+  // the protocol compiler.
+  optional bool message_set_wire_format = 1 [default=false];
+
+  // Disables the generation of the standard "descriptor()" accessor, which can
+  // conflict with a field of the same name.  This is meant to make migration
+  // from proto1 easier; new code should avoid fields named "descriptor".
+  optional bool no_standard_descriptor_accessor = 2 [default=false];
+
+  // Is this message deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the message, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating messages.
+  optional bool deprecated = 3 [default=false];
+
+  // Whether the message is an automatically generated map entry type for the
+  // maps field.
+  //
+  // For maps fields:
+  //     map<KeyType, ValueType> map_field = 1;
+  // The parsed descriptor looks like:
+  //     message MapFieldEntry {
+  //         option map_entry = true;
+  //         optional KeyType key = 1;
+  //         optional ValueType value = 2;
+  //     }
+  //     repeated MapFieldEntry map_field = 1;
+  //
+  // Implementations may choose not to generate the map_entry=true message, but
+  // use a native map in the target language to hold the keys and values.
+  // The reflection APIs in such implementations still need to work as
+  // if the field is a repeated message field.
+  //
+  // NOTE: Do not set the option in .proto files. Always use the maps syntax
+  // instead. The option should only be implicitly set by the proto compiler
+  // parser.
+  optional bool map_entry = 7;
+
+  reserved 8;  // javalite_serializable
+  reserved 9;  // javanano_as_lite
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message FieldOptions {
+  // The ctype option instructs the C++ code generator to use a different
+  // representation of the field than it normally would.  See the specific
+  // options below.  This option is not yet implemented in the open source
+  // release -- sorry, we'll try to include it in a future version!
+  optional CType ctype = 1 [default = STRING];
+  enum CType {
+    // Default mode.
+    STRING = 0;
+
+    CORD = 1;
+
+    STRING_PIECE = 2;
+  }
+  // The packed option can be enabled for repeated primitive fields to enable
+  // a more efficient representation on the wire. Rather than repeatedly
+  // writing the tag and type for each element, the entire array is encoded as
+  // a single length-delimited blob. In proto3, only explicitly setting it to
+  // false will avoid using packed encoding.
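+  //
+  // For example (an illustrative field, not part of this file):
+  //   repeated int32 samples = 4 [packed=true];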
+  optional bool packed = 2;
+
+  // The jstype option determines the JavaScript type used for values of the
+  // field.  The option is permitted only for 64 bit integral and fixed types
+  // (int64, uint64, sint64, fixed64, sfixed64).  A field with jstype JS_STRING
+  // is represented as a JavaScript string, which avoids loss of precision that
+  // can happen when a large value is converted to a floating point JavaScript
+  // number.
+  // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
+  // use the JavaScript "number" type.  The behavior of the default option
+  // JS_NORMAL is implementation dependent.
+  //
+  // This option is an enum to permit additional types to be added, e.g.
+  // goog.math.Integer.
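+  //
+  // For example (illustrative): forcing string representation of a 64-bit id:
+  //   optional int64 big_id = 1 [jstype = JS_STRING];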
+  optional JSType jstype = 6 [default = JS_NORMAL];
+  enum JSType {
+    // Use the default type.
+    JS_NORMAL = 0;
+
+    // Use JavaScript strings.
+    JS_STRING = 1;
+
+    // Use JavaScript numbers.
+    JS_NUMBER = 2;
+  }
+
+  // Should this field be parsed lazily?  Lazy applies only to message-type
+  // fields.  It means that when the outer message is initially parsed, the
+  // inner message's contents will not be parsed but instead stored in encoded
+  // form.  The inner message will actually be parsed when it is first accessed.
+  //
+  // This is only a hint.  Implementations are free to choose whether to use
+  // eager or lazy parsing regardless of the value of this option.  However,
+  // setting this option true suggests that the protocol author believes that
+  // using lazy parsing on this field is worth the additional bookkeeping
+  // overhead typically needed to implement it.
+  //
+  // This option does not affect the public interface of any generated code;
+  // all method signatures remain the same.  Furthermore, thread-safety of the
+  // interface is not affected by this option; const methods remain safe to
+  // call from multiple threads concurrently, while non-const methods continue
+  // to require exclusive access.
+  //
+  //
+  // Note that implementations may choose not to check required fields within
+  // a lazy sub-message.  That is, calling IsInitialized() on the outer message
+  // may return true even if the inner message has missing required fields.
+  // This is necessary because otherwise the inner message would have to be
+  // parsed in order to perform the check, defeating the purpose of lazy
+  // parsing.  An implementation which chooses not to check required fields
+  // must be consistent about it.  That is, for any particular sub-message, the
+  // implementation must either *always* check its required fields, or *never*
+  // check its required fields, regardless of whether or not the message has
+  // been parsed.
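+  //
+  // For example (illustrative; InnerMessage is a hypothetical message type):
+  //   optional InnerMessage payload = 1 [lazy = true];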
+  optional bool lazy = 5 [default=false];
+
+  // Is this field deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for accessors, or it will be completely ignored; at the very least, this
+  // is a formalization for deprecating fields.
+  optional bool deprecated = 3 [default=false];
+
+  // For Google-internal migration only. Do not use.
+  optional bool weak = 10 [default=false];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+
+  reserved 4;  // removed jtype
+}
+
+message OneofOptions {
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message EnumOptions {
+
+  // Set this option to true to allow mapping different tag names to the same
+  // value.
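+  //
+  // For example (an illustrative enum, not part of this file):
+  //   enum Status {
+  //     option allow_alias = true;
+  //     UNKNOWN = 0;
+  //     STARTED = 1;
+  //     RUNNING = 1;  // alias of STARTED
+  //   }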
+  optional bool allow_alias = 2;
+
+  // Is this enum deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the enum, or it will be completely ignored; at the very least, this
+  // is a formalization for deprecating enums.
+  optional bool deprecated = 3 [default=false];
+
+  reserved 5;  // javanano_as_lite
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message EnumValueOptions {
+  // Is this enum value deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the enum value, or it will be completely ignored; at the very least,
+  // this is a formalization for deprecating enum values.
+  optional bool deprecated = 1 [default=false];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message ServiceOptions {
+
+  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC
+  //   framework.  We apologize for hoarding these numbers to ourselves, but
+  //   we were already using them long before we decided to release Protocol
+  //   Buffers.
+
+  // Is this service deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the service, or it will be completely ignored; at the very least,
+  // this is a formalization for deprecating services.
+  optional bool deprecated = 33 [default=false];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message MethodOptions {
+
+  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC
+  //   framework.  We apologize for hoarding these numbers to ourselves, but
+  //   we were already using them long before we decided to release Protocol
+  //   Buffers.
+
+  // Is this method deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the method, or it will be completely ignored; at the very least,
+  // this is a formalization for deprecating methods.
+  optional bool deprecated = 33 [default=false];
+
+  // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+  // or neither? An HTTP-based RPC implementation may choose the GET verb for
+  // safe methods, and the PUT verb for idempotent methods, instead of the
+  // default POST.
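+  //
+  // For example (illustrative; GetFoo is a hypothetical method):
+  //   rpc GetFoo(GetFooRequest) returns (Foo) {
+  //     option idempotency_level = NO_SIDE_EFFECTS;
+  //   }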
+  enum IdempotencyLevel {
+    IDEMPOTENCY_UNKNOWN = 0;
+    NO_SIDE_EFFECTS     = 1; // implies idempotent
+    IDEMPOTENT          = 2; // idempotent, but may have side effects
+  }
+  optional IdempotencyLevel idempotency_level =
+      34 [default=IDEMPOTENCY_UNKNOWN];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+message UninterpretedOption {
+  // The name of the uninterpreted option.  Each string represents a segment in
+  // a dot-separated name.  is_extension is true iff a segment represents an
+  // extension (denoted with parentheses in options specs in .proto files).
+  // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
+  // "foo.(bar.baz).qux".
+  message NamePart {
+    required string name_part = 1;
+    required bool is_extension = 2;
+  }
+  repeated NamePart name = 2;
+
+  // The value of the uninterpreted option, in whatever type the tokenizer
+  // identified it as during parsing. Exactly one of these should be set.
+  optional string identifier_value = 3;
+  optional uint64 positive_int_value = 4;
+  optional int64 negative_int_value = 5;
+  optional double double_value = 6;
+  optional bytes string_value = 7;
+  optional string aggregate_value = 8;
+}
+
+// ===================================================================
+// Optional source code info
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+message SourceCodeInfo {
+  // A Location identifies a piece of source code in a .proto file which
+  // corresponds to a particular definition.  This information is intended
+  // to be useful to IDEs, code indexers, documentation generators, and similar
+  // tools.
+  //
+  // For example, say we have a file like:
+  //   message Foo {
+  //     optional string foo = 1;
+  //   }
+  // Let's look at just the field definition:
+  //   optional string foo = 1;
+  //   ^       ^^     ^^  ^  ^^^
+  //   a       bc     de  f  ghi
+  // We have the following locations:
+  //   span   path               represents
+  //   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
+  //   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
+  //   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
+  //   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
+  //   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
+  //
+  // Notes:
+  // - A location may refer to a repeated field itself (i.e. not to any
+  //   particular index within it).  This is used whenever a set of elements are
+  //   logically enclosed in a single code segment.  For example, an entire
+  //   extend block (possibly containing multiple extension definitions) will
+  //   have an outer location whose path refers to the "extensions" repeated
+  //   field without an index.
+  // - Multiple locations may have the same path.  This happens when a single
+  //   logical declaration is spread out across multiple places.  The most
+  //   obvious example is the "extend" block again -- there may be multiple
+  //   extend blocks in the same scope, each of which will have the same path.
+  // - A location's span is not always a subset of its parent's span.  For
+  //   example, the "extendee" of an extension declaration appears at the
+  //   beginning of the "extend" block and is shared by all extensions within
+  //   the block.
+  // - Just because a location's span is a subset of some other location's span
+  //   does not mean that it is a descendant.  For example, a "group" defines
+  //   both a type and a field in a single declaration.  Thus, the locations
+  //   corresponding to the type and field and their components will overlap.
+  // - Code which tries to interpret locations should probably be designed to
+  //   ignore those that it doesn't understand, as more types of locations could
+  //   be recorded in the future.
+  repeated Location location = 1;
+  message Location {
+    // Identifies which part of the FileDescriptorProto was defined at this
+    // location.
+    //
+    // Each element is a field number or an index.  They form a path from
+    // the root FileDescriptorProto to where the definition occurs.  For
+    // example, this path:
+    //   [ 4, 3, 2, 7, 1 ]
+    // refers to:
+    //   file.message_type(3)  // 4, 3
+    //       .field(7)         // 2, 7
+    //       .name()           // 1
+    // This is because FileDescriptorProto.message_type has field number 4:
+    //   repeated DescriptorProto message_type = 4;
+    // and DescriptorProto.field has field number 2:
+    //   repeated FieldDescriptorProto field = 2;
+    // and FieldDescriptorProto.name has field number 1:
+    //   optional string name = 1;
+    //
+    // Thus, the above path gives the location of a field name.  If we removed
+    // the last element:
+    //   [ 4, 3, 2, 7 ]
+    // this path refers to the whole field declaration (from the beginning
+    // of the label to the terminating semicolon).
+    repeated int32 path = 1 [packed=true];
+
+    // Always has exactly three or four elements: start line, start column,
+    // end line (optional, otherwise assumed same as start line), end column.
+    // These are packed into a single field for efficiency.  Note that line
+    // and column numbers are zero-based -- typically you will want to add
+    // 1 to each before displaying to a user.
+    repeated int32 span = 2 [packed=true];
+
+    // If this SourceCodeInfo represents a complete declaration, these are any
+    // comments appearing before and after the declaration which appear to be
+    // attached to the declaration.
+    //
+    // A series of line comments appearing on consecutive lines, with no other
+    // tokens appearing on those lines, will be treated as a single comment.
+    //
+    // leading_detached_comments will keep paragraphs of comments that appear
+    // before (but not connected to) the current element. Each paragraph,
+    // separated by empty lines, will be one comment element in the repeated
+    // field.
+    //
+    // Only the comment content is provided; comment markers (e.g. //) are
+    // stripped out.  For block comments, leading whitespace and an asterisk
+    // will be stripped from the beginning of each line other than the first.
+    // Newlines are included in the output.
+    //
+    // Examples:
+    //
+    //   optional int32 foo = 1;  // Comment attached to foo.
+    //   // Comment attached to bar.
+    //   optional int32 bar = 2;
+    //
+    //   optional string baz = 3;
+    //   // Comment attached to baz.
+    //   // Another line attached to baz.
+    //
+    //   // Comment attached to qux.
+    //   //
+    //   // Another line attached to qux.
+    //   optional double qux = 4;
+    //
+    //   // Detached comment for corge. This is not leading or trailing comments
+    //   // to qux or corge because there are blank lines separating it from
+    //   // both.
+    //
+    //   // Detached comment for corge paragraph 2.
+    //
+    //   optional string corge = 5;
+    //   /* Block comment attached
+    //    * to corge.  Leading asterisks
+    //    * will be removed. */
+    //   /* Block comment attached to
+    //    * grault. */
+    //   optional int32 grault = 6;
+    //
+    //   // ignored detached comments.
+    optional string leading_comments = 3;
+    optional string trailing_comments = 4;
+    repeated string leading_detached_comments = 6;
+  }
+}
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+message GeneratedCodeInfo {
+  // An Annotation connects some span of text in generated code to an element
+  // of its generating .proto file.
+  repeated Annotation annotation = 1;
+  message Annotation {
+    // Identifies the element in the original source .proto file. This field
+    // is formatted the same as SourceCodeInfo.Location.path.
+    repeated int32 path = 1 [packed=true];
+
+    // Identifies the filesystem path to the original source .proto.
+    optional string source_file = 2;
+
+    // Identifies the starting offset in bytes in the generated code
+    // that relates to the identified object.
+    optional int32 begin = 3;
+
+    // Identifies the ending offset in bytes in the generated code that
+    // relates to the identified object. The end offset should be one past
+    // the last relevant byte (so the length of the text = end - begin).
+    optional int32 end = 4;
+  }
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
new file mode 100644
index 0000000..70276e8
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -0,0 +1,141 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements functions to marshal proto.Message to/from
+// google.protobuf.Any message.
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes/any"
+)
+
+const googleApis = "type.googleapis.com/"
+
+// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
+//
+// Note that regular type assertions should be done using the Is
+// function. AnyMessageName is provided for less common use cases like filtering a
+// sequence of Any messages based on a set of allowed message type names.
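+//
+// For example (illustrative): given an Any whose TypeUrl is
+// "type.googleapis.com/google.profile.Person", AnyMessageName returns
+// "google.profile.Person".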
+func AnyMessageName(any *any.Any) (string, error) {
+	if any == nil {
+		return "", fmt.Errorf("message is nil")
+	}
+	slash := strings.LastIndex(any.TypeUrl, "/")
+	if slash < 0 {
+		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
+	}
+	return any.TypeUrl[slash+1:], nil
+}
+
+// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
+func MarshalAny(pb proto.Message) (*any.Any, error) {
+	value, err := proto.Marshal(pb)
+	if err != nil {
+		return nil, err
+	}
+	return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
+}
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in a google.protobuf.Any
+// message. The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+//
+//   var x ptypes.DynamicAny
+//   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+//   fmt.Printf("unmarshaled message: %v", x.Message)
+type DynamicAny struct {
+	proto.Message
+}
+
+// Empty returns a new proto.Message of the type specified in a
+// google.protobuf.Any message. It returns an error if the corresponding
+// message type isn't linked in.
+func Empty(any *any.Any) (proto.Message, error) {
+	aname, err := AnyMessageName(any)
+	if err != nil {
+		return nil, err
+	}
+
+	t := proto.MessageType(aname)
+	if t == nil {
+		return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
+	}
+	return reflect.New(t.Elem()).Interface().(proto.Message), nil
+}
+
+// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
+// message and places the decoded result in pb. It returns an error if the type
+// of the contents of the Any message does not match the type of the pb message.
+//
+// pb can be a proto.Message, or a *DynamicAny.
+func UnmarshalAny(any *any.Any, pb proto.Message) error {
+	if d, ok := pb.(*DynamicAny); ok {
+		if d.Message == nil {
+			var err error
+			d.Message, err = Empty(any)
+			if err != nil {
+				return err
+			}
+		}
+		return UnmarshalAny(any, d.Message)
+	}
+
+	aname, err := AnyMessageName(any)
+	if err != nil {
+		return err
+	}
+
+	mname := proto.MessageName(pb)
+	if aname != mname {
+		return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
+	}
+	return proto.Unmarshal(any.Value, pb)
+}
+
+// Is reports whether the given Any value contains a message of the given type.
+func Is(any *any.Any, pb proto.Message) bool {
+	// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
+	// but it avoids scanning TypeUrl for the slash.
+	if any == nil {
+		return false
+	}
+	name := proto.MessageName(pb)
+	prefix := len(any.TypeUrl) - len(name)
+	return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
+}
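+
+// Example usage of the functions above (an illustrative sketch; pb.Foo stands
+// in for any registered proto.Message type):
+//
+//   foo := &pb.Foo{}
+//   a, err := MarshalAny(foo)        // pack foo into an Any
+//   if err != nil { ... }
+//   if Is(a, &pb.Foo{}) {            // check the contained type first
+//       var out pb.Foo
+//       err = UnmarshalAny(a, &out)  // unpack into a fresh message
+//   }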
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
new file mode 100644
index 0000000..78ee523
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -0,0 +1,200 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/any.proto
+
+package any
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// The protobuf library provides support for packing and unpacking Any values
+// in the form of utility functions or additional generated methods of the Any
+// type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+//     Foo foo = ...;
+//     Any any;
+//     any.PackFrom(foo);
+//     ...
+//     if (any.UnpackTo(&foo)) {
+//       ...
+//     }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+//     Foo foo = ...;
+//     Any any = Any.pack(foo);
+//     ...
+//     if (any.is(Foo.class)) {
+//       foo = any.unpack(Foo.class);
+//     }
+//
+//  Example 3: Pack and unpack a message in Python.
+//
+//     foo = Foo(...)
+//     any = Any()
+//     any.Pack(foo)
+//     ...
+//     if any.Is(Foo.DESCRIPTOR):
+//       any.Unpack(foo)
+//       ...
+//
+//  Example 4: Pack and unpack a message in Go
+//
+//      foo := &pb.Foo{...}
+//      any, err := ptypes.MarshalAny(foo)
+//      ...
+//      foo := &pb.Foo{}
+//      if err := ptypes.UnmarshalAny(any, foo); err != nil {
+//        ...
+//      }
+//
+// The pack methods provided by the protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+type Any struct {
+	// A URL/resource name that uniquely identifies the type of the serialized
+	// protocol buffer message. The last segment of the URL's path must represent
+	// the fully qualified name of the type (as in
+	// `path/google.protobuf.Duration`). The name should be in a canonical form
+	// (e.g., leading "." is not accepted).
+	//
+	// In practice, teams usually precompile into the binary all types that they
+	// expect it to use in the context of Any. However, for URLs which use the
+	// scheme `http`, `https`, or no scheme, one can optionally set up a type
+	// server that maps type URLs to message definitions as follows:
+	//
+	// * If no scheme is provided, `https` is assumed.
+	// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+	//   value in binary format, or produce an error.
+	// * Applications are allowed to cache lookup results based on the
+	//   URL, or have them precompiled into a binary to avoid any
+	//   lookup. Therefore, binary compatibility needs to be preserved
+	//   on changes to types. (Use versioned type names to manage
+	//   breaking changes.)
+	//
+	// Note: this functionality is not currently available in the official
+	// protobuf release, and it is not used for type URLs beginning with
+	// type.googleapis.com.
+	//
+	// Schemes other than `http`, `https` (or the empty scheme) might be
+	// used with implementation-specific semantics.
+	//
+	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+	// Must be a valid serialized protocol buffer of the above specified type.
+	Value                []byte   `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Any) Reset()         { *m = Any{} }
+func (m *Any) String() string { return proto.CompactTextString(m) }
+func (*Any) ProtoMessage()    {}
+func (*Any) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b53526c13ae22eb4, []int{0}
+}
+
+func (*Any) XXX_WellKnownType() string { return "Any" }
+
+func (m *Any) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Any.Unmarshal(m, b)
+}
+func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Any.Marshal(b, m, deterministic)
+}
+func (m *Any) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Any.Merge(m, src)
+}
+func (m *Any) XXX_Size() int {
+	return xxx_messageInfo_Any.Size(m)
+}
+func (m *Any) XXX_DiscardUnknown() {
+	xxx_messageInfo_Any.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Any proto.InternalMessageInfo
+
+func (m *Any) GetTypeUrl() string {
+	if m != nil {
+		return m.TypeUrl
+	}
+	return ""
+}
+
+func (m *Any) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*Any)(nil), "google.protobuf.Any")
+}
+
+func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
+
+var fileDescriptor_b53526c13ae22eb4 = []byte{
+	// 185 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
+	0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
+	0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
+	0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
+	0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
+	0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
+	0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
+	0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
+	0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
+	0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
+	0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
new file mode 100644
index 0000000..4932942
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
@@ -0,0 +1,154 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/any";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "AnyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// The protobuf library provides support for packing and unpacking Any values
+// in the form of utility functions or additional generated methods of the Any
+// type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+//     Foo foo = ...;
+//     Any any;
+//     any.PackFrom(foo);
+//     ...
+//     if (any.UnpackTo(&foo)) {
+//       ...
+//     }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+//     Foo foo = ...;
+//     Any any = Any.pack(foo);
+//     ...
+//     if (any.is(Foo.class)) {
+//       foo = any.unpack(Foo.class);
+//     }
+//
+//  Example 3: Pack and unpack a message in Python.
+//
+//     foo = Foo(...)
+//     any = Any()
+//     any.Pack(foo)
+//     ...
+//     if any.Is(Foo.DESCRIPTOR):
+//       any.Unpack(foo)
+//       ...
+//
+//  Example 4: Pack and unpack a message in Go
+//
+//      foo := &pb.Foo{...}
+//      any, err := ptypes.MarshalAny(foo)
+//      ...
+//      foo := &pb.Foo{}
+//      if err := ptypes.UnmarshalAny(any, foo); err != nil {
+//        ...
+//      }
+//
+// The pack methods provided by the protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+message Any {
+  // A URL/resource name that uniquely identifies the type of the serialized
+  // protocol buffer message. The last segment of the URL's path must represent
+  // the fully qualified name of the type (as in
+  // `path/google.protobuf.Duration`). The name should be in a canonical form
+  // (e.g., leading "." is not accepted).
+  //
+  // In practice, teams usually precompile into the binary all types that they
+  // expect it to use in the context of Any. However, for URLs which use the
+  // scheme `http`, `https`, or no scheme, one can optionally set up a type
+  // server that maps type URLs to message definitions as follows:
+  //
+  // * If no scheme is provided, `https` is assumed.
+  // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+  //   value in binary format, or produce an error.
+  // * Applications are allowed to cache lookup results based on the
+  //   URL, or have them precompiled into a binary to avoid any
+  //   lookup. Therefore, binary compatibility needs to be preserved
+  //   on changes to types. (Use versioned type names to manage
+  //   breaking changes.)
+  //
+  // Note: this functionality is not currently available in the official
+  // protobuf release, and it is not used for type URLs beginning with
+  // type.googleapis.com.
+  //
+  // Schemes other than `http`, `https` (or the empty scheme) might be
+  // used with implementation-specific semantics.
+  //
+  string type_url = 1;
+
+  // Must be a valid serialized protocol buffer of the above specified type.
+  bytes value = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
new file mode 100644
index 0000000..c0d595d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -0,0 +1,35 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package ptypes contains code for interacting with well-known types.
+*/
+package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
new file mode 100644
index 0000000..26d1ca2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -0,0 +1,102 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements conversions between google.protobuf.Duration
+// and time.Duration.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	durpb "github.com/golang/protobuf/ptypes/duration"
+)
+
+const (
+	// Range of a durpb.Duration in seconds, as specified in
+	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
+	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+	minSeconds = -maxSeconds
+)
+
+// validateDuration determines whether the durpb.Duration is valid according to the
+// definition in google/protobuf/duration.proto. A valid durpb.Duration
+// may still be too large to fit into a time.Duration (the range of durpb.Duration
+// is about 10,000 years, and the range of time.Duration is about 290 years).
+func validateDuration(d *durpb.Duration) error {
+	if d == nil {
+		return errors.New("duration: nil Duration")
+	}
+	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
+		return fmt.Errorf("duration: %v: seconds out of range", d)
+	}
+	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
+		return fmt.Errorf("duration: %v: nanos out of range", d)
+	}
+	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
+		return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
+	}
+	return nil
+}
+
+// Duration converts a durpb.Duration to a time.Duration. Duration
+// returns an error if the durpb.Duration is invalid or is too large to be
+// represented in a time.Duration.
+func Duration(p *durpb.Duration) (time.Duration, error) {
+	if err := validateDuration(p); err != nil {
+		return 0, err
+	}
+	d := time.Duration(p.Seconds) * time.Second
+	if int64(d/time.Second) != p.Seconds {
+		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+	}
+	if p.Nanos != 0 {
+		d += time.Duration(p.Nanos) * time.Nanosecond
+		if (d < 0) != (p.Nanos < 0) {
+			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+		}
+	}
+	return d, nil
+}
+
+// DurationProto converts a time.Duration to a durpb.Duration.
+func DurationProto(d time.Duration) *durpb.Duration {
+	nanos := d.Nanoseconds()
+	secs := nanos / 1e9
+	nanos -= secs * 1e9
+	return &durpb.Duration{
+		Seconds: secs,
+		Nanos:   int32(nanos),
+	}
+}
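+
+// Example round trip (an illustrative sketch):
+//
+//   d := DurationProto(90 * time.Second) // &durpb.Duration{Seconds: 90}
+//   back, err := Duration(d)             // back == 90s, err == nil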
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
new file mode 100644
index 0000000..0d681ee
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -0,0 +1,161 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/duration.proto
+
+package duration
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+//     td = datetime.timedelta(days=3, minutes=10)
+//     duration = Duration()
+//     duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+type Duration struct {
+	// Signed seconds of the span of time. Must be from -315,576,000,000
+	// to +315,576,000,000 inclusive. Note: these bounds are computed from:
+	// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	// Signed fractions of a second at nanosecond resolution of the span
+	// of time. Durations less than one second are represented with a 0
+	// `seconds` field and a positive or negative `nanos` field. For durations
+	// of one second or more, a non-zero value for the `nanos` field must be
+	// of the same sign as the `seconds` field. Must be from -999,999,999
+	// to +999,999,999 inclusive.
+	Nanos                int32    `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Duration) Reset()         { *m = Duration{} }
+func (m *Duration) String() string { return proto.CompactTextString(m) }
+func (*Duration) ProtoMessage()    {}
+func (*Duration) Descriptor() ([]byte, []int) {
+	return fileDescriptor_23597b2ebd7ac6c5, []int{0}
+}
+
+func (*Duration) XXX_WellKnownType() string { return "Duration" }
+
+func (m *Duration) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Duration.Unmarshal(m, b)
+}
+func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
+}
+func (m *Duration) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Duration.Merge(m, src)
+}
+func (m *Duration) XXX_Size() int {
+	return xxx_messageInfo_Duration.Size(m)
+}
+func (m *Duration) XXX_DiscardUnknown() {
+	xxx_messageInfo_Duration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Duration proto.InternalMessageInfo
+
+func (m *Duration) GetSeconds() int64 {
+	if m != nil {
+		return m.Seconds
+	}
+	return 0
+}
+
+func (m *Duration) GetNanos() int32 {
+	if m != nil {
+		return m.Nanos
+	}
+	return 0
+}
+
+func init() {
+	proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
+}
+
+func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
+
+var fileDescriptor_23597b2ebd7ac6c5 = []byte{
+	// 190 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
+	0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
+	0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
+	0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
+	0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
+	0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
+	0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
+	0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
+	0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
+	0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
+	0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
new file mode 100644
index 0000000..975fce4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
@@ -0,0 +1,117 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/duration";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DurationProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+//     td = datetime.timedelta(days=3, minutes=10)
+//     duration = Duration()
+//     duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+message Duration {
+
+  // Signed seconds of the span of time. Must be from -315,576,000,000
+  // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+  // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+  int64 seconds = 1;
+
+  // Signed fractions of a second at nanosecond resolution of the span
+  // of time. Durations less than one second are represented with a 0
+  // `seconds` field and a positive or negative `nanos` field. For durations
+  // of one second or more, a non-zero value for the `nanos` field must be
+  // of the same sign as the `seconds` field. Must be from -999,999,999
+  // to +999,999,999 inclusive.
+  int32 nanos = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
new file mode 100644
index 0000000..b4eb03e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
@@ -0,0 +1,83 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/empty.proto
+
+package empty
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// A generic empty message that you can re-use to avoid defining duplicated
+// empty messages in your APIs. A typical example is to use it as the request
+// or the response type of an API method. For instance:
+//
+//     service Foo {
+//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+//     }
+//
+// The JSON representation for `Empty` is an empty JSON object `{}`.
+type Empty struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Empty) Reset()         { *m = Empty{} }
+func (m *Empty) String() string { return proto.CompactTextString(m) }
+func (*Empty) ProtoMessage()    {}
+func (*Empty) Descriptor() ([]byte, []int) {
+	return fileDescriptor_900544acb223d5b8, []int{0}
+}
+
+func (*Empty) XXX_WellKnownType() string { return "Empty" }
+
+func (m *Empty) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Empty.Unmarshal(m, b)
+}
+func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Empty.Marshal(b, m, deterministic)
+}
+func (m *Empty) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Empty.Merge(m, src)
+}
+func (m *Empty) XXX_Size() int {
+	return xxx_messageInfo_Empty.Size(m)
+}
+func (m *Empty) XXX_DiscardUnknown() {
+	xxx_messageInfo_Empty.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Empty proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
+}
+
+func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) }
+
+var fileDescriptor_900544acb223d5b8 = []byte{
+	// 148 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28,
+	0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57,
+	0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36,
+	0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf,
+	0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c,
+	0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10,
+	0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40,
+	0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6,
+	0xb7, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
new file mode 100644
index 0000000..03cacd2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
@@ -0,0 +1,52 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/empty";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "EmptyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+option cc_enable_arenas = true;
+
+// A generic empty message that you can re-use to avoid defining duplicated
+// empty messages in your APIs. A typical example is to use it as the request
+// or the response type of an API method. For instance:
+//
+//     service Foo {
+//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+//     }
+//
+// The JSON representation for `Empty` is empty JSON object `{}`.
+message Empty {}
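Because Empty has no fields, its wire encoding is zero bytes (and, as noted
above, its JSON form is `{}`). A quick illustrative check with the generated
Go type:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        "github.com/golang/protobuf/ptypes/empty"
    )

    func main() {
        b, err := proto.Marshal(&empty.Empty{})
        if err != nil {
            panic(err)
        }
        fmt.Println(len(b)) // 0: nothing to encode
    }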
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
new file mode 100644
index 0000000..8da0df0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -0,0 +1,132 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements operations on google.protobuf.Timestamp.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	tspb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+const (
+	// Seconds field of the earliest valid Timestamp.
+	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	minValidSeconds = -62135596800
+	// Seconds field just after the latest valid Timestamp.
+	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	maxValidSeconds = 253402300800
+)
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *tspb.Timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+	}
+	return nil
+}
+
+// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+func TimestampNow() *tspb.Timestamp {
+	ts, err := TimestampProto(time.Now())
+	if err != nil {
+		panic("ptypes: time.Now() out of Timestamp range")
+	}
+	return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
+	ts := &tspb.Timestamp{
+		Seconds: t.Unix(),
+		Nanos:   int32(t.Nanosecond()),
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
+// Timestamps, it returns an error message in parentheses.
+func TimestampString(ts *tspb.Timestamp) string {
+	t, err := Timestamp(ts)
+	if err != nil {
+		return fmt.Sprintf("(%v)", err)
+	}
+	return t.Format(time.RFC3339Nano)
+}
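Taken together, these helpers round-trip between time.Time and the proto
message. A usage sketch:

    package main

    import (
        "fmt"
        "time"

        "github.com/golang/protobuf/ptypes"
    )

    func main() {
        // time.Time -> Timestamp; this errors only for times outside
        // the range [0001-01-01, 10000-01-01).
        ts, err := ptypes.TimestampProto(time.Date(2020, 1, 13, 0, 0, 0, 0, time.UTC))
        if err != nil {
            panic(err)
        }
        fmt.Println(ptypes.TimestampString(ts)) // 2020-01-13T00:00:00Z

        // Timestamp -> time.Time, always in the UTC location.
        t, err := ptypes.Timestamp(ts)
        if err != nil {
            panic(err)
        }
        fmt.Println(t.Equal(time.Date(2020, 1, 13, 0, 0, 0, 0, time.UTC))) // true
    }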
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
new file mode 100644
index 0000000..31cd846
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -0,0 +1,179 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/timestamp.proto
+
+package timestamp
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(time(NULL));
+//     timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+//     struct timeval tv;
+//     gettimeofday(&tv, NULL);
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(tv.tv_sec);
+//     timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+//     FILETIME ft;
+//     GetSystemTimeAsFileTime(&ft);
+//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+//     Timestamp timestamp;
+//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+//     long millis = System.currentTimeMillis();
+//
+//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+//         .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+//     timestamp = Timestamp()
+//     timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// ) to obtain a formatter capable of generating timestamps in this format.
+//
+//
+type Timestamp struct {
+	// Represents seconds of UTC time since Unix epoch
+	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+	// 9999-12-31T23:59:59Z inclusive.
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	// Non-negative fractions of a second at nanosecond resolution. Negative
+	// second values with fractions must still have non-negative nanos values
+	// that count forward in time. Must be from 0 to 999,999,999
+	// inclusive.
+	Nanos                int32    `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Timestamp) Reset()         { *m = Timestamp{} }
+func (m *Timestamp) String() string { return proto.CompactTextString(m) }
+func (*Timestamp) ProtoMessage()    {}
+func (*Timestamp) Descriptor() ([]byte, []int) {
+	return fileDescriptor_292007bbfe81227e, []int{0}
+}
+
+func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
+
+func (m *Timestamp) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Timestamp.Unmarshal(m, b)
+}
+func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
+}
+func (m *Timestamp) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Timestamp.Merge(m, src)
+}
+func (m *Timestamp) XXX_Size() int {
+	return xxx_messageInfo_Timestamp.Size(m)
+}
+func (m *Timestamp) XXX_DiscardUnknown() {
+	xxx_messageInfo_Timestamp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Timestamp proto.InternalMessageInfo
+
+func (m *Timestamp) GetSeconds() int64 {
+	if m != nil {
+		return m.Seconds
+	}
+	return 0
+}
+
+func (m *Timestamp) GetNanos() int32 {
+	if m != nil {
+		return m.Nanos
+	}
+	return 0
+}
+
+func init() {
+	proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
+}
+
+func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
+
+var fileDescriptor_292007bbfe81227e = []byte{
+	// 191 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
+	0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
+	0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
+	0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
+	0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
+	0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
+	0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
+	0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
+	0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
+	0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
+	0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
new file mode 100644
index 0000000..eafb3fa
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
@@ -0,0 +1,135 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/timestamp";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "TimestampProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(time(NULL));
+//     timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+//     struct timeval tv;
+//     gettimeofday(&tv, NULL);
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(tv.tv_sec);
+//     timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+//     FILETIME ft;
+//     GetSystemTimeAsFileTime(&ft);
+//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+//     Timestamp timestamp;
+//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+//     long millis = System.currentTimeMillis();
+//
+//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+//         .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+//     timestamp = Timestamp()
+//     timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// ) to obtain a formatter capable of generating timestamps in this format.
+//
+//
+message Timestamp {
+
+  // Represents seconds of UTC time since Unix epoch
+  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+  // 9999-12-31T23:59:59Z inclusive.
+  int64 seconds = 1;
+
+  // Non-negative fractions of a second at nanosecond resolution. Negative
+  // second values with fractions must still have non-negative nanos values
+  // that count forward in time. Must be from 0 to 999,999,999
+  // inclusive.
+  int32 nanos = 2;
+}
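The JSON mapping above is plain RFC 3339, which Go's time package parses
directly; a quick check of the example string from the comment:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        t, err := time.Parse(time.RFC3339Nano, "2017-01-15T01:30:15.01Z")
        if err != nil {
            panic(err)
        }
        // 15.01 seconds past 01:30 UTC: the fraction becomes 10ms of nanos.
        fmt.Println(t.Unix(), t.Nanosecond()) // 1484443815 10000000
    }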
diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore
new file mode 100644
index 0000000..042091d
--- /dev/null
+++ b/vendor/github.com/golang/snappy/.gitignore
@@ -0,0 +1,16 @@
+cmd/snappytool/snappytool
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS
new file mode 100644
index 0000000..bcfa195
--- /dev/null
+++ b/vendor/github.com/golang/snappy/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+#	Name or Organization <email address>
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski <dgryski@gmail.com>
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Sebastien Binet <seb.binet@gmail.com>
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS
new file mode 100644
index 0000000..931ae31
--- /dev/null
+++ b/vendor/github.com/golang/snappy/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people.  For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+#     http://code.google.com/legal/individual-cla-v1.0.html
+#     http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+#     Name <email address>
+
+# Please keep the list sorted.
+
+Damian Gryski <dgryski@gmail.com>
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman <kaib@golang.org>
+Marc-Antoine Ruel <maruel@chromium.org>
+Nigel Tao <nigeltao@golang.org>
+Rob Pike <r@golang.org>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Russ Cox <rsc@golang.org>
+Sebastien Binet <seb.binet@gmail.com>
diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE
new file mode 100644
index 0000000..6050c10
--- /dev/null
+++ b/vendor/github.com/golang/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README
new file mode 100644
index 0000000..cea1287
--- /dev/null
+++ b/vendor/github.com/golang/snappy/README
@@ -0,0 +1,107 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
+
+
+
+Benchmarks.
+
+The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
+or so files, the same set used by the C++ Snappy code (github.com/google/snappy
+and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
+3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
+
+"go test -test.bench=."
+
+_UFlat0-8         2.19GB/s ± 0%  html
+_UFlat1-8         1.41GB/s ± 0%  urls
+_UFlat2-8         23.5GB/s ± 2%  jpg
+_UFlat3-8         1.91GB/s ± 0%  jpg_200
+_UFlat4-8         14.0GB/s ± 1%  pdf
+_UFlat5-8         1.97GB/s ± 0%  html4
+_UFlat6-8          814MB/s ± 0%  txt1
+_UFlat7-8          785MB/s ± 0%  txt2
+_UFlat8-8          857MB/s ± 0%  txt3
+_UFlat9-8          719MB/s ± 1%  txt4
+_UFlat10-8        2.84GB/s ± 0%  pb
+_UFlat11-8        1.05GB/s ± 0%  gaviota
+
+_ZFlat0-8         1.04GB/s ± 0%  html
+_ZFlat1-8          534MB/s ± 0%  urls
+_ZFlat2-8         15.7GB/s ± 1%  jpg
+_ZFlat3-8          740MB/s ± 3%  jpg_200
+_ZFlat4-8         9.20GB/s ± 1%  pdf
+_ZFlat5-8          991MB/s ± 0%  html4
+_ZFlat6-8          379MB/s ± 0%  txt1
+_ZFlat7-8          352MB/s ± 0%  txt2
+_ZFlat8-8          396MB/s ± 1%  txt3
+_ZFlat9-8          327MB/s ± 1%  txt4
+_ZFlat10-8        1.33GB/s ± 1%  pb
+_ZFlat11-8         605MB/s ± 1%  gaviota
+
+
+
+"go test -test.bench=. -tags=noasm"
+
+_UFlat0-8          621MB/s ± 2%  html
+_UFlat1-8          494MB/s ± 1%  urls
+_UFlat2-8         23.2GB/s ± 1%  jpg
+_UFlat3-8         1.12GB/s ± 1%  jpg_200
+_UFlat4-8         4.35GB/s ± 1%  pdf
+_UFlat5-8          609MB/s ± 0%  html4
+_UFlat6-8          296MB/s ± 0%  txt1
+_UFlat7-8          288MB/s ± 0%  txt2
+_UFlat8-8          309MB/s ± 1%  txt3
+_UFlat9-8          280MB/s ± 1%  txt4
+_UFlat10-8         753MB/s ± 0%  pb
+_UFlat11-8         400MB/s ± 0%  gaviota
+
+_ZFlat0-8          409MB/s ± 1%  html
+_ZFlat1-8          250MB/s ± 1%  urls
+_ZFlat2-8         12.3GB/s ± 1%  jpg
+_ZFlat3-8          132MB/s ± 0%  jpg_200
+_ZFlat4-8         2.92GB/s ± 0%  pdf
+_ZFlat5-8          405MB/s ± 1%  html4
+_ZFlat6-8          179MB/s ± 1%  txt1
+_ZFlat7-8          170MB/s ± 1%  txt2
+_ZFlat8-8          189MB/s ± 1%  txt3
+_ZFlat9-8          164MB/s ± 1%  txt4
+_ZFlat10-8         479MB/s ± 1%  pb
+_ZFlat11-8         270MB/s ± 1%  gaviota
+
+
+
+For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
+are the numbers from C++ Snappy's
+
+make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
+
+BM_UFlat/0     2.4GB/s  html
+BM_UFlat/1     1.4GB/s  urls
+BM_UFlat/2    21.8GB/s  jpg
+BM_UFlat/3     1.5GB/s  jpg_200
+BM_UFlat/4    13.3GB/s  pdf
+BM_UFlat/5     2.1GB/s  html4
+BM_UFlat/6     1.0GB/s  txt1
+BM_UFlat/7   959.4MB/s  txt2
+BM_UFlat/8     1.0GB/s  txt3
+BM_UFlat/9   864.5MB/s  txt4
+BM_UFlat/10    2.9GB/s  pb
+BM_UFlat/11    1.2GB/s  gaviota
+
+BM_ZFlat/0   944.3MB/s  html (22.31 %)
+BM_ZFlat/1   501.6MB/s  urls (47.78 %)
+BM_ZFlat/2    14.3GB/s  jpg (99.95 %)
+BM_ZFlat/3   538.3MB/s  jpg_200 (73.00 %)
+BM_ZFlat/4     8.3GB/s  pdf (83.30 %)
+BM_ZFlat/5   903.5MB/s  html4 (22.52 %)
+BM_ZFlat/6   336.0MB/s  txt1 (57.88 %)
+BM_ZFlat/7   312.3MB/s  txt2 (61.91 %)
+BM_ZFlat/8   353.1MB/s  txt3 (54.99 %)
+BM_ZFlat/9   289.9MB/s  txt4 (66.26 %)
+BM_ZFlat/10    1.2GB/s  pb (19.68 %)
+BM_ZFlat/11  527.4MB/s  gaviota (37.72 %)
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go
new file mode 100644
index 0000000..72efb03
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode.go
@@ -0,0 +1,237 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+var (
+	// ErrCorrupt reports that the input is invalid.
+	ErrCorrupt = errors.New("snappy: corrupt input")
+	// ErrTooLarge reports that the uncompressed length is too large.
+	ErrTooLarge = errors.New("snappy: decoded block is too large")
+	// ErrUnsupported reports that the input isn't supported.
+	ErrUnsupported = errors.New("snappy: unsupported input")
+
+	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+	v, _, err := decodedLen(src)
+	return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+	v, n := binary.Uvarint(src)
+	if n <= 0 || v > 0xffffffff {
+		return 0, 0, ErrCorrupt
+	}
+
+	const wordSize = 32 << (^uint(0) >> 32 & 1)
+	if wordSize == 32 && v > 0x7fffffff {
+		return 0, 0, ErrTooLarge
+	}
+	return int(v), n, nil
+}
+
+const (
+	decodeErrCodeCorrupt                  = 1
+	decodeErrCodeUnsupportedLiteralLength = 2
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+	dLen, s, err := decodedLen(src)
+	if err != nil {
+		return nil, err
+	}
+	if dLen <= len(dst) {
+		dst = dst[:dLen]
+	} else {
+		dst = make([]byte, dLen)
+	}
+	switch decode(dst, src[s:]) {
+	case 0:
+		return dst, nil
+	case decodeErrCodeUnsupportedLiteralLength:
+		return nil, errUnsupportedLiteralLength
+	}
+	return nil, ErrCorrupt
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+	return &Reader{
+		r:       r,
+		decoded: make([]byte, maxBlockSize),
+		buf:     make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+	}
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+	r       io.Reader
+	err     error
+	decoded []byte
+	buf     []byte
+	// decoded[i:j] contains decoded bytes that have not yet been passed on.
+	i, j       int
+	readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+	r.r = reader
+	r.err = nil
+	r.i = 0
+	r.j = 0
+	r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+		if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+			r.err = ErrCorrupt
+		}
+		return false
+	}
+	return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	for {
+		if r.i < r.j {
+			n := copy(p, r.decoded[r.i:r.j])
+			r.i += n
+			return n, nil
+		}
+		if !r.readFull(r.buf[:4], true) {
+			return 0, r.err
+		}
+		chunkType := r.buf[0]
+		if !r.readHeader {
+			if chunkType != chunkTypeStreamIdentifier {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.readHeader = true
+		}
+		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+		if chunkLen > len(r.buf) {
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+
+		// The chunk types are specified at
+		// https://github.com/google/snappy/blob/master/framing_format.txt
+		switch chunkType {
+		case chunkTypeCompressedData:
+			// Section 4.2. Compressed data (chunk type 0x00).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:chunkLen]
+			if !r.readFull(buf, false) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			buf = buf[checksumSize:]
+
+			n, err := DecodedLen(buf)
+			if err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if _, err := Decode(r.decoded, buf); err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeUncompressedData:
+			// Section 4.3. Uncompressed data (chunk type 0x01).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:checksumSize]
+			if !r.readFull(buf, false) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			// Read directly into r.decoded instead of via r.buf.
+			n := chunkLen - checksumSize
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.decoded[:n], false) {
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeStreamIdentifier:
+			// Section 4.1. Stream identifier (chunk type 0xff).
+			if chunkLen != len(magicBody) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.buf[:len(magicBody)], false) {
+				return 0, r.err
+			}
+			for i := 0; i < len(magicBody); i++ {
+				if r.buf[i] != magicBody[i] {
+					r.err = ErrCorrupt
+					return 0, r.err
+				}
+			}
+			continue
+		}
+
+		if chunkType <= 0x7f {
+			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+		// Section 4.4 Padding (chunk type 0xfe).
+		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+		if !r.readFull(r.buf[:chunkLen], false) {
+			return 0, r.err
+		}
+	}
+}
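For reference, the two entry points this file provides, block Decode and the
framed Reader, in use (a minimal sketch, not part of this change):

    package main

    import (
        "bytes"
        "fmt"
        "io/ioutil"

        "github.com/golang/snappy"
    )

    func main() {
        // Block format: Encode/Decode operate on whole buffers.
        block := snappy.Encode(nil, []byte("hello, snappy"))
        out, err := snappy.Decode(nil, block)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // hello, snappy

        // Framed format: Writer and Reader speak the stream framing from
        // framing_format.txt, chunk checksums included.
        var buf bytes.Buffer
        w := snappy.NewBufferedWriter(&buf)
        w.Write([]byte("hello, framed snappy"))
        w.Close()
        data, err := ioutil.ReadAll(snappy.NewReader(&buf))
        if err != nil {
            panic(err)
        }
        fmt.Println(string(data)) // hello, framed snappy
    }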
diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go
new file mode 100644
index 0000000..fcd192b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// decode has the same semantics as in decode_other.go.
+//
+//go:noescape
+func decode(dst, src []byte) int
diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s
new file mode 100644
index 0000000..e6179f6
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.s
@@ -0,0 +1,490 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+//	- AX	scratch
+//	- BX	scratch
+//	- CX	length or x
+//	- DX	offset
+//	- SI	&src[s]
+//	- DI	&dst[d]
+//	+ R8	dst_base
+//	+ R9	dst_len
+//	+ R10	dst_base + dst_len
+//	+ R11	src_base
+//	+ R12	src_len
+//	+ R13	src_base + src_len
+//	- R14	used by doCopy
+//	- R15	used by doCopy
+//
+// The registers R8-R13 (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly DI - R8,  and len(dst)-d is R10 - DI.
+// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
+TEXT ·decode(SB), NOSPLIT, $48-56
+	// Initialize SI, DI and R8-R13.
+	MOVQ dst_base+0(FP), R8
+	MOVQ dst_len+8(FP), R9
+	MOVQ R8, DI
+	MOVQ R8, R10
+	ADDQ R9, R10
+	MOVQ src_base+24(FP), R11
+	MOVQ src_len+32(FP), R12
+	MOVQ R11, SI
+	MOVQ R11, R13
+	ADDQ R12, R13
+
+loop:
+	// for s < len(src)
+	CMPQ SI, R13
+	JEQ  end
+
+	// CX = uint32(src[s])
+	//
+	// switch src[s] & 0x03
+	MOVBLZX (SI), CX
+	MOVL    CX, BX
+	ANDL    $3, BX
+	CMPL    BX, $1
+	JAE     tagCopy
+
+	// ----------------------------------------
+	// The code below handles literal tags.
+
+	// case tagLiteral:
+	// x := uint32(src[s] >> 2)
+	// switch
+	SHRL $2, CX
+	CMPL CX, $60
+	JAE  tagLit60Plus
+
+	// case x < 60:
+	// s++
+	INCQ SI
+
+doLit:
+	// This is the end of the inner "switch", when we have a literal tag.
+	//
+	// We assume that CX == x and x fits in a uint32, where x is the variable
+	// used in the pure Go decode_other.go code.
+
+	// length = int(x) + 1
+	//
+	// Unlike the pure Go code, we don't need to check if length <= 0 because
+	// CX can hold 64 bits, so the increment cannot overflow.
+	INCQ CX
+
+	// Prepare to check if copying length bytes will run past the end of dst or
+	// src.
+	//
+	// AX = len(dst) - d
+	// BX = len(src) - s
+	MOVQ R10, AX
+	SUBQ DI, AX
+	MOVQ R13, BX
+	SUBQ SI, BX
+
+	// !!! Try a faster technique for short (16 or fewer bytes) copies.
+	//
+	// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+	//   goto callMemmove // Fall back on calling runtime·memmove.
+	// }
+	//
+	// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+	// against 21 instead of 16, because it cannot assume that all of its input
+	// is contiguous in memory and so it needs to leave enough source bytes to
+	// read the next tag without refilling buffers, but Go's Decode assumes
+	// contiguousness (the src argument is a []byte).
+	CMPQ CX, $16
+	JGT  callMemmove
+	CMPQ AX, $16
+	JLT  callMemmove
+	CMPQ BX, $16
+	JLT  callMemmove
+
+	// !!! Implement the copy from src to dst as a 16-byte load and store.
+	// (Decode's documentation says that dst and src must not overlap.)
+	//
+	// This always copies 16 bytes, instead of only length bytes, but that's
+	// OK. If the input is a valid Snappy encoding then subsequent iterations
+	// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+	// non-nil error), so the overrun will be ignored.
+	//
+	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+	// 16-byte loads and stores. This technique probably wouldn't be as
+	// effective on architectures that are fussier about alignment.
+	MOVOU 0(SI), X0
+	MOVOU X0, 0(DI)
+
+	// d += length
+	// s += length
+	ADDQ CX, DI
+	ADDQ CX, SI
+	JMP  loop
+
+callMemmove:
+	// if length > len(dst)-d || length > len(src)-s { etc }
+	CMPQ CX, AX
+	JGT  errCorrupt
+	CMPQ CX, BX
+	JGT  errCorrupt
+
+	// copy(dst[d:], src[s:s+length])
+	//
+	// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+	// DI, SI and CX as arguments. Coincidentally, we also need to spill those
+	// three registers to the stack, to save local variables across the CALL.
+	MOVQ DI, 0(SP)
+	MOVQ SI, 8(SP)
+	MOVQ CX, 16(SP)
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+	MOVQ CX, 40(SP)
+	CALL runtime·memmove(SB)
+
+	// Restore local variables: unspill registers from the stack and
+	// re-calculate R8-R13.
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+	MOVQ 40(SP), CX
+	MOVQ dst_base+0(FP), R8
+	MOVQ dst_len+8(FP), R9
+	MOVQ R8, R10
+	ADDQ R9, R10
+	MOVQ src_base+24(FP), R11
+	MOVQ src_len+32(FP), R12
+	MOVQ R11, R13
+	ADDQ R12, R13
+
+	// d += length
+	// s += length
+	ADDQ CX, DI
+	ADDQ CX, SI
+	JMP  loop
+
+tagLit60Plus:
+	// !!! This fragment does the
+	//
+	// s += x - 58; if uint(s) > uint(len(src)) { etc }
+	//
+	// checks. In the asm version, we code it once instead of once per switch case.
+	ADDQ CX, SI
+	SUBQ $58, SI
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// case x == 60:
+	CMPL CX, $61
+	JEQ  tagLit61
+	JA   tagLit62Plus
+
+	// x = uint32(src[s-1])
+	MOVBLZX -1(SI), CX
+	JMP     doLit
+
+tagLit61:
+	// case x == 61:
+	// x = uint32(src[s-2]) | uint32(src[s-1])<<8
+	MOVWLZX -2(SI), CX
+	JMP     doLit
+
+tagLit62Plus:
+	CMPL CX, $62
+	JA   tagLit63
+
+	// case x == 62:
+	// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+	MOVWLZX -3(SI), CX
+	MOVBLZX -1(SI), BX
+	SHLL    $16, BX
+	ORL     BX, CX
+	JMP     doLit
+
+tagLit63:
+	// case x == 63:
+	// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+	MOVL -4(SI), CX
+	JMP  doLit
+
+// The code above handles literal tags.
+// ----------------------------------------
+// The code below handles copy tags.
+
+tagCopy4:
+	// case tagCopy4:
+	// s += 5
+	ADDQ $5, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// length = 1 + int(src[s-5])>>2
+	SHRQ $2, CX
+	INCQ CX
+
+	// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+	MOVLQZX -4(SI), DX
+	JMP     doCopy
+
+tagCopy2:
+	// case tagCopy2:
+	// s += 3
+	ADDQ $3, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// length = 1 + int(src[s-3])>>2
+	SHRQ $2, CX
+	INCQ CX
+
+	// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+	MOVWQZX -2(SI), DX
+	JMP     doCopy
+
+tagCopy:
+	// We have a copy tag. We assume that:
+	//	- BX == src[s] & 0x03
+	//	- CX == src[s]
+	CMPQ BX, $2
+	JEQ  tagCopy2
+	JA   tagCopy4
+
+	// case tagCopy1:
+	// s += 2
+	ADDQ $2, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+	MOVQ    CX, DX
+	ANDQ    $0xe0, DX
+	SHLQ    $3, DX
+	MOVBQZX -1(SI), BX
+	ORQ     BX, DX
+
+	// length = 4 + int(src[s-2])>>2&0x7
+	SHRQ $2, CX
+	ANDQ $7, CX
+	ADDQ $4, CX
+
+doCopy:
+	// This is the end of the outer "switch", when we have a copy tag.
+	//
+	// We assume that:
+	//	- CX == length && CX > 0
+	//	- DX == offset
+
+	// if offset <= 0 { etc }
+	CMPQ DX, $0
+	JLE  errCorrupt
+
+	// if d < offset { etc }
+	MOVQ DI, BX
+	SUBQ R8, BX
+	CMPQ BX, DX
+	JLT  errCorrupt
+
+	// if length > len(dst)-d { etc }
+	MOVQ R10, BX
+	SUBQ DI, BX
+	CMPQ CX, BX
+	JGT  errCorrupt
+
+	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+	//
+	// Set:
+	//	- R14 = len(dst)-d
+	//	- R15 = &dst[d-offset]
+	MOVQ R10, R14
+	SUBQ DI, R14
+	MOVQ DI, R15
+	SUBQ DX, R15
+
+	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+	//
+	// First, try using two 8-byte load/stores, similar to the doLit technique
+	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+	// and not one 16-byte load/store, and the first store has to be before the
+	// second load, due to the overlap if offset is in the range [8, 16).
+	//
+	// if length > 16 || offset < 8 || len(dst)-d < 16 {
+	//   goto slowForwardCopy
+	// }
+	// copy 16 bytes
+	// d += length
+	CMPQ CX, $16
+	JGT  slowForwardCopy
+	CMPQ DX, $8
+	JLT  slowForwardCopy
+	CMPQ R14, $16
+	JLT  slowForwardCopy
+	MOVQ 0(R15), AX
+	MOVQ AX, 0(DI)
+	MOVQ 8(R15), BX
+	MOVQ BX, 8(DI)
+	ADDQ CX, DI
+	JMP  loop
+
+slowForwardCopy:
+	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes.  However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//    abxxxxxxxxxxxx
+	//    [------]           d-offset
+	//      [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//    ababxxxxxxxxxx
+	//    [------]           d-offset
+	//        [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//   goto verySlowForwardCopy
+	// }
+	SUBQ $10, R14
+	CMPQ CX, R14
+	JGT  verySlowForwardCopy
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
+	//
+	// for offset < 8 {
+	//   copy 8 bytes from dst[d-offset:] to dst[d:]
+	//   length -= offset
+	//   d      += offset
+	//   offset += offset
+	//   // The two previous lines together mean that d-offset, and therefore
+	//   // R15, is unchanged.
+	// }
+	CMPQ DX, $8
+	JGE  fixUpSlowForwardCopy
+	MOVQ (R15), BX
+	MOVQ BX, (DI)
+	SUBQ DX, CX
+	ADDQ DX, DI
+	ADDQ DX, DX
+	JMP  makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+	// !!! Add length (which might be negative now) to d (implied by DI being
+	// &dst[d]) so that d ends up at the right place when we jump back to the
+	// top of the loop. Before we do that, though, we save DI to AX so that, if
+	// length is positive, copying the remaining length bytes will write to the
+	// right place.
+	MOVQ DI, AX
+	ADDQ CX, DI
+
+finishSlowForwardCopy:
+	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+	// length means that we overrun, but as above, that will be fixed up by
+	// subsequent iterations of the outermost loop.
+	CMPQ CX, $0
+	JLE  loop
+	MOVQ (R15), BX
+	MOVQ BX, (AX)
+	ADDQ $8, R15
+	ADDQ $8, AX
+	SUBQ $8, CX
+	JMP  finishSlowForwardCopy
+
+verySlowForwardCopy:
+	// verySlowForwardCopy is a simple implementation of forward copy. In C
+	// parlance, this is a do/while loop instead of a while loop, since we know
+	// that length > 0. In Go syntax:
+	//
+	// for {
+	//   dst[d] = dst[d - offset]
+	//   d++
+	//   length--
+	//   if length == 0 {
+	//     break
+	//   }
+	// }
+	MOVB (R15), BX
+	MOVB BX, (DI)
+	INCQ R15
+	INCQ DI
+	DECQ CX
+	JNZ  verySlowForwardCopy
+	JMP  loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+	// This is the end of the "for s < len(src)".
+	//
+	// if d != len(dst) { etc }
+	CMPQ DI, R10
+	JNE  errCorrupt
+
+	// return 0
+	MOVQ $0, ret+48(FP)
+	RET
+
+errCorrupt:
+	// return decodeErrCodeCorrupt
+	MOVQ $1, ret+48(FP)
+	RET
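The makeOffsetAtLeast8 idea above, widening a short repeating pattern until
8-byte chunked copies become safe, is easier to follow in Go. A hedged sketch
of the technique (our illustration, not the library's actual code path):

    // forwardCopy copies length bytes from dst[d-offset:] to dst[d:], where
    // the two ranges may overlap. While offset < 8, copying the pattern onto
    // itself doubles it (ab -> abab -> abababab), mirroring makeOffsetAtLeast8.
    func forwardCopy(dst []byte, d, offset, length int) {
        for offset < 8 && length > offset {
            copy(dst[d:d+offset], dst[d-offset:d])
            d, length, offset = d+offset, length-offset, offset*2
        }
        // Now each chunk of up to offset bytes has non-overlapping src/dst.
        for length > 0 {
            n := offset
            if n > length {
                n = length
            }
            copy(dst[d:d+n], dst[d-offset:d-offset+n])
            d, length = d+n, length-n
        }
    }

Unlike the assembly, this version never writes past d+length, so it trades
the 10-byte overrun freedom for bounds safety.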
diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go
new file mode 100644
index 0000000..8c9f204
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_other.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func decode(dst, src []byte) int {
+	var d, s, offset, length int
+	for s < len(src) {
+		switch src[s] & 0x03 {
+		case tagLiteral:
+			x := uint32(src[s] >> 2)
+			switch {
+			case x < 60:
+				s++
+			case x == 60:
+				s += 2
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-1])
+			case x == 61:
+				s += 3
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-2]) | uint32(src[s-1])<<8
+			case x == 62:
+				s += 4
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+			case x == 63:
+				s += 5
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+			}
+			length = int(x) + 1
+			if length <= 0 {
+				return decodeErrCodeUnsupportedLiteralLength
+			}
+			if length > len(dst)-d || length > len(src)-s {
+				return decodeErrCodeCorrupt
+			}
+			copy(dst[d:], src[s:s+length])
+			d += length
+			s += length
+			continue
+
+		case tagCopy1:
+			s += 2
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 4 + int(src[s-2])>>2&0x7
+			offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+
+		case tagCopy2:
+			s += 3
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 1 + int(src[s-3])>>2
+			offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+		case tagCopy4:
+			s += 5
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 1 + int(src[s-5])>>2
+			offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+		}
+
+		if offset <= 0 || d < offset || length > len(dst)-d {
+			return decodeErrCodeCorrupt
+		}
+		// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
+		// the built-in copy function, this byte-by-byte copy always runs
+		// forwards, even if the slices overlap. Conceptually, this is:
+		//
+		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
+		for end := d + length; d != end; d++ {
+			dst[d] = dst[d-offset]
+		}
+	}
+	if d != len(dst) {
+		return decodeErrCodeCorrupt
+	}
+	return 0
+}
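To make the tag arithmetic above concrete, the smallest interesting block is
a uvarint length followed by one literal tag. The byte values below are
worked out by hand from the tagLiteral case (illustrative):

    package main

    import (
        "fmt"

        "github.com/golang/snappy"
    )

    func main() {
        // 0x05: uvarint-encoded decoded length (5).
        // 0x10: tagLiteral (low two bits 00) with x = 0x10>>2 = 4, so the
        //       literal length is x+1 = 5.
        src := []byte{0x05, 0x10, 'h', 'e', 'l', 'l', 'o'}
        out, err := snappy.Decode(nil, src)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s\n", out) // hello
    }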
diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go
new file mode 100644
index 0000000..8d393e9
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode.go
@@ -0,0 +1,285 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Encode(dst, src []byte) []byte {
+	if n := MaxEncodedLen(len(src)); n < 0 {
+		panic(ErrTooLarge)
+	} else if len(dst) < n {
+		dst = make([]byte, n)
+	}
+
+	// The block starts with the varint-encoded length of the decompressed bytes.
+	d := binary.PutUvarint(dst, uint64(len(src)))
+
+	for len(src) > 0 {
+		p := src
+		src = nil
+		if len(p) > maxBlockSize {
+			p, src = p[:maxBlockSize], p[maxBlockSize:]
+		}
+		if len(p) < minNonLiteralBlockSize {
+			d += emitLiteral(dst[d:], p)
+		} else {
+			d += encodeBlock(dst[d:], p)
+		}
+	}
+	return dst[:d]
+}
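+
+// As a minimal usage sketch of the block format (Decode is the exported
+// counterpart described in snappy.go; error handling elided):
+//
+//	compressed := Encode(nil, input)
+//	original, err := Decode(nil, compressed)
+//	// On success, err == nil and original equals input.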
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 16 - 1
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// could be encoded with a copy tag. This is the minimum with respect to the
+// algorithm used by encodeBlock, not a minimum enforced by the file format.
+//
+// The encoded output must start with at least a 1 byte literal, as there are
+// no previous bytes to copy. A minimal (1 byte) copy after that, generated
+// from an emitCopy call in encodeBlock's main loop, would require at least
+// another inputMargin bytes, for the reason above: we want any emitLiteral
+// calls inside encodeBlock's main loop to use the fast path if possible, which
+// requires being able to overrun by inputMargin bytes. Thus,
+// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
+//
+// The C++ code doesn't use this exact threshold, but it could, as discussed at
+// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
+// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
+// optimization. It should not affect the encoded form. This is tested by
+// TestSameEncodingAsCppShortCopies.
+const minNonLiteralBlockSize = 1 + 1 + inputMargin
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+	n := uint64(srcLen)
+	if n > 0xffffffff {
+		return -1
+	}
+	// Compressed data can be defined as:
+	//    compressed := item* literal*
+	//    item       := literal* copy
+	//
+	// The trailing literal sequence has a space blowup of at most 62/60
+	// since a literal of length 60 needs one tag byte + one extra byte
+	// for length information.
+	//
+	// Item blowup is trickier to measure. Suppose the "copy" op copies
+	// 4 bytes of data. Because of a special check in the encoding code,
+	// we produce a 4-byte copy only if the offset is < 65536. Therefore
+	// the copy op takes 3 bytes to encode, and this type of item leads
+	// to at most the 62/60 blowup for representing literals.
+	//
+	// Suppose the "copy" op copies 5 bytes of data. If the offset is big
+	// enough, it will take 5 bytes to encode the copy op. Therefore the
+	// worst case here is a one-byte literal followed by a five-byte copy.
+	// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+	//
+	// This last factor dominates the blowup, so the final estimate is:
+	n = 32 + n + n/6
+	if n > 0xffffffff {
+		return -1
+	}
+	return int(n)
+}
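+
+// As a worked example of the estimate above, MaxEncodedLen(65536) is
+// 32 + 65536 + 65536/6 = 76490, which matches the hard coded
+// maxEncodedLenOfMaxBlockSize in snappy.go.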
+
+var errClosed = errors.New("snappy: Writer is closed")
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:    w,
+		obuf: make([]byte, obufLen),
+	}
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:    w,
+		ibuf: make([]byte, 0, maxBlockSize),
+		obuf: make([]byte, obufLen),
+	}
+}
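+
+// A minimal sketch of using the buffered writer (assuming a bytes.Buffer as
+// the sink; error handling elided):
+//
+//	var buf bytes.Buffer
+//	w := NewBufferedWriter(&buf)
+//	_, _ = w.Write(data)
+//	_ = w.Close() // Close flushes any buffered input.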
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+	w   io.Writer
+	err error
+
+	// ibuf is a buffer for the incoming (uncompressed) bytes.
+	//
+	// Its use is optional. For backwards compatibility, Writers created by the
+	// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+	// therefore do not need to be Flush'ed or Close'd.
+	ibuf []byte
+
+	// obuf is a buffer for the outgoing (compressed) bytes.
+	obuf []byte
+
+	// wroteStreamHeader is whether we have written the stream header.
+	wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+	w.w = writer
+	w.err = nil
+	if w.ibuf != nil {
+		w.ibuf = w.ibuf[:0]
+	}
+	w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+	if w.ibuf == nil {
+		// Do not buffer incoming bytes. This does not perform or compress well
+		// if the caller of Writer.Write writes many small slices. This
+		// behavior is therefore deprecated, but still supported for backwards
+		// compatibility with code that doesn't explicitly Flush or Close.
+		return w.write(p)
+	}
+
+	// The remainder of this method is based on bufio.Writer.Write from the
+	// standard library.
+
+	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+		var n int
+		if len(w.ibuf) == 0 {
+			// Large write, empty buffer.
+			// Write directly from p to avoid copy.
+			n, _ = w.write(p)
+		} else {
+			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+			w.ibuf = w.ibuf[:len(w.ibuf)+n]
+			w.Flush()
+		}
+		nRet += n
+		p = p[n:]
+	}
+	if w.err != nil {
+		return nRet, w.err
+	}
+	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+	w.ibuf = w.ibuf[:len(w.ibuf)+n]
+	nRet += n
+	return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	for len(p) > 0 {
+		obufStart := len(magicChunk)
+		if !w.wroteStreamHeader {
+			w.wroteStreamHeader = true
+			copy(w.obuf, magicChunk)
+			obufStart = 0
+		}
+
+		var uncompressed []byte
+		if len(p) > maxBlockSize {
+			uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+		} else {
+			uncompressed, p = p, nil
+		}
+		checksum := crc(uncompressed)
+
+		// Compress the buffer, discarding the result if the improvement
+		// isn't at least 12.5%.
+		compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+		chunkType := uint8(chunkTypeCompressedData)
+		chunkLen := 4 + len(compressed)
+		obufEnd := obufHeaderLen + len(compressed)
+		if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+			chunkType = chunkTypeUncompressedData
+			chunkLen = 4 + len(uncompressed)
+			obufEnd = obufHeaderLen
+		}
+
+		// Fill in the per-chunk header that comes before the body.
+		w.obuf[len(magicChunk)+0] = chunkType
+		w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
+		w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
+		w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
+		w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
+		w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
+		w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
+		w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
+
+		if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
+			w.err = err
+			return nRet, err
+		}
+		if chunkType == chunkTypeUncompressedData {
+			if _, err := w.w.Write(uncompressed); err != nil {
+				w.err = err
+				return nRet, err
+			}
+		}
+		nRet += len(uncompressed)
+	}
+	return nRet, nil
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+func (w *Writer) Flush() error {
+	if w.err != nil {
+		return w.err
+	}
+	if len(w.ibuf) == 0 {
+		return nil
+	}
+	w.write(w.ibuf)
+	w.ibuf = w.ibuf[:0]
+	return w.err
+}
+
+// Close calls Flush and then closes the Writer.
+func (w *Writer) Close() error {
+	w.Flush()
+	ret := w.err
+	if w.err == nil {
+		w.err = errClosed
+	}
+	return ret
+}
diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go
new file mode 100644
index 0000000..150d91b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// emitLiteral has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitLiteral(dst, lit []byte) int
+
+// emitCopy has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitCopy(dst []byte, offset, length int) int
+
+// extendMatch has the same semantics as in encode_other.go.
+//
+//go:noescape
+func extendMatch(src []byte, i, j int) int
+
+// encodeBlock has the same semantics as in encode_other.go.
+//
+//go:noescape
+func encodeBlock(dst, src []byte) (d int)
diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s
new file mode 100644
index 0000000..adfd979
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.s
@@ -0,0 +1,730 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
+// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
+// https://github.com/golang/snappy/issues/29
+//
+// As a workaround, the package was built with a known good assembler, and
+// those instructions were disassembled by "objdump -d" to yield the
+//	4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+// style comments, in AT&T asm syntax. Note that rsp here is a physical
+// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
+// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
+// fine on Go 1.6.
+
+// The asm code generally follows the pure Go code in encode_other.go, except
+// where marked with a "!!!".
+
+// ----------------------------------------------------------------------------
+
+// func emitLiteral(dst, lit []byte) int
+//
+// All local variables fit into registers. The register allocation:
+//	- AX	len(lit)
+//	- BX	n
+//	- DX	return value
+//	- DI	&dst[i]
+//	- R10	&lit[0]
+//
+// The 24 bytes of stack space is to call runtime·memmove.
+//
+// The unusual register allocation of local variables, such as R10 for the
+// source pointer, matches the allocation used at the call site in encodeBlock,
+// which makes it easier to manually inline this function.
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56
+	MOVQ dst_base+0(FP), DI
+	MOVQ lit_base+24(FP), R10
+	MOVQ lit_len+32(FP), AX
+	MOVQ AX, DX
+	MOVL AX, BX
+	SUBL $1, BX
+
+	CMPL BX, $60
+	JLT  oneByte
+	CMPL BX, $256
+	JLT  twoBytes
+
+threeBytes:
+	MOVB $0xf4, 0(DI)
+	MOVW BX, 1(DI)
+	ADDQ $3, DI
+	ADDQ $3, DX
+	JMP  memmove
+
+twoBytes:
+	MOVB $0xf0, 0(DI)
+	MOVB BX, 1(DI)
+	ADDQ $2, DI
+	ADDQ $2, DX
+	JMP  memmove
+
+oneByte:
+	SHLB $2, BX
+	MOVB BX, 0(DI)
+	ADDQ $1, DI
+	ADDQ $1, DX
+
+memmove:
+	MOVQ DX, ret+48(FP)
+
+	// copy(dst[i:], lit)
+	//
+	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+	// DI, R10 and AX as arguments.
+	MOVQ DI, 0(SP)
+	MOVQ R10, 8(SP)
+	MOVQ AX, 16(SP)
+	CALL runtime·memmove(SB)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func emitCopy(dst []byte, offset, length int) int
+//
+// All local variables fit into registers. The register allocation:
+//	- AX	length
+//	- SI	&dst[0]
+//	- DI	&dst[i]
+//	- R11	offset
+//
+// The unusual register allocation of local variables, such as R11 for the
+// offset, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+	MOVQ dst_base+0(FP), DI
+	MOVQ DI, SI
+	MOVQ offset+24(FP), R11
+	MOVQ length+32(FP), AX
+
+loop0:
+	// for length >= 68 { etc }
+	CMPL AX, $68
+	JLT  step1
+
+	// Emit a length 64 copy, encoded as 3 bytes.
+	MOVB $0xfe, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $64, AX
+	JMP  loop0
+
+step1:
+	// if length > 64 { etc }
+	CMPL AX, $64
+	JLE  step2
+
+	// Emit a length 60 copy, encoded as 3 bytes.
+	MOVB $0xee, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $60, AX
+
+step2:
+	// if length >= 12 || offset >= 2048 { goto step3 }
+	CMPL AX, $12
+	JGE  step3
+	CMPL R11, $2048
+	JGE  step3
+
+	// Emit the remaining copy, encoded as 2 bytes.
+	MOVB R11, 1(DI)
+	SHRL $8, R11
+	SHLB $5, R11
+	SUBB $4, AX
+	SHLB $2, AX
+	ORB  AX, R11
+	ORB  $1, R11
+	MOVB R11, 0(DI)
+	ADDQ $2, DI
+
+	// Return the number of bytes written.
+	SUBQ SI, DI
+	MOVQ DI, ret+40(FP)
+	RET
+
+step3:
+	// Emit the remaining copy, encoded as 3 bytes.
+	SUBL $1, AX
+	SHLB $2, AX
+	ORB  $2, AX
+	MOVB AX, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+
+	// Return the number of bytes written.
+	SUBQ SI, DI
+	MOVQ DI, ret+40(FP)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func extendMatch(src []byte, i, j int) int
+//
+// All local variables fit into registers. The register allocation:
+//	- DX	&src[0]
+//	- SI	&src[j]
+//	- R13	&src[len(src) - 8]
+//	- R14	&src[len(src)]
+//	- R15	&src[i]
+//
+// The unusual register allocation of local variables, such as R15 for a source
+// pointer, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·extendMatch(SB), NOSPLIT, $0-48
+	MOVQ src_base+0(FP), DX
+	MOVQ src_len+8(FP), R14
+	MOVQ i+24(FP), R15
+	MOVQ j+32(FP), SI
+	ADDQ DX, R14
+	ADDQ DX, R15
+	ADDQ DX, SI
+	MOVQ R14, R13
+	SUBQ $8, R13
+
+cmp8:
+	// As long as we are 8 or more bytes before the end of src, we can load and
+	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+	CMPQ SI, R13
+	JA   cmp1
+	MOVQ (R15), AX
+	MOVQ (SI), BX
+	CMPQ AX, BX
+	JNE  bsf
+	ADDQ $8, R15
+	ADDQ $8, SI
+	JMP  cmp8
+
+bsf:
+	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
+	// the index of the first byte that differs. The BSF instruction finds the
+	// least significant 1 bit, the amd64 architecture is little-endian, and
+	// the shift by 3 converts a bit index to a byte index.
+	XORQ AX, BX
+	BSFQ BX, BX
+	SHRQ $3, BX
+	ADDQ BX, SI
+
+	// Convert from &src[ret] to ret.
+	SUBQ DX, SI
+	MOVQ SI, ret+40(FP)
+	RET
+
+cmp1:
+	// In src's tail, compare 1 byte at a time.
+	CMPQ SI, R14
+	JAE  extendMatchEnd
+	MOVB (R15), AX
+	MOVB (SI), BX
+	CMPB AX, BX
+	JNE  extendMatchEnd
+	ADDQ $1, R15
+	ADDQ $1, SI
+	JMP  cmp1
+
+extendMatchEnd:
+	// Convert from &src[ret] to ret.
+	SUBQ DX, SI
+	MOVQ SI, ret+40(FP)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+//	- AX	.	.
+//	- BX	.	.
+//	- CX	56	shift (note that amd64 shifts by non-immediates must use CX).
+//	- DX	64	&src[0], tableSize
+//	- SI	72	&src[s]
+//	- DI	80	&dst[d]
+//	- R9	88	sLimit
+//	- R10	.	&src[nextEmit]
+//	- R11	96	prevHash, currHash, nextHash, offset
+//	- R12	104	&src[base], skip
+//	- R13	.	&src[nextS], &src[len(src) - 8]
+//	- R14	.	len(src), bytesBetweenHashLookups, &src[len(src)], x
+//	- R15	112	candidate
+//
+// The second column (56, 64, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
+TEXT ·encodeBlock(SB), 0, $32888-56
+	MOVQ dst_base+0(FP), DI
+	MOVQ src_base+24(FP), SI
+	MOVQ src_len+32(FP), R14
+
+	// shift, tableSize := uint32(32-8), 1<<8
+	MOVQ $24, CX
+	MOVQ $256, DX
+
+calcShift:
+	// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+	//	shift--
+	// }
+	CMPQ DX, $16384
+	JGE  varTable
+	CMPQ DX, R14
+	JGE  varTable
+	SUBQ $1, CX
+	SHLQ $1, DX
+	JMP  calcShift
+
+varTable:
+	// var table [maxTableSize]uint16
+	//
+	// In the asm code, unlike the Go code, we can zero-initialize only the
+	// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
+	// writes 16 bytes, so we can do only tableSize/8 writes instead of the
+	// 2048 writes that would zero-initialize all of table's 32768 bytes.
+	SHRQ $3, DX
+	LEAQ table-32768(SP), BX
+	PXOR X0, X0
+
+memclr:
+	MOVOU X0, 0(BX)
+	ADDQ  $16, BX
+	SUBQ  $1, DX
+	JNZ   memclr
+
+	// !!! DX = &src[0]
+	MOVQ SI, DX
+
+	// sLimit := len(src) - inputMargin
+	MOVQ R14, R9
+	SUBQ $15, R9
+
+	// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
+	// change for the rest of the function.
+	MOVQ CX, 56(SP)
+	MOVQ DX, 64(SP)
+	MOVQ R9, 88(SP)
+
+	// nextEmit := 0
+	MOVQ DX, R10
+
+	// s := 1
+	ADDQ $1, SI
+
+	// nextHash := hash(load32(src, s), shift)
+	MOVL  0(SI), R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+outer:
+	// for { etc }
+
+	// skip := 32
+	MOVQ $32, R12
+
+	// nextS := s
+	MOVQ SI, R13
+
+	// candidate := 0
+	MOVQ $0, R15
+
+inner0:
+	// for { etc }
+
+	// s := nextS
+	MOVQ R13, SI
+
+	// bytesBetweenHashLookups := skip >> 5
+	MOVQ R12, R14
+	SHRQ $5, R14
+
+	// nextS = s + bytesBetweenHashLookups
+	ADDQ R14, R13
+
+	// skip += bytesBetweenHashLookups
+	ADDQ R14, R12
+
+	// if nextS > sLimit { goto emitRemainder }
+	MOVQ R13, AX
+	SUBQ DX, AX
+	CMPQ AX, R9
+	JA   emitRemainder
+
+	// candidate = int(table[nextHash])
+	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
+	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+	BYTE $0x4e
+	BYTE $0x0f
+	BYTE $0xb7
+	BYTE $0x7c
+	BYTE $0x5c
+	BYTE $0x78
+
+	// table[nextHash] = uint16(s)
+	MOVQ SI, AX
+	SUBQ DX, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// nextHash = hash(load32(src, nextS), shift)
+	MOVL  0(R13), R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// if load32(src, s) != load32(src, candidate) { continue } break
+	MOVL 0(SI), AX
+	MOVL (DX)(R15*1), BX
+	CMPL AX, BX
+	JNE  inner0
+
+fourByteMatch:
+	// As per the encode_other.go code:
+	//
+	// A 4-byte match has been found. We'll later see etc.
+
+	// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
+	// on inputMargin in encode.go.
+	MOVQ SI, AX
+	SUBQ R10, AX
+	CMPQ AX, $16
+	JLE  emitLiteralFastPath
+
+	// ----------------------------------------
+	// Begin inline of the emitLiteral call.
+	//
+	// d += emitLiteral(dst[d:], src[nextEmit:s])
+
+	MOVL AX, BX
+	SUBL $1, BX
+
+	CMPL BX, $60
+	JLT  inlineEmitLiteralOneByte
+	CMPL BX, $256
+	JLT  inlineEmitLiteralTwoBytes
+
+inlineEmitLiteralThreeBytes:
+	MOVB $0xf4, 0(DI)
+	MOVW BX, 1(DI)
+	ADDQ $3, DI
+	JMP  inlineEmitLiteralMemmove
+
+inlineEmitLiteralTwoBytes:
+	MOVB $0xf0, 0(DI)
+	MOVB BX, 1(DI)
+	ADDQ $2, DI
+	JMP  inlineEmitLiteralMemmove
+
+inlineEmitLiteralOneByte:
+	SHLB $2, BX
+	MOVB BX, 0(DI)
+	ADDQ $1, DI
+
+inlineEmitLiteralMemmove:
+	// Spill local variables (registers) onto the stack; call; unspill.
+	//
+	// copy(dst[i:], lit)
+	//
+	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+	// DI, R10 and AX as arguments.
+	MOVQ DI, 0(SP)
+	MOVQ R10, 8(SP)
+	MOVQ AX, 16(SP)
+	ADDQ AX, DI              // Finish the "d +=" part of "d += emitLiteral(etc)".
+	MOVQ SI, 72(SP)
+	MOVQ DI, 80(SP)
+	MOVQ R15, 112(SP)
+	CALL runtime·memmove(SB)
+	MOVQ 56(SP), CX
+	MOVQ 64(SP), DX
+	MOVQ 72(SP), SI
+	MOVQ 80(SP), DI
+	MOVQ 88(SP), R9
+	MOVQ 112(SP), R15
+	JMP  inner1
+
+inlineEmitLiteralEnd:
+	// End inline of the emitLiteral call.
+	// ----------------------------------------
+
+emitLiteralFastPath:
+	// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
+	MOVB AX, BX
+	SUBB $1, BX
+	SHLB $2, BX
+	MOVB BX, (DI)
+	ADDQ $1, DI
+
+	// !!! Implement the copy from lit to dst as a 16-byte load and store.
+	// (Encode's documentation says that dst and src must not overlap.)
+	//
+	// This always copies 16 bytes, instead of only len(lit) bytes, but that's
+	// OK. Subsequent iterations will fix up the overrun.
+	//
+	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+	// 16-byte loads and stores. This technique probably wouldn't be as
+	// effective on architectures that are fussier about alignment.
+	MOVOU 0(R10), X0
+	MOVOU X0, 0(DI)
+	ADDQ  AX, DI
+
+inner1:
+	// for { etc }
+
+	// base := s
+	MOVQ SI, R12
+
+	// !!! offset := base - candidate
+	MOVQ R12, R11
+	SUBQ R15, R11
+	SUBQ DX, R11
+
+	// ----------------------------------------
+	// Begin inline of the extendMatch call.
+	//
+	// s = extendMatch(src, candidate+4, s+4)
+
+	// !!! R14 = &src[len(src)]
+	MOVQ src_len+32(FP), R14
+	ADDQ DX, R14
+
+	// !!! R13 = &src[len(src) - 8]
+	MOVQ R14, R13
+	SUBQ $8, R13
+
+	// !!! R15 = &src[candidate + 4]
+	ADDQ $4, R15
+	ADDQ DX, R15
+
+	// !!! s += 4
+	ADDQ $4, SI
+
+inlineExtendMatchCmp8:
+	// As long as we are 8 or more bytes before the end of src, we can load and
+	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+	CMPQ SI, R13
+	JA   inlineExtendMatchCmp1
+	MOVQ (R15), AX
+	MOVQ (SI), BX
+	CMPQ AX, BX
+	JNE  inlineExtendMatchBSF
+	ADDQ $8, R15
+	ADDQ $8, SI
+	JMP  inlineExtendMatchCmp8
+
+inlineExtendMatchBSF:
+	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
+	// the index of the first byte that differs. The BSF instruction finds the
+	// least significant 1 bit, the amd64 architecture is little-endian, and
+	// the shift by 3 converts a bit index to a byte index.
+	XORQ AX, BX
+	BSFQ BX, BX
+	SHRQ $3, BX
+	ADDQ BX, SI
+	JMP  inlineExtendMatchEnd
+
+inlineExtendMatchCmp1:
+	// In src's tail, compare 1 byte at a time.
+	CMPQ SI, R14
+	JAE  inlineExtendMatchEnd
+	MOVB (R15), AX
+	MOVB (SI), BX
+	CMPB AX, BX
+	JNE  inlineExtendMatchEnd
+	ADDQ $1, R15
+	ADDQ $1, SI
+	JMP  inlineExtendMatchCmp1
+
+inlineExtendMatchEnd:
+	// End inline of the extendMatch call.
+	// ----------------------------------------
+
+	// ----------------------------------------
+	// Begin inline of the emitCopy call.
+	//
+	// d += emitCopy(dst[d:], base-candidate, s-base)
+
+	// !!! length := s - base
+	MOVQ SI, AX
+	SUBQ R12, AX
+
+inlineEmitCopyLoop0:
+	// for length >= 68 { etc }
+	CMPL AX, $68
+	JLT  inlineEmitCopyStep1
+
+	// Emit a length 64 copy, encoded as 3 bytes.
+	MOVB $0xfe, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $64, AX
+	JMP  inlineEmitCopyLoop0
+
+inlineEmitCopyStep1:
+	// if length > 64 { etc }
+	CMPL AX, $64
+	JLE  inlineEmitCopyStep2
+
+	// Emit a length 60 copy, encoded as 3 bytes.
+	MOVB $0xee, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $60, AX
+
+inlineEmitCopyStep2:
+	// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
+	CMPL AX, $12
+	JGE  inlineEmitCopyStep3
+	CMPL R11, $2048
+	JGE  inlineEmitCopyStep3
+
+	// Emit the remaining copy, encoded as 2 bytes.
+	MOVB R11, 1(DI)
+	SHRL $8, R11
+	SHLB $5, R11
+	SUBB $4, AX
+	SHLB $2, AX
+	ORB  AX, R11
+	ORB  $1, R11
+	MOVB R11, 0(DI)
+	ADDQ $2, DI
+	JMP  inlineEmitCopyEnd
+
+inlineEmitCopyStep3:
+	// Emit the remaining copy, encoded as 3 bytes.
+	SUBL $1, AX
+	SHLB $2, AX
+	ORB  $2, AX
+	MOVB AX, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+
+inlineEmitCopyEnd:
+	// End inline of the emitCopy call.
+	// ----------------------------------------
+
+	// nextEmit = s
+	MOVQ SI, R10
+
+	// if s >= sLimit { goto emitRemainder }
+	MOVQ SI, AX
+	SUBQ DX, AX
+	CMPQ AX, R9
+	JAE  emitRemainder
+
+	// As per the encode_other.go code:
+	//
+	// We could immediately etc.
+
+	// x := load64(src, s-1)
+	MOVQ -1(SI), R14
+
+	// prevHash := hash(uint32(x>>0), shift)
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// table[prevHash] = uint16(s-1)
+	MOVQ SI, AX
+	SUBQ DX, AX
+	SUBQ $1, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// currHash := hash(uint32(x>>8), shift)
+	SHRQ  $8, R14
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// candidate = int(table[currHash])
+	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
+	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+	BYTE $0x4e
+	BYTE $0x0f
+	BYTE $0xb7
+	BYTE $0x7c
+	BYTE $0x5c
+	BYTE $0x78
+
+	// table[currHash] = uint16(s)
+	ADDQ $1, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// if uint32(x>>8) == load32(src, candidate) { continue }
+	MOVL (DX)(R15*1), BX
+	CMPL R14, BX
+	JEQ  inner1
+
+	// nextHash = hash(uint32(x>>16), shift)
+	SHRQ  $8, R14
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// s++
+	ADDQ $1, SI
+
+	// break out of the inner1 for loop, i.e. continue the outer loop.
+	JMP outer
+
+emitRemainder:
+	// if nextEmit < len(src) { etc }
+	MOVQ src_len+32(FP), AX
+	ADDQ DX, AX
+	CMPQ R10, AX
+	JEQ  encodeBlockEnd
+
+	// d += emitLiteral(dst[d:], src[nextEmit:])
+	//
+	// Push args.
+	MOVQ DI, 0(SP)
+	MOVQ $0, 8(SP)   // Unnecessary, as the callee ignores it, but conservative.
+	MOVQ $0, 16(SP)  // Unnecessary, as the callee ignores it, but conservative.
+	MOVQ R10, 24(SP)
+	SUBQ R10, AX
+	MOVQ AX, 32(SP)
+	MOVQ AX, 40(SP)  // Unnecessary, as the callee ignores it, but conservative.
+
+	// Spill local variables (registers) onto the stack; call; unspill.
+	MOVQ DI, 80(SP)
+	CALL ·emitLiteral(SB)
+	MOVQ 80(SP), DI
+
+	// Finish the "d +=" part of "d += emitLiteral(etc)".
+	ADDQ 48(SP), DI
+
+encodeBlockEnd:
+	MOVQ dst_base+0(FP), AX
+	SUBQ AX, DI
+	MOVQ DI, d+48(FP)
+	RET
diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go
new file mode 100644
index 0000000..dbcae90
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+	i, n := 0, uint(len(lit)-1)
+	switch {
+	case n < 60:
+		dst[0] = uint8(n)<<2 | tagLiteral
+		i = 1
+	case n < 1<<8:
+		dst[0] = 60<<2 | tagLiteral
+		dst[1] = uint8(n)
+		i = 2
+	default:
+		dst[0] = 61<<2 | tagLiteral
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		i = 3
+	}
+	return i + copy(dst[i:], lit)
+}
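+
+// For instance, a 5-byte literal is emitted as the single tag byte
+// uint8(5-1)<<2|tagLiteral == 0x10 followed by the 5 literal bytes.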
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= 65535
+//	4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+	for length >= 68 {
+		// Emit a length 64 copy, encoded as 3 bytes.
+		dst[i+0] = 63<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 64
+	}
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		dst[i+0] = 59<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 60
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		return i + 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	dst[i+1] = uint8(offset)
+	return i + 2
+}
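+
+// For instance, a copy of length 5 at offset 10 takes the 2-byte tagCopy1
+// form: dst[0] = 0<<5 | uint8(5-4)<<2 | tagCopy1 == 0x05 and dst[1] = 0x0a.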
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+//	0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+	}
+	return j
+}
+
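+// hash multiplies by a large odd constant and keeps the top 32-shift bits,
+// spreading nearby 4-byte values across the table used by encodeBlock.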
+func hash(u, shift uint32) uint32 {
+	return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src)) &&
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+	// The table element type is uint16, as s < sLimit and sLimit < len(src)
+	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
+	const (
+		maxTableSize = 1 << 14
+		// tableMask is redundant, but helps the compiler eliminate bounds
+		// checks.
+		tableMask = maxTableSize - 1
+	)
+	shift := uint32(32 - 8)
+	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+		shift--
+	}
+	// In Go, all array elements are zero-initialized, so there is no advantage
+	// to a smaller tableSize per se. However, it matches the C++ algorithm,
+	// and in the asm versions of this code, we can get away with zeroing only
+	// the first tableSize elements.
+	var table [maxTableSize]uint16
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := len(src) - inputMargin
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := 0
+
+	// The encoded form must start with a literal, as there are no previous
+	// bytes to copy, so we start looking for hash matches at s == 1.
+	s := 1
+	nextHash := hash(load32(src, s), shift)
+
+	for {
+		// Copied from the C++ snappy implementation:
+		//
+		// Heuristic match skipping: If 32 bytes are scanned with no matches
+		// found, start looking only at every other byte. If 32 more bytes are
+		// scanned (or skipped), look at every third byte, etc.. When a match
+		// is found, immediately go back to looking at every byte. This is a
+		// small loss (~5% performance, ~0.1% density) for compressible data
+		// due to more bookkeeping, but for non-compressible data (such as
+		// JPEG) it's a huge win since the compressor quickly "realizes" the
+		// data is incompressible and doesn't bother looking for matches
+		// everywhere.
+		//
+		// The "skip" variable keeps track of how many bytes there are since
+		// the last match; dividing it by 32 (ie. right-shifting by five) gives
+		// the number of bytes to move ahead for each iteration.
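+		//
+		// Concretely: skip starts at 32, so the step (skip >> 5) is 1 for
+		// the first 32 lookups, 2 once skip reaches 64, 3 once it reaches
+		// 96, and so on.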
+		skip := 32
+
+		nextS := s
+		candidate := 0
+		for {
+			s = nextS
+			bytesBetweenHashLookups := skip >> 5
+			nextS = s + bytesBetweenHashLookups
+			skip += bytesBetweenHashLookups
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			candidate = int(table[nextHash&tableMask])
+			table[nextHash&tableMask] = uint16(s)
+			nextHash = hash(load32(src, nextS), shift)
+			if load32(src, s) == load32(src, candidate) {
+				break
+			}
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		d += emitLiteral(dst[d:], src[nextEmit:s])
+
+		// Call emitCopy, and then see if another emitCopy could be our next
+		// move. Repeat until we find no match for the input immediately after
+		// what was consumed by the last emitCopy call.
+		//
+		// If we exit this loop normally then we need to call emitLiteral next,
+		// though we don't yet know how big the literal will be. We handle that
+		// by proceeding to the next iteration of the main loop. We also can
+		// exit this loop via goto if we get close to exhausting the input.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+			base := s
+
+			// Extend the 4-byte match as long as possible.
+			//
+			// This is an inlined version of:
+			//	s = extendMatch(src, candidate+4, s+4)
+			s += 4
+			for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+			}
+
+			d += emitCopy(dst[d:], base-candidate, s-base)
+			nextEmit = s
+			if s >= sLimit {
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-1 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
+			x := load64(src, s-1)
+			prevHash := hash(uint32(x>>0), shift)
+			table[prevHash&tableMask] = uint16(s - 1)
+			currHash := hash(uint32(x>>8), shift)
+			candidate = int(table[currHash&tableMask])
+			table[currHash&tableMask] = uint16(s)
+			if uint32(x>>8) != load32(src, candidate) {
+				nextHash = hash(uint32(x>>16), shift)
+				s++
+				break
+			}
+		}
+	}
+
+emitRemainder:
+	if nextEmit < len(src) {
+		d += emitLiteral(dst[d:], src[nextEmit:])
+	}
+	return d
+}
diff --git a/vendor/github.com/golang/snappy/go.mod b/vendor/github.com/golang/snappy/go.mod
new file mode 100644
index 0000000..f6406bb
--- /dev/null
+++ b/vendor/github.com/golang/snappy/go.mod
@@ -0,0 +1 @@
+module github.com/golang/snappy
diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go
new file mode 100644
index 0000000..ece692e
--- /dev/null
+++ b/vendor/github.com/golang/snappy/snappy.go
@@ -0,0 +1,98 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the Snappy compression format. It aims for very
+// high speeds and reasonable compression.
+//
+// There are actually two Snappy formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as a Snappy stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// The canonical, C++ implementation is at https://github.com/google/snappy and
+// it only implements the block format.
+package snappy // import "github.com/golang/snappy"
+
+import (
+	"hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+  - If m < 60, the next 1 + m bytes are literal bytes.
+  - Otherwise, let n be the little-endian unsigned integer denoted by the next
+    m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+    of the offset. The next byte is bits 0-7 of the offset.
+  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+    The length is 1 + m. The offset is the little-endian unsigned integer
+    denoted by the next 2 bytes.
+  - For l == 3, this tag is a legacy format that is no longer issued by most
+    encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+    integer denoted by the next 4 bytes.
+*/
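+// For example, the tag byte 0x10 has l == 0 and m == 4: a literal of
+// 1+4 = 5 bytes. The tag byte 0x05 has l == 1 (tagCopy1) with length
+// 4+1 = 5.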
+const (
+	tagLiteral = 0x00
+	tagCopy1   = 0x01
+	tagCopy2   = 0x02
+	tagCopy4   = 0x03
+)
+
+const (
+	checksumSize    = 4
+	chunkHeaderSize = 4
+	magicChunk      = "\xff\x06\x00\x00" + magicBody
+	magicBody       = "sNaPpY"
+
+	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
+	// part of the wire format per se, but some parts of the encoder assume
+	// that an offset fits into a uint16.
+	//
+	// Also, for the framing format (Writer type instead of Encode function),
+	// https://github.com/google/snappy/blob/master/framing_format.txt says
+	// that "the uncompressed data in a chunk must be no longer than 65536
+	// bytes".
+	maxBlockSize = 65536
+
+	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+	// hard coded to be a const instead of a variable, so that obufLen can also
+	// be a const. Their equivalence is confirmed by
+	// TestMaxEncodedLenOfMaxBlockSize.
+	maxEncodedLenOfMaxBlockSize = 76490
+
+	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
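+// (the "masked" CRC-32C: rotate the CRC right by 15 bits, then add
+// 0xa282ead8, which keeps checksums of data containing embedded CRCs well
+// behaved).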
+func crc(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
+	return uint32(c>>15|c<<17) + 0xa282ead8
+}
diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml
new file mode 100644
index 0000000..d8156a6
--- /dev/null
+++ b/vendor/github.com/google/uuid/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+
+go:
+  - 1.4.3
+  - 1.5.3
+  - tip
+
+script:
+  - go test -v ./...
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md
new file mode 100644
index 0000000..04fdf09
--- /dev/null
+++ b/vendor/github.com/google/uuid/CONTRIBUTING.md
@@ -0,0 +1,10 @@
+# How to contribute
+
+We definitely welcome patches and contributions to this project!
+
+### Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://cla.developers.google.com/clas).
+
+You may have already signed it for other Google projects.
diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS
new file mode 100644
index 0000000..b4bb97f
--- /dev/null
+++ b/vendor/github.com/google/uuid/CONTRIBUTORS
@@ -0,0 +1,9 @@
+Paul Borman <borman@google.com>
+bmatsuo
+shawnps
+theory
+jboverfelt
+dsymonds
+cd1
+wallclockbuilder
+dansouza
diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE
new file mode 100644
index 0000000..5dc6826
--- /dev/null
+++ b/vendor/github.com/google/uuid/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
new file mode 100644
index 0000000..9d92c11
--- /dev/null
+++ b/vendor/github.com/google/uuid/README.md
@@ -0,0 +1,19 @@
+# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
+The uuid package generates and inspects UUIDs based on
+[RFC 4122](http://tools.ietf.org/html/rfc4122)
+and DCE 1.1: Authentication and Security Services. 
+
+This package is based on the github.com/pborman/uuid package (previously named
+code.google.com/p/go-uuid).  It differs from these earlier packages in that
+a UUID is a 16 byte array rather than a byte slice.  One loss due to this
+change is the ability to represent an invalid UUID (vs a NIL UUID).
+
+###### Install
+`go get github.com/google/uuid`
+
+###### Documentation 
+[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
+
+Full `go doc` style documentation for the package can be viewed online without
+installing this package by using the GoDoc site here: 
+http://godoc.org/github.com/google/uuid
diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go
new file mode 100644
index 0000000..fa820b9
--- /dev/null
+++ b/vendor/github.com/google/uuid/dce.go
@@ -0,0 +1,80 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"fmt"
+	"os"
+)
+
+// A Domain represents a Version 2 domain
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+	Person = Domain(0)
+	Group  = Domain(1)
+	Org    = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group.  The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
+	uuid, err := NewUUID()
+	if err == nil {
+		uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+		uuid[9] = byte(domain)
+		binary.BigEndian.PutUint32(uuid[0:], id)
+	}
+	return uuid, err
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+//  NewDCESecurity(Person, uint32(os.Getuid()))
+func NewDCEPerson() (UUID, error) {
+	return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+//  NewDCESecurity(Group, uint32(os.Getgid()))
+func NewDCEGroup() (UUID, error) {
+	return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID.  Domains are only defined
+// for Version 2 UUIDs.
+func (uuid UUID) Domain() Domain {
+	return Domain(uuid[9])
+}
+
+// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
+// UUIDs.
+func (uuid UUID) ID() uint32 {
+	return binary.BigEndian.Uint32(uuid[0:4])
+}
+
+func (d Domain) String() string {
+	switch d {
+	case Person:
+		return "Person"
+	case Group:
+		return "Group"
+	case Org:
+		return "Org"
+	}
+	return fmt.Sprintf("Domain%d", int(d))
+}
diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go
new file mode 100644
index 0000000..5b8a4b9
--- /dev/null
+++ b/vendor/github.com/google/uuid/doc.go
@@ -0,0 +1,12 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uuid generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
+// Services.
+//
+// A UUID is a 16 byte (128 bit) array.  UUIDs may be used as keys to
+// maps or compared directly.
+package uuid
diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod
new file mode 100644
index 0000000..fc84cd7
--- /dev/null
+++ b/vendor/github.com/google/uuid/go.mod
@@ -0,0 +1 @@
+module github.com/google/uuid
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
new file mode 100644
index 0000000..b174616
--- /dev/null
+++ b/vendor/github.com/google/uuid/hash.go
@@ -0,0 +1,53 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"crypto/md5"
+	"crypto/sha1"
+	"hash"
+)
+
+// Well known namespace IDs and UUIDs
+var (
+	NameSpaceDNS  = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceURL  = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceOID  = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
+	Nil           UUID // empty UUID, all zeros
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h.  The hash should be at least 16 bytes in length.  The
+// first 16 bytes of the hash are used to form the UUID.  The version of the
+// UUID will be the lower 4 bits of version.  NewHash is used to implement
+// NewMD5 and NewSHA1.
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
+	h.Reset()
+	h.Write(space[:])
+	h.Write(data)
+	s := h.Sum(nil)
+	var uuid UUID
+	copy(uuid[:], s)
+	uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
+	return uuid
+}
+
+// NewMD5 returns a new MD5 (Version 3) UUID based on the
+// supplied name space and data.  It is the same as calling:
+//
+//  NewHash(md5.New(), space, data, 3)
+func NewMD5(space UUID, data []byte) UUID {
+	return NewHash(md5.New(), space, data, 3)
+}
+
+// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
+// supplied name space and data.  It is the same as calling:
+//
+//  NewHash(sha1.New(), space, data, 5)
+func NewSHA1(space UUID, data []byte) UUID {
+	return NewHash(sha1.New(), space, data, 5)
+}
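+
+// As an illustration, name-based UUIDs are deterministic:
+//
+//	u1 := NewSHA1(NameSpaceDNS, []byte("example.com"))
+//	u2 := NewSHA1(NameSpaceDNS, []byte("example.com"))
+//	// u1 == u2: the same namespace and name always yield the same UUID.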
diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go
new file mode 100644
index 0000000..7f9e0c6
--- /dev/null
+++ b/vendor/github.com/google/uuid/marshal.go
@@ -0,0 +1,37 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "fmt"
+
+// MarshalText implements encoding.TextMarshaler.
+func (uuid UUID) MarshalText() ([]byte, error) {
+	var js [36]byte
+	encodeHex(js[:], uuid)
+	return js[:], nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (uuid *UUID) UnmarshalText(data []byte) error {
+	id, err := ParseBytes(data)
+	if err == nil {
+		*uuid = id
+	}
+	return err
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (uuid UUID) MarshalBinary() ([]byte, error) {
+	return uuid[:], nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (uuid *UUID) UnmarshalBinary(data []byte) error {
+	if len(data) != 16 {
+		return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+	}
+	copy(uuid[:], data)
+	return nil
+}
diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go
new file mode 100644
index 0000000..d651a2b
--- /dev/null
+++ b/vendor/github.com/google/uuid/node.go
@@ -0,0 +1,90 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"sync"
+)
+
+var (
+	nodeMu sync.Mutex
+	ifname string  // name of interface being used
+	nodeID [6]byte // hardware for version 1 UUIDs
+	zeroID [6]byte // nodeID with only 0's
+)
+
+// NodeInterface returns the name of the interface from which the NodeID was
+// derived.  The interface "user" is returned if the NodeID was set by
+// SetNodeID.
+func NodeInterface() string {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	return ifname
+}
+
+// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
+// If name is "" then the first usable interface found will be used or a random
+// Node ID will be generated.  If a named interface cannot be found then false
+// is returned.
+//
+// SetNodeInterface never fails when name is "".
+func SetNodeInterface(name string) bool {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	return setNodeInterface(name)
+}
+
+func setNodeInterface(name string) bool {
+	iname, addr := getHardwareInterface(name) // null implementation for js
+	if iname != "" && addr != nil {
+		ifname = iname
+		copy(nodeID[:], addr)
+		return true
+	}
+
+	// We found no interfaces with a valid hardware address.  If name
+	// does not specify a specific interface generate a random Node ID
+	// (section 4.1.6)
+	if name == "" {
+		ifname = "random"
+		randomBits(nodeID[:])
+		return true
+	}
+	return false
+}
+
+// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
+// if not already set.
+func NodeID() []byte {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	nid := nodeID
+	return nid[:]
+}
+
+// SetNodeID sets the Node ID to be used for Version 1 UUIDs.  The first 6 bytes
+// of id are used.  If id is less than 6 bytes then false is returned and the
+// Node ID is not set.
+func SetNodeID(id []byte) bool {
+	if len(id) < 6 {
+		return false
+	}
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	copy(nodeID[:], id)
+	ifname = "user"
+	return true
+}
+
+// NodeID returns the 6 byte node id encoded in uuid.  It returns nil if uuid is
+// not valid.  The NodeID is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) NodeID() []byte {
+	var node [6]byte
+	copy(node[:], uuid[10:])
+	return node[:]
+}
diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go
new file mode 100644
index 0000000..24b78ed
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_js.go
@@ -0,0 +1,12 @@
+// Copyright 2017 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build js
+
+package uuid
+
+// getHardwareInterface returns nil values for the JS version of the code.
+// This removes the "net" dependency, because it is not used in the browser.
+// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
+func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go
new file mode 100644
index 0000000..0cbbcdd
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_net.go
@@ -0,0 +1,33 @@
+// Copyright 2017 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !js
+
+package uuid
+
+import "net"
+
+var interfaces []net.Interface // cached list of interfaces
+
+// getHardwareInterface returns the name and hardware address of interface name.
+// If name is "" then the name and hardware address of one of the system's
+// interfaces is returned.  If no interfaces are found (name does not exist or
+// there are no interfaces) then "", nil is returned.
+//
+// Only addresses of at least 6 bytes are returned.
+func getHardwareInterface(name string) (string, []byte) {
+	if interfaces == nil {
+		var err error
+		interfaces, err = net.Interfaces()
+		if err != nil {
+			return "", nil
+		}
+	}
+	for _, ifs := range interfaces {
+		if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
+			return ifs.Name, ifs.HardwareAddr
+		}
+	}
+	return "", nil
+}
diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go
new file mode 100644
index 0000000..f326b54
--- /dev/null
+++ b/vendor/github.com/google/uuid/sql.go
@@ -0,0 +1,59 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"database/sql/driver"
+	"fmt"
+)
+
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
+// Currently, database types that map to string and []byte are supported. Please
+// consult database-specific driver documentation for matching types.
+func (uuid *UUID) Scan(src interface{}) error {
+	switch src := src.(type) {
+	case nil:
+		return nil
+
+	case string:
+		// if an empty UUID comes from a table, we return a null UUID
+		if src == "" {
+			return nil
+		}
+
+		// see Parse for required string format
+		u, err := Parse(src)
+		if err != nil {
+			return fmt.Errorf("Scan: %v", err)
+		}
+
+		*uuid = u
+
+	case []byte:
+		// if an empty UUID comes from a table, we return a null UUID
+		if len(src) == 0 {
+			return nil
+		}
+
+		// assumes a simple slice of bytes if 16 bytes
+		// otherwise attempts to parse
+		if len(src) != 16 {
+			return uuid.Scan(string(src))
+		}
+		copy((*uuid)[:], src)
+
+	default:
+		return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
+	}
+
+	return nil
+}
+
+// Value implements driver.Valuer so that UUIDs can be written to databases
+// transparently. Currently, UUIDs map to strings. Please consult
+// database-specific driver documentation for matching types.
+func (uuid UUID) Value() (driver.Value, error) {
+	return uuid.String(), nil
+}
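
For context, a minimal sketch of how the Scan and Value implementations above round-trip a UUID the way database/sql would, without involving a real driver (the example program is illustrative and not part of this change):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	id := uuid.New()

	// Value is what database/sql hands to the driver: the canonical string.
	v, err := id.Value()
	if err != nil {
		panic(err)
	}
	fmt.Println(v)

	// Scan accepts string or []byte columns; exactly 16 raw bytes are copied
	// directly, any other length is parsed as text.
	var got uuid.UUID
	if err := got.Scan(v); err != nil {
		panic(err)
	}
	fmt.Println(got == id) // true
}
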
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
new file mode 100644
index 0000000..e6ef06c
--- /dev/null
+++ b/vendor/github.com/google/uuid/time.go
@@ -0,0 +1,123 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"sync"
+	"time"
+)
+
+// A Time represents a time as the number of 100-nanosecond intervals since
+// 15 Oct 1582.
+type Time int64
+
+const (
+	lillian    = 2299160          // Julian day of 15 Oct 1582
+	unix       = 2440587          // Julian day of 1 Jan 1970
+	epoch      = unix - lillian   // Days between epochs
+	g1582      = epoch * 86400    // seconds between epochs
+	g1582ns100 = g1582 * 10000000 // 100-nanosecond intervals between epochs
+)
+
+var (
+	timeMu   sync.Mutex
+	lasttime uint64 // last time we returned
+	clockSeq uint16 // clock sequence for this run
+
+	timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds since the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+	sec = int64(t - g1582ns100)
+	nsec = (sec % 10000000) * 100
+	sec /= 10000000
+	return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582)
+// and the current clock sequence, adjusting the clock sequence as needed.  An
+// error is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+	t := timeNow()
+
+	// If we don't have a clock sequence already, set one.
+	if clockSeq == 0 {
+		setClockSequence(-1)
+	}
+	now := uint64(t.UnixNano()/100) + g1582ns100
+
+	// If time has gone backwards with this clock sequence then we
+	// increment the clock sequence
+	if now <= lasttime {
+		clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
+	}
+	lasttime = now
+	return Time(now), clockSeq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set.  The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated.  Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID.  (section 4.2.1.1)
+func ClockSequence() int {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	return clockSequence()
+}
+
+func clockSequence() int {
+	if clockSeq == 0 {
+		setClockSequence(-1)
+	}
+	return int(clockSeq & 0x3fff)
+}
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq.  Setting to
+// -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+	if seq == -1 {
+		var b [2]byte
+		randomBits(b[:]) // clock sequence
+		seq = int(b[0])<<8 | int(b[1])
+	}
+	oldSeq := clockSeq
+	clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+	if oldSeq != clockSeq {
+		lasttime = 0
+	}
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid.  The time is only defined for version 1 and 2 UUIDs.
+func (uuid UUID) Time() Time {
+	time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+	time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+	time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+	return Time(time)
+}
+
+// ClockSequence returns the clock sequence encoded in uuid.
+// The clock sequence is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) ClockSequence() int {
+	return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
+}
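
A rough illustration of the clock-sequence machinery above; the example program is illustrative only and not part of this change:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Pin the clock sequence so runs are reproducible; normally it is seeded
	// randomly on first use.  SetClockSequence keeps the low 14 bits and ORs
	// in the variant bits, so the stored value here becomes 0x9234.
	uuid.SetClockSequence(0x1234)

	t, seq, err := uuid.GetTime()
	if err != nil {
		panic(err)
	}
	sec, nsec := t.UnixTime()
	fmt.Printf("unix=%d.%09d seq=%#x\n", sec, nsec, seq)
}
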
diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go
new file mode 100644
index 0000000..5ea6c73
--- /dev/null
+++ b/vendor/github.com/google/uuid/util.go
@@ -0,0 +1,43 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+	if _, err := io.ReadFull(rander, b); err != nil {
+		panic(err.Error()) // rand should never fail
+	}
+}
+
+// xvalues gives the value of each byte as a hexadecimal digit, or 255.
+var xvalues = [256]byte{
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts hex characters x1 and x2 into a byte.
+func xtob(x1, x2 byte) (byte, bool) {
+	b1 := xvalues[x1]
+	b2 := xvalues[x2]
+	return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
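
To make the lookup-table trick concrete: the real xtob and xvalues are unexported inside package uuid, so the standalone hexVal and xtob below are illustrative stand-ins, not this package's API:

package main

import "fmt"

// hexVal mirrors the xvalues table: the value of an ASCII byte as a hex
// digit, or 255 when the byte is not a hex digit.
func hexVal(c byte) byte {
	switch {
	case '0' <= c && c <= '9':
		return c - '0'
	case 'a' <= c && c <= 'f':
		return c - 'a' + 10
	case 'A' <= c && c <= 'F':
		return c - 'A' + 10
	}
	return 255
}

// xtob combines two hex characters into one byte; ok is false if either
// character was not a hex digit.
func xtob(x1, x2 byte) (byte, bool) {
	b1, b2 := hexVal(x1), hexVal(x2)
	return b1<<4 | b2, b1 != 255 && b2 != 255
}

func main() {
	b, ok := xtob('a', 'f')
	fmt.Printf("%#x %v\n", b, ok) // 0xaf true
}
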
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
new file mode 100644
index 0000000..524404c
--- /dev/null
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -0,0 +1,245 @@
+// Copyright 2018 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID [16]byte
+
+// A Version represents a UUID's version.
+type Version byte
+
+// A Variant represents a UUID's variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+	Invalid   = Variant(iota) // Invalid UUID
+	RFC4122                   // The variant specified in RFC4122
+	Reserved                  // Reserved, NCS backward compatibility.
+	Microsoft                 // Reserved, Microsoft Corporation backward compatibility.
+	Future                    // Reserved for future definition.
+)
+
+var rander = rand.Reader // random function
+
+// Parse decodes s into a UUID or returns an error.  Both the standard UUID
+// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
+// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
+// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
+func Parse(s string) (UUID, error) {
+	var uuid UUID
+	switch len(s) {
+	// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	case 36:
+
+	// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	case 36 + 9:
+		if strings.ToLower(s[:9]) != "urn:uuid:" {
+			return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
+		}
+		s = s[9:]
+
+	// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+	case 36 + 2:
+		s = s[1:]
+
+	// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+	case 32:
+		var ok bool
+		for i := range uuid {
+			uuid[i], ok = xtob(s[i*2], s[i*2+1])
+			if !ok {
+				return uuid, errors.New("invalid UUID format")
+			}
+		}
+		return uuid, nil
+	default:
+		return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
+	}
+	// s is now at least 36 bytes long
+	// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+		return uuid, errors.New("invalid UUID format")
+	}
+	for i, x := range [16]int{
+		0, 2, 4, 6,
+		9, 11,
+		14, 16,
+		19, 21,
+		24, 26, 28, 30, 32, 34} {
+		v, ok := xtob(s[x], s[x+1])
+		if !ok {
+			return uuid, errors.New("invalid UUID format")
+		}
+		uuid[i] = v
+	}
+	return uuid, nil
+}
+
+// ParseBytes is like Parse, except it parses a byte slice instead of a string.
+func ParseBytes(b []byte) (UUID, error) {
+	var uuid UUID
+	switch len(b) {
+	case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+		if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
+			return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
+		}
+		b = b[9:]
+	case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+		b = b[1:]
+	case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+		var ok bool
+		for i := 0; i < 32; i += 2 {
+			uuid[i/2], ok = xtob(b[i], b[i+1])
+			if !ok {
+				return uuid, errors.New("invalid UUID format")
+			}
+		}
+		return uuid, nil
+	default:
+		return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
+	}
+	// s is now at least 36 bytes long
+	// it must be of the form  xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
+		return uuid, errors.New("invalid UUID format")
+	}
+	for i, x := range [16]int{
+		0, 2, 4, 6,
+		9, 11,
+		14, 16,
+		19, 21,
+		24, 26, 28, 30, 32, 34} {
+		v, ok := xtob(b[x], b[x+1])
+		if !ok {
+			return uuid, errors.New("invalid UUID format")
+		}
+		uuid[i] = v
+	}
+	return uuid, nil
+}
+
+// MustParse is like Parse but panics if the string cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled UUIDs.
+func MustParse(s string) UUID {
+	uuid, err := Parse(s)
+	if err != nil {
+		panic(`uuid: Parse(` + s + `): ` + err.Error())
+	}
+	return uuid
+}
+
+// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
+// does not have a length of 16. The bytes are copied from the slice.
+func FromBytes(b []byte) (uuid UUID, err error) {
+	err = uuid.UnmarshalBinary(b)
+	return uuid, err
+}
+
+// Must returns uuid if err is nil and panics otherwise.
+func Must(uuid UUID, err error) UUID {
+	if err != nil {
+		panic(err)
+	}
+	return uuid
+}
+
+// String returns the string form of uuid,
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) String() string {
+	var buf [36]byte
+	encodeHex(buf[:], uuid)
+	return string(buf[:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+	var buf [36 + 9]byte
+	copy(buf[:], "urn:uuid:")
+	encodeHex(buf[9:], uuid)
+	return string(buf[:])
+}
+
+func encodeHex(dst []byte, uuid UUID) {
+	hex.Encode(dst, uuid[:4])
+	dst[8] = '-'
+	hex.Encode(dst[9:13], uuid[4:6])
+	dst[13] = '-'
+	hex.Encode(dst[14:18], uuid[6:8])
+	dst[18] = '-'
+	hex.Encode(dst[19:23], uuid[8:10])
+	dst[23] = '-'
+	hex.Encode(dst[24:], uuid[10:])
+}
+
+// Variant returns the variant encoded in uuid.
+func (uuid UUID) Variant() Variant {
+	switch {
+	case (uuid[8] & 0xc0) == 0x80:
+		return RFC4122
+	case (uuid[8] & 0xe0) == 0xc0:
+		return Microsoft
+	case (uuid[8] & 0xe0) == 0xe0:
+		return Future
+	default:
+		return Reserved
+	}
+}
+
+// Version returns the version of uuid.
+func (uuid UUID) Version() Version {
+	return Version(uuid[6] >> 4)
+}
+
+func (v Version) String() string {
+	if v > 15 {
+		return fmt.Sprintf("BAD_VERSION_%d", v)
+	}
+	return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+	switch v {
+	case RFC4122:
+		return "RFC4122"
+	case Reserved:
+		return "Reserved"
+	case Microsoft:
+		return "Microsoft"
+	case Future:
+		return "Future"
+	case Invalid:
+		return "Invalid"
+	}
+	return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+	if r == nil {
+		rander = rand.Reader
+		return
+	}
+	rander = r
+}
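
A small sketch of the four encodings Parse accepts; the example UUID is arbitrary and the program is illustrative only:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// The same UUID in all four encodings Parse accepts.
	for _, s := range []string{
		"f47ac10b-58cc-4372-a567-0e02b2c3d479",
		"urn:uuid:f47ac10b-58cc-4372-a567-0e02b2c3d479",
		"{f47ac10b-58cc-4372-a567-0e02b2c3d479}",
		"f47ac10b58cc4372a5670e02b2c3d479",
	} {
		id, err := uuid.Parse(s)
		if err != nil {
			panic(err)
		}
		fmt.Println(id, id.Version(), id.Variant()) // ... VERSION_4 RFC4122
	}
}
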
diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go
new file mode 100644
index 0000000..199a1ac
--- /dev/null
+++ b/vendor/github.com/google/uuid/version1.go
@@ -0,0 +1,44 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time.  If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically.  If the clock
+// sequence has not been set by SetClockSequence then it will be set
+// automatically.  If GetTime fails to return the current time, NewUUID
+// returns the zero UUID and an error.
+//
+// In most cases, New should be used.
+func NewUUID() (UUID, error) {
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	nodeMu.Unlock()
+
+	var uuid UUID
+	now, seq, err := GetTime()
+	if err != nil {
+		return uuid, err
+	}
+
+	timeLow := uint32(now & 0xffffffff)
+	timeMid := uint16((now >> 32) & 0xffff)
+	timeHi := uint16((now >> 48) & 0x0fff)
+	timeHi |= 0x1000 // Version 1
+
+	binary.BigEndian.PutUint32(uuid[0:], timeLow)
+	binary.BigEndian.PutUint16(uuid[4:], timeMid)
+	binary.BigEndian.PutUint16(uuid[6:], timeHi)
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+	copy(uuid[10:], nodeID[:])
+
+	return uuid, nil
+}
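
An illustrative sketch of NewUUID, reading the packed fields back with the accessors defined earlier in this package (not part of this change):

package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewUUID()
	if err != nil {
		panic(err)
	}
	fmt.Println(id, id.Version()) // VERSION_1

	// The packing above is reversible: Time() and NodeID() read the
	// timestamp and node fields back out of the 16 bytes.
	sec, nsec := id.Time().UnixTime()
	fmt.Println(time.Unix(sec, nsec)) // close to the current time
	fmt.Printf("node=%x seq=%#x\n", id.NodeID(), id.ClockSequence())
}
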
diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go
new file mode 100644
index 0000000..84af91c
--- /dev/null
+++ b/vendor/github.com/google/uuid/version4.go
@@ -0,0 +1,38 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "io"
+
+// New creates a new random UUID or panics.  New is equivalent to
+// the expression
+//
+//    uuid.Must(uuid.NewRandom())
+func New() UUID {
+	return Must(NewRandom())
+}
+
+// NewRandom returns a Random (Version 4) UUID.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+//  Randomly generated UUIDs have 122 random bits.  One's annual risk of being
+//  hit by a meteorite is estimated to be one chance in 17 billion, which
+//  means the probability is about 0.00000000006 (6 × 10⁻¹¹). That is
+//  equivalent to the odds of creating a few tens of trillions of UUIDs in a
+//  year and having one duplicate.
+func NewRandom() (UUID, error) {
+	var uuid UUID
+	_, err := io.ReadFull(rander, uuid[:])
+	if err != nil {
+		return Nil, err
+	}
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid, nil
+}
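
A minimal usage sketch of New and NewRandom (illustrative only):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	id := uuid.New() // panics only if crypto/rand fails
	fmt.Println(id)

	// NewRandom is the non-panicking form; note the fixed version and
	// variant bits set above.
	id2, err := uuid.NewRandom()
	if err != nil {
		panic(err)
	}
	fmt.Println(id2.Version(), id2.Variant()) // VERSION_4 RFC4122
}
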
diff --git a/vendor/github.com/hashicorp/go-uuid/.travis.yml b/vendor/github.com/hashicorp/go-uuid/.travis.yml
new file mode 100644
index 0000000..7698490
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+
+sudo: false
+
+go:
+  - 1.4
+  - 1.5
+  - 1.6
+  - tip
+
+script:
+  - go test -bench . -benchmem -v ./...
diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE
new file mode 100644
index 0000000..e87a115
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the terms of
+        a Secondary License.
+
+1.6. "Executable Form"
+
+     means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+     means a work that combines Covered Software with other material, in a
+     separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+     means this document.
+
+1.9. "Licensable"
+
+     means having the right to grant, to the maximum extent possible, whether
+     at the time of the initial grant or subsequently, any and all of the
+     rights conveyed by this License.
+
+1.10. "Modifications"
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to,
+        deletion from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+      means any patent claim(s), including without limitation, method,
+      process, and apparatus claims, in any patent Licensable by such
+      Contributor that would be infringed, but for the grant of the License,
+      by the making, using, selling, offering for sale, having made, import,
+      or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+      means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, "You" includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, "control" means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or
+        as part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its
+        Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution
+     become effective for each Contribution on the date the Contributor first
+     distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under
+     this License. No additional rights or licenses will be implied from the
+     distribution or licensing of Covered Software under this License.
+     Notwithstanding Section 2.1(b) above, no patent license is granted by a
+     Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party's
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of
+        its Contributions.
+
+     This License does not grant any rights in the trademarks, service marks,
+     or logos of any Contributor (except as may be necessary to comply with
+     the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this
+     License (see Section 10.2) or under the terms of a Secondary License (if
+     permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its
+     Contributions are its original creation(s) or it has sufficient rights to
+     grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under
+     applicable copyright doctrines of fair use, fair dealing, or other
+     equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under
+     the terms of this License. You must inform recipients that the Source
+     Code Form of the Covered Software is governed by the terms of this
+     License, and how they can obtain a copy of this License. You may not
+     attempt to alter or restrict the recipients' rights in the Source Code
+     Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this
+        License, or sublicense it under different terms, provided that the
+        license for the Executable Form does not attempt to limit or alter the
+        recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for
+     the Covered Software. If the Larger Work is a combination of Covered
+     Software with a work governed by one or more Secondary Licenses, and the
+     Covered Software is not Incompatible With Secondary Licenses, this
+     License permits You to additionally distribute such Covered Software
+     under the terms of such Secondary License(s), so that the recipient of
+     the Larger Work may, at their option, further distribute the Covered
+     Software under the terms of either this License or such Secondary
+     License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices
+     (including copyright notices, patent notices, disclaimers of warranty, or
+     limitations of liability) contained within the Source Code Form of the
+     Covered Software, except that You may alter any license notices to the
+     extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on
+     behalf of any Contributor. You must make it absolutely clear that any
+     such warranty, support, indemnity, or liability obligation is offered by
+     You alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute,
+   judicial order, or regulation then You must: (a) comply with the terms of
+   this License to the maximum extent possible; and (b) describe the
+   limitations and the code they affect. Such description must be placed in a
+   text file included with all distributions of the Covered Software under
+   this License. Except to the extent prohibited by statute or regulation,
+   such description must be sufficiently detailed for a recipient of ordinary
+   skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing
+     basis, if such Contributor fails to notify You of the non-compliance by
+     some reasonable means prior to 60 days after You have come back into
+     compliance. Moreover, Your grants from a particular Contributor are
+     reinstated on an ongoing basis if such Contributor notifies You of the
+     non-compliance by some reasonable means, this is the first time You have
+     received notice of non-compliance with this License from such
+     Contributor, and You become compliant prior to 30 days after Your receipt
+     of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions,
+     counter-claims, and cross-claims) alleging that a Contributor Version
+     directly or indirectly infringes any patent, then the rights granted to
+     You by any and all Contributors for the Covered Software under Section
+     2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an "as is" basis,
+   without warranty of any kind, either expressed, implied, or statutory,
+   including, without limitation, warranties that the Covered Software is free
+   of defects, merchantable, fit for a particular purpose or non-infringing.
+   The entire risk as to the quality and performance of the Covered Software
+   is with You. Should any Covered Software prove defective in any respect,
+   You (not any Contributor) assume the cost of any necessary servicing,
+   repair, or correction. This disclaimer of warranty constitutes an essential
+   part of this License. No use of  any Covered Software is authorized under
+   this License except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from
+   such party's negligence to the extent applicable law prohibits such
+   limitation. Some jurisdictions do not allow the exclusion or limitation of
+   incidental or consequential damages, so this exclusion and limitation may
+   not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts
+   of a jurisdiction where the defendant maintains its principal place of
+   business and such litigation shall be governed by laws of that
+   jurisdiction, without reference to its conflict-of-law provisions. Nothing
+   in this Section shall prevent a party's ability to bring cross-claims or
+   counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject
+   matter hereof. If any provision of this License is held to be
+   unenforceable, such provision shall be reformed only to the extent
+   necessary to make it enforceable. Any law or regulation which provides that
+   the language of a contract shall be construed against the drafter shall not
+   be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version
+      of the License under which You originally received the Covered Software,
+      or under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a
+      modified version of this License if you rename the license and remove
+      any references to the name of the license steward (except to note that
+      such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+      Licenses If You choose to distribute Source Code Form that is
+      Incompatible With Secondary Licenses under the terms of this version of
+      the License, the notice described in Exhibit B of this License must be
+      attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md
new file mode 100644
index 0000000..fbde8b9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/README.md
@@ -0,0 +1,8 @@
+# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid)
+
+Generates UUID-format strings using high quality, _purely random_ bytes. It is **not** intended to be RFC compliant, merely to use a well-understood string representation of a 128-bit value. It can also parse UUID-format strings into their component bytes.
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid).
diff --git a/vendor/github.com/hashicorp/go-uuid/go.mod b/vendor/github.com/hashicorp/go-uuid/go.mod
new file mode 100644
index 0000000..dd57f9d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/go.mod
@@ -0,0 +1 @@
+module github.com/hashicorp/go-uuid
diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go
new file mode 100644
index 0000000..911227f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/uuid.go
@@ -0,0 +1,65 @@
+package uuid
+
+import (
+	"crypto/rand"
+	"encoding/hex"
+	"fmt"
+)
+
+// GenerateRandomBytes is used to generate random bytes of given size.
+func GenerateRandomBytes(size int) ([]byte, error) {
+	buf := make([]byte, size)
+	if _, err := rand.Read(buf); err != nil {
+		return nil, fmt.Errorf("failed to read random bytes: %v", err)
+	}
+	return buf, nil
+}
+
+const uuidLen = 16
+
+// GenerateUUID is used to generate a random UUID
+func GenerateUUID() (string, error) {
+	buf, err := GenerateRandomBytes(uuidLen)
+	if err != nil {
+		return "", err
+	}
+	return FormatUUID(buf)
+}
+
+// FormatUUID renders a 16-byte slice in the canonical
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx form.
+func FormatUUID(buf []byte) (string, error) {
+	if buflen := len(buf); buflen != uuidLen {
+		return "", fmt.Errorf("wrong length byte slice (%d)", buflen)
+	}
+
+	return fmt.Sprintf("%x-%x-%x-%x-%x",
+		buf[0:4],
+		buf[4:6],
+		buf[6:8],
+		buf[8:10],
+		buf[10:16]), nil
+}
+
+// ParseUUID converts a UUID-format string back into its 16 raw bytes.
+func ParseUUID(uuid string) ([]byte, error) {
+	if len(uuid) != 2*uuidLen+4 {
+		return nil, fmt.Errorf("uuid string is wrong length")
+	}
+
+	if uuid[8] != '-' ||
+		uuid[13] != '-' ||
+		uuid[18] != '-' ||
+		uuid[23] != '-' {
+		return nil, fmt.Errorf("uuid is improperly formatted")
+	}
+
+	hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36]
+
+	ret, err := hex.DecodeString(hexStr)
+	if err != nil {
+		return nil, err
+	}
+	if len(ret) != uuidLen {
+		return nil, fmt.Errorf("decoded hex is the wrong length")
+	}
+
+	return ret, nil
+}
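
A short round-trip sketch of this package's API, assuming it is imported from the vendored path above (illustrative, not part of this change):

package main

import (
	"fmt"

	"github.com/hashicorp/go-uuid"
)

func main() {
	s, err := uuid.GenerateUUID()
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // random bytes in UUID format, not RFC 4122 versioned

	// ParseUUID and FormatUUID round-trip the 16 raw bytes.
	raw, err := uuid.ParseUUID(s)
	if err != nil {
		panic(err)
	}
	back, err := uuid.FormatUUID(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(back == s) // true
}
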
diff --git a/vendor/github.com/jcmturner/gofork/LICENSE b/vendor/github.com/jcmturner/gofork/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/github.com/jcmturner/gofork/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md b/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md
new file mode 100644
index 0000000..66a2a8c
--- /dev/null
+++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md
@@ -0,0 +1,5 @@
+This is a temporary repository that will be removed when the issues below are fixed in the core golang code.
+
+## Issues
+* [encoding/asn1: cannot marshal into a GeneralString](https://github.com/golang/go/issues/18832)
+* [encoding/asn1: cannot marshal into slice of strings and pass stringtype parameter tags to members](https://github.com/golang/go/issues/18834)
\ No newline at end of file
diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go
new file mode 100644
index 0000000..f1bb767
--- /dev/null
+++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go
@@ -0,0 +1,1003 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package asn1 implements parsing of DER-encoded ASN.1 data structures,
+// as defined in ITU-T Rec X.690.
+//
+// See also ``A Layman's Guide to a Subset of ASN.1, BER, and DER,''
+// http://luca.ntop.org/Teaching/Appunti/asn1.html.
+package asn1
+
+// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc
+// are different encoding formats for those objects. Here, we'll be dealing
+// with DER, the Distinguished Encoding Rules. DER is used in X.509 because
+// it's fast to parse and, unlike BER, has a unique encoding for every object.
+// When calculating hashes over objects, it's important that the resulting
+// bytes be the same at both ends and DER removes this margin of error.
+//
+// ASN.1 is very complex and this package doesn't attempt to implement
+// everything by any means.
+
+import (
+	"errors"
+	"fmt"
+	"math/big"
+	"reflect"
+	"strconv"
+	"time"
+	"unicode/utf8"
+)
+
+// A StructuralError suggests that the ASN.1 data is valid, but the Go type
+// which is receiving it doesn't match.
+type StructuralError struct {
+	Msg string
+}
+
+func (e StructuralError) Error() string { return "asn1: structure error: " + e.Msg }
+
+// A SyntaxError suggests that the ASN.1 data is invalid.
+type SyntaxError struct {
+	Msg string
+}
+
+func (e SyntaxError) Error() string { return "asn1: syntax error: " + e.Msg }
+
+// We start by dealing with each of the primitive types in turn.
+
+// BOOLEAN
+
+func parseBool(bytes []byte) (ret bool, err error) {
+	if len(bytes) != 1 {
+		err = SyntaxError{"invalid boolean"}
+		return
+	}
+
+	// DER demands that "If the encoding represents the boolean value TRUE,
+	// its single contents octet shall have all eight bits set to one."
+	// Thus only 0 and 255 are valid encoded values.
+	switch bytes[0] {
+	case 0:
+		ret = false
+	case 0xff:
+		ret = true
+	default:
+		err = SyntaxError{"invalid boolean"}
+	}
+
+	return
+}
+
+// INTEGER
+
+// checkInteger returns nil if the given bytes are a valid DER-encoded
+// INTEGER and an error otherwise.
+func checkInteger(bytes []byte) error {
+	if len(bytes) == 0 {
+		return StructuralError{"empty integer"}
+	}
+	if len(bytes) == 1 {
+		return nil
+	}
+	if (bytes[0] == 0 && bytes[1]&0x80 == 0) || (bytes[0] == 0xff && bytes[1]&0x80 == 0x80) {
+		return StructuralError{"integer not minimally-encoded"}
+	}
+	return nil
+}
+
+// parseInt64 treats the given bytes as a big-endian, signed integer and
+// returns the result.
+func parseInt64(bytes []byte) (ret int64, err error) {
+	err = checkInteger(bytes)
+	if err != nil {
+		return
+	}
+	if len(bytes) > 8 {
+		// We'll overflow an int64 in this case.
+		err = StructuralError{"integer too large"}
+		return
+	}
+	for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
+		ret <<= 8
+		ret |= int64(bytes[bytesRead])
+	}
+
+	// Shift up and down in order to sign extend the result.
+	ret <<= 64 - uint8(len(bytes))*8
+	ret >>= 64 - uint8(len(bytes))*8
+	return
+}
+
+// parseInt treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseInt32(bytes []byte) (int32, error) {
+	if err := checkInteger(bytes); err != nil {
+		return 0, err
+	}
+	ret64, err := parseInt64(bytes)
+	if err != nil {
+		return 0, err
+	}
+	if ret64 != int64(int32(ret64)) {
+		return 0, StructuralError{"integer too large"}
+	}
+	return int32(ret64), nil
+}
+
+var bigOne = big.NewInt(1)
+
+// parseBigInt treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseBigInt(bytes []byte) (*big.Int, error) {
+	if err := checkInteger(bytes); err != nil {
+		return nil, err
+	}
+	ret := new(big.Int)
+	if len(bytes) > 0 && bytes[0]&0x80 == 0x80 {
+		// This is a negative number.
+		notBytes := make([]byte, len(bytes))
+		for i := range notBytes {
+			notBytes[i] = ^bytes[i]
+		}
+		ret.SetBytes(notBytes)
+		ret.Add(ret, bigOne)
+		ret.Neg(ret)
+		return ret, nil
+	}
+	ret.SetBytes(bytes)
+	return ret, nil
+}
+
+// BIT STRING
+
+// BitString is the structure to use when you want an ASN.1 BIT STRING type. A
+// bit string is padded up to the nearest byte in memory and the number of
+// valid bits is recorded. Padding bits will be zero.
+type BitString struct {
+	Bytes     []byte // bits packed into bytes.
+	BitLength int    // length in bits.
+}
+
+// At returns the bit at the given index. If the index is out of range it
+// returns 0.
+func (b BitString) At(i int) int {
+	if i < 0 || i >= b.BitLength {
+		return 0
+	}
+	x := i / 8
+	y := 7 - uint(i%8)
+	return int(b.Bytes[x]>>y) & 1
+}
+
+// RightAlign returns a slice where the padding bits are at the beginning. The
+// slice may share memory with the BitString.
+func (b BitString) RightAlign() []byte {
+	shift := uint(8 - (b.BitLength % 8))
+	if shift == 8 || len(b.Bytes) == 0 {
+		return b.Bytes
+	}
+
+	a := make([]byte, len(b.Bytes))
+	a[0] = b.Bytes[0] >> shift
+	for i := 1; i < len(b.Bytes); i++ {
+		a[i] = b.Bytes[i-1] << (8 - shift)
+		a[i] |= b.Bytes[i] >> shift
+	}
+
+	return a
+}
+
+// parseBitString parses an ASN.1 bit string from the given byte slice and returns it.
+func parseBitString(bytes []byte) (ret BitString, err error) {
+	if len(bytes) == 0 {
+		err = SyntaxError{"zero length BIT STRING"}
+		return
+	}
+	paddingBits := int(bytes[0])
+	if paddingBits > 7 ||
+		len(bytes) == 1 && paddingBits > 0 ||
+		bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {
+		err = SyntaxError{"invalid padding bits in BIT STRING"}
+		return
+	}
+	ret.BitLength = (len(bytes)-1)*8 - paddingBits
+	ret.Bytes = bytes[1:]
+	return
+}
+
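
As a quick sketch of BitString behavior (At indexes from the most significant bit; RightAlign moves the padding to the front), assuming the package is imported from the vendored path above:

package main

import (
	"fmt"

	"github.com/jcmturner/gofork/encoding/asn1"
)

func main() {
	// 10 valid bits: 0110 1110 01, padded with 6 zero bits in the last byte.
	bs := asn1.BitString{Bytes: []byte{0x6e, 0x40}, BitLength: 10}
	fmt.Println(bs.At(0), bs.At(1), bs.At(9), bs.At(10)) // 0 1 1 0 (index 10 is out of range)
	fmt.Printf("%x\n", bs.RightAlign())                  // 01b9: same bits shifted to the low end
}
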
+// OBJECT IDENTIFIER
+
+// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER.
+type ObjectIdentifier []int
+
+// Equal reports whether oi and other represent the same identifier.
+func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {
+	if len(oi) != len(other) {
+		return false
+	}
+	for i := 0; i < len(oi); i++ {
+		if oi[i] != other[i] {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (oi ObjectIdentifier) String() string {
+	var s string
+
+	for i, v := range oi {
+		if i > 0 {
+			s += "."
+		}
+		s += strconv.Itoa(v)
+	}
+
+	return s
+}
+
+// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
+// returns it. An object identifier is a sequence of variable length integers
+// that are assigned in a hierarchy.
+func parseObjectIdentifier(bytes []byte) (s []int, err error) {
+	if len(bytes) == 0 {
+		err = SyntaxError{"zero length OBJECT IDENTIFIER"}
+		return
+	}
+
+	// In the worst case, we get two elements from the first byte (which is
+	// encoded differently) and then every varint is a single byte long.
+	s = make([]int, len(bytes)+1)
+
+	// The first varint is 40*value1 + value2:
+	// According to this packing, value1 can take the values 0, 1 and 2 only.
+	// When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
+	// then there are no restrictions on value2.
+	v, offset, err := parseBase128Int(bytes, 0)
+	if err != nil {
+		return
+	}
+	if v < 80 {
+		s[0] = v / 40
+		s[1] = v % 40
+	} else {
+		s[0] = 2
+		s[1] = v - 80
+	}
+
+	i := 2
+	for ; offset < len(bytes); i++ {
+		v, offset, err = parseBase128Int(bytes, offset)
+		if err != nil {
+			return
+		}
+		s[i] = v
+	}
+	s = s[0:i]
+	return
+}
+
+// ENUMERATED
+
+// An Enumerated is represented as a plain int.
+type Enumerated int
+
+// FLAG
+
+// A Flag accepts any data and is set to true if present.
+type Flag bool
+
+// parseBase128Int parses a base-128 encoded int from the given offset in the
+// given byte slice. It returns the value and the new offset.
+func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) {
+	offset = initOffset
+	for shifted := 0; offset < len(bytes); shifted++ {
+		if shifted == 4 {
+			err = StructuralError{"base 128 integer too large"}
+			return
+		}
+		ret <<= 7
+		b := bytes[offset]
+		ret |= int(b & 0x7f)
+		offset++
+		if b&0x80 == 0 {
+			return
+		}
+	}
+	err = SyntaxError{"truncated base 128 integer"}
+	return
+}
+
+// UTCTime
+
+func parseUTCTime(bytes []byte) (ret time.Time, err error) {
+	s := string(bytes)
+
+	formatStr := "0601021504Z0700"
+	ret, err = time.Parse(formatStr, s)
+	if err != nil {
+		formatStr = "060102150405Z0700"
+		ret, err = time.Parse(formatStr, s)
+	}
+	if err != nil {
+		return
+	}
+
+	if serialized := ret.Format(formatStr); serialized != s {
+		err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized)
+		return
+	}
+
+	if ret.Year() >= 2050 {
+		// UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
+		ret = ret.AddDate(-100, 0, 0)
+	}
+
+	return
+}
+
+// parseGeneralizedTime parses the GeneralizedTime from the given byte slice
+// and returns the resulting time.
+func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) {
+	const formatStr = "20060102150405Z0700"
+	s := string(bytes)
+
+	if ret, err = time.Parse(formatStr, s); err != nil {
+		return
+	}
+
+	if serialized := ret.Format(formatStr); serialized != s {
+		err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized)
+	}
+
+	return
+}
+
+// PrintableString
+
+// parsePrintableString parses an ASN.1 PrintableString from the given byte
+// slice and returns it.
+func parsePrintableString(bytes []byte) (ret string, err error) {
+	for _, b := range bytes {
+		if !isPrintable(b) {
+			err = SyntaxError{"PrintableString contains invalid character"}
+			return
+		}
+	}
+	ret = string(bytes)
+	return
+}
+
+// isPrintable reports whether the given b is in the ASN.1 PrintableString set.
+func isPrintable(b byte) bool {
+	return 'a' <= b && b <= 'z' ||
+		'A' <= b && b <= 'Z' ||
+		'0' <= b && b <= '9' ||
+		'\'' <= b && b <= ')' ||
+		'+' <= b && b <= '/' ||
+		b == ' ' ||
+		b == ':' ||
+		b == '=' ||
+		b == '?' ||
+		// This is technically not allowed in a PrintableString.
+		// However, x509 certificates with wildcard strings don't
+		// always use the correct string type so we permit it.
+		b == '*'
+}
+
+// IA5String
+
+// parseIA5String parses an ASN.1 IA5String (ASCII string) from the given
+// byte slice and returns it.
+func parseIA5String(bytes []byte) (ret string, err error) {
+	for _, b := range bytes {
+		if b >= utf8.RuneSelf {
+			err = SyntaxError{"IA5String contains invalid character"}
+			return
+		}
+	}
+	ret = string(bytes)
+	return
+}
+
+// T61String
+
+// parseT61String parses an ASN.1 T61String (8-bit clean string) from the given
+// byte slice and returns it.
+func parseT61String(bytes []byte) (ret string, err error) {
+	return string(bytes), nil
+}
+
+// UTF8String
+
+// parseUTF8String parses an ASN.1 UTF8String (raw UTF-8) from the given byte
+// slice and returns it.
+func parseUTF8String(bytes []byte) (ret string, err error) {
+	if !utf8.Valid(bytes) {
+		return "", errors.New("asn1: invalid UTF-8 string")
+	}
+	return string(bytes), nil
+}
+
+// A RawValue represents an undecoded ASN.1 object.
+type RawValue struct {
+	Class, Tag int
+	IsCompound bool
+	Bytes      []byte
+	FullBytes  []byte // includes the tag and length
+}
+
+// RawContent is used to signal that the undecoded, DER data needs to be
+// preserved for a struct. To use it, the first field of the struct must have
+// this type. It's an error for any of the other fields to have this type.
+type RawContent []byte
+
+// Tagging
+
+// parseTagAndLength parses an ASN.1 tag and length pair from the given offset
+// into a byte slice. It returns the parsed data and the new offset. SET and
+// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we
+// don't distinguish between ordered and unordered objects in this code.
+func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err error) {
+	offset = initOffset
+	// parseTagAndLength should not be called without at least a single
+	// byte to read. Thus this check is for robustness:
+	if offset >= len(bytes) {
+		err = errors.New("asn1: internal error in parseTagAndLength")
+		return
+	}
+	b := bytes[offset]
+	offset++
+	ret.class = int(b >> 6)
+	ret.isCompound = b&0x20 == 0x20
+	ret.tag = int(b & 0x1f)
+
+	// If the bottom five bits are set, then the tag number is actually base 128
+	// encoded afterwards
+	if ret.tag == 0x1f {
+		ret.tag, offset, err = parseBase128Int(bytes, offset)
+		if err != nil {
+			return
+		}
+		// Tags should be encoded in minimal form.
+		if ret.tag < 0x1f {
+			err = SyntaxError{"non-minimal tag"}
+			return
+		}
+	}
+	if offset >= len(bytes) {
+		err = SyntaxError{"truncated tag or length"}
+		return
+	}
+	b = bytes[offset]
+	offset++
+	if b&0x80 == 0 {
+		// The length is encoded in the bottom 7 bits.
+		ret.length = int(b & 0x7f)
+	} else {
+		// Bottom 7 bits give the number of length bytes to follow.
+		numBytes := int(b & 0x7f)
+		if numBytes == 0 {
+			err = SyntaxError{"indefinite length found (not DER)"}
+			return
+		}
+		ret.length = 0
+		for i := 0; i < numBytes; i++ {
+			if offset >= len(bytes) {
+				err = SyntaxError{"truncated tag or length"}
+				return
+			}
+			b = bytes[offset]
+			offset++
+			if ret.length >= 1<<23 {
+				// We can't shift ret.length up without
+				// overflowing.
+				err = StructuralError{"length too large"}
+				return
+			}
+			ret.length <<= 8
+			ret.length |= int(b)
+			if ret.length == 0 {
+				// DER requires that lengths be minimal.
+				err = StructuralError{"superfluous leading zeros in length"}
+				return
+			}
+		}
+		// Short lengths must be encoded in short form.
+		if ret.length < 0x80 {
+			err = StructuralError{"non-minimal length"}
+			return
+		}
+	}
+
+	return
+}
+
+// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse
+// a number of ASN.1 values from the given byte slice and returns them as a
+// slice of Go values of the given type.
+func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err error) {
+	expectedTag, compoundType, ok := getUniversalType(elemType)
+	if !ok {
+		err = StructuralError{"unknown Go type for slice"}
+		return
+	}
+
+	// First we iterate over the input and count the number of elements,
+	// checking that the types are correct in each case.
+	numElements := 0
+	for offset := 0; offset < len(bytes); {
+		var t tagAndLength
+		t, offset, err = parseTagAndLength(bytes, offset)
+		if err != nil {
+			return
+		}
+		switch t.tag {
+		case TagIA5String, TagGeneralString, TagT61String, TagUTF8String:
+			// We pretend that various other string types are
+			// PRINTABLE STRINGs so that a sequence of them can be
+			// parsed into a []string.
+			t.tag = TagPrintableString
+		case TagGeneralizedTime, TagUTCTime:
+			// Likewise, both time types are treated the same.
+			t.tag = TagUTCTime
+		}
+
+		if t.class != ClassUniversal || t.isCompound != compoundType || t.tag != expectedTag {
+			err = StructuralError{"sequence tag mismatch"}
+			return
+		}
+		if invalidLength(offset, t.length, len(bytes)) {
+			err = SyntaxError{"truncated sequence"}
+			return
+		}
+		offset += t.length
+		numElements++
+	}
+	ret = reflect.MakeSlice(sliceType, numElements, numElements)
+	params := fieldParameters{}
+	offset := 0
+	for i := 0; i < numElements; i++ {
+		offset, err = parseField(ret.Index(i), bytes, offset, params)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+var (
+	bitStringType        = reflect.TypeOf(BitString{})
+	objectIdentifierType = reflect.TypeOf(ObjectIdentifier{})
+	enumeratedType       = reflect.TypeOf(Enumerated(0))
+	flagType             = reflect.TypeOf(Flag(false))
+	timeType             = reflect.TypeOf(time.Time{})
+	rawValueType         = reflect.TypeOf(RawValue{})
+	rawContentsType      = reflect.TypeOf(RawContent(nil))
+	bigIntType           = reflect.TypeOf(new(big.Int))
+)
+
+// invalidLength reports whether offset+length exceeds sliceLength or the
+// addition would overflow.
+func invalidLength(offset, length, sliceLength int) bool {
+	return offset+length < offset || offset+length > sliceLength
+}
+
+// parseField is the main parsing function. Given a byte slice and an offset
+// into the array, it will try to parse a suitable ASN.1 value out and store it
+// in the given Value.
+func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) {
+	offset = initOffset
+	fieldType := v.Type()
+
+	// If we have run out of data, it may be that there are optional elements at the end.
+	if offset == len(bytes) {
+		if !setDefaultValue(v, params) {
+			err = SyntaxError{"sequence truncated"}
+		}
+		return
+	}
+
+	// Deal with raw values.
+	if fieldType == rawValueType {
+		var t tagAndLength
+		t, offset, err = parseTagAndLength(bytes, offset)
+		if err != nil {
+			return
+		}
+		if invalidLength(offset, t.length, len(bytes)) {
+			err = SyntaxError{"data truncated"}
+			return
+		}
+		result := RawValue{t.class, t.tag, t.isCompound, bytes[offset : offset+t.length], bytes[initOffset : offset+t.length]}
+		offset += t.length
+		v.Set(reflect.ValueOf(result))
+		return
+	}
+
+	// Deal with the ANY type.
+	if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 {
+		var t tagAndLength
+		t, offset, err = parseTagAndLength(bytes, offset)
+		if err != nil {
+			return
+		}
+		if invalidLength(offset, t.length, len(bytes)) {
+			err = SyntaxError{"data truncated"}
+			return
+		}
+		var result interface{}
+		if !t.isCompound && t.class == ClassUniversal {
+			innerBytes := bytes[offset : offset+t.length]
+			switch t.tag {
+			case TagPrintableString:
+				result, err = parsePrintableString(innerBytes)
+			case TagIA5String:
+				result, err = parseIA5String(innerBytes)
+			// jtasn1 addition of following case
+			case TagGeneralString:
+				result, err = parseIA5String(innerBytes)
+			case TagT61String:
+				result, err = parseT61String(innerBytes)
+			case TagUTF8String:
+				result, err = parseUTF8String(innerBytes)
+			case TagInteger:
+				result, err = parseInt64(innerBytes)
+			case TagBitString:
+				result, err = parseBitString(innerBytes)
+			case TagOID:
+				result, err = parseObjectIdentifier(innerBytes)
+			case TagUTCTime:
+				result, err = parseUTCTime(innerBytes)
+			case TagGeneralizedTime:
+				result, err = parseGeneralizedTime(innerBytes)
+			case TagOctetString:
+				result = innerBytes
+			default:
+				// If we don't know how to handle the type, we just leave Value as nil.
+			}
+		}
+		offset += t.length
+		if err != nil {
+			return
+		}
+		if result != nil {
+			v.Set(reflect.ValueOf(result))
+		}
+		return
+	}
+	universalTag, compoundType, ok1 := getUniversalType(fieldType)
+	if !ok1 {
+		err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType)}
+		return
+	}
+
+	t, offset, err := parseTagAndLength(bytes, offset)
+	if err != nil {
+		return
+	}
+	if params.explicit {
+		expectedClass := ClassContextSpecific
+		if params.application {
+			expectedClass = ClassApplication
+		}
+		if offset == len(bytes) {
+			err = StructuralError{"explicit tag has no child"}
+			return
+		}
+		if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) {
+			if t.length > 0 {
+				t, offset, err = parseTagAndLength(bytes, offset)
+				if err != nil {
+					return
+				}
+			} else {
+				if fieldType != flagType {
+					err = StructuralError{"zero length explicit tag was not an asn1.Flag"}
+					return
+				}
+				v.SetBool(true)
+				return
+			}
+		} else {
+			// The tags didn't match, it might be an optional element.
+			ok := setDefaultValue(v, params)
+			if ok {
+				offset = initOffset
+			} else {
+				err = StructuralError{"explicitly tagged member didn't match"}
+			}
+			return
+		}
+	}
+
+	// Special case for strings: all the ASN.1 string types map to the Go
+	// type string. getUniversalType returns the tag for PrintableString
+	// when it sees a string, so if we see a different string type on the
+	// wire, we change the universal type to match.
+	if universalTag == TagPrintableString {
+		if t.class == ClassUniversal {
+			switch t.tag {
+			case TagIA5String, TagGeneralString, TagT61String, TagUTF8String:
+				universalTag = t.tag
+			}
+		} else if params.stringType != 0 {
+			universalTag = params.stringType
+		}
+	}
+
+	// Special case for time: UTCTime and GeneralizedTime both map to the
+	// Go type time.Time.
+	if universalTag == TagUTCTime && t.tag == TagGeneralizedTime && t.class == ClassUniversal {
+		universalTag = TagGeneralizedTime
+	}
+
+	if params.set {
+		universalTag = TagSet
+	}
+
+	expectedClass := ClassUniversal
+	expectedTag := universalTag
+
+	if !params.explicit && params.tag != nil {
+		expectedClass = ClassContextSpecific
+		expectedTag = *params.tag
+	}
+
+	if !params.explicit && params.application && params.tag != nil {
+		expectedClass = ClassApplication
+		expectedTag = *params.tag
+	}
+
+	// We have unwrapped any explicit tagging at this point.
+	if t.class != expectedClass || t.tag != expectedTag || t.isCompound != compoundType {
+		// Tags don't match. Again, it could be an optional element.
+		ok := setDefaultValue(v, params)
+		if ok {
+			offset = initOffset
+		} else {
+			err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset)}
+		}
+		return
+	}
+	if invalidLength(offset, t.length, len(bytes)) {
+		err = SyntaxError{"data truncated"}
+		return
+	}
+	innerBytes := bytes[offset : offset+t.length]
+	offset += t.length
+
+	// We deal with the structures defined in this package first.
+	switch fieldType {
+	case objectIdentifierType:
+		newSlice, err1 := parseObjectIdentifier(innerBytes)
+		v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice)))
+		if err1 == nil {
+			reflect.Copy(v, reflect.ValueOf(newSlice))
+		}
+		err = err1
+		return
+	case bitStringType:
+		bs, err1 := parseBitString(innerBytes)
+		if err1 == nil {
+			v.Set(reflect.ValueOf(bs))
+		}
+		err = err1
+		return
+	case timeType:
+		var time time.Time
+		var err1 error
+		if universalTag == TagUTCTime {
+			time, err1 = parseUTCTime(innerBytes)
+		} else {
+			time, err1 = parseGeneralizedTime(innerBytes)
+		}
+		if err1 == nil {
+			v.Set(reflect.ValueOf(time))
+		}
+		err = err1
+		return
+	case enumeratedType:
+		parsedInt, err1 := parseInt32(innerBytes)
+		if err1 == nil {
+			v.SetInt(int64(parsedInt))
+		}
+		err = err1
+		return
+	case flagType:
+		v.SetBool(true)
+		return
+	case bigIntType:
+		parsedInt, err1 := parseBigInt(innerBytes)
+		if err1 == nil {
+			v.Set(reflect.ValueOf(parsedInt))
+		}
+		err = err1
+		return
+	}
+	switch val := v; val.Kind() {
+	case reflect.Bool:
+		parsedBool, err1 := parseBool(innerBytes)
+		if err1 == nil {
+			val.SetBool(parsedBool)
+		}
+		err = err1
+		return
+	case reflect.Int, reflect.Int32, reflect.Int64:
+		if val.Type().Size() == 4 {
+			parsedInt, err1 := parseInt32(innerBytes)
+			if err1 == nil {
+				val.SetInt(int64(parsedInt))
+			}
+			err = err1
+		} else {
+			parsedInt, err1 := parseInt64(innerBytes)
+			if err1 == nil {
+				val.SetInt(parsedInt)
+			}
+			err = err1
+		}
+		return
+	// TODO(dfc) Add support for the remaining integer types
+	case reflect.Struct:
+		structType := fieldType
+
+		if structType.NumField() > 0 &&
+			structType.Field(0).Type == rawContentsType {
+			bytes := bytes[initOffset:offset]
+			val.Field(0).Set(reflect.ValueOf(RawContent(bytes)))
+		}
+
+		innerOffset := 0
+		for i := 0; i < structType.NumField(); i++ {
+			field := structType.Field(i)
+			if i == 0 && field.Type == rawContentsType {
+				continue
+			}
+			innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, parseFieldParameters(field.Tag.Get("asn1")))
+			if err != nil {
+				return
+			}
+		}
+		// We allow extra bytes at the end of the SEQUENCE because
+		// adding elements to the end has been used in X.509 as the
+		// version numbers have increased.
+		return
+	case reflect.Slice:
+		sliceType := fieldType
+		if sliceType.Elem().Kind() == reflect.Uint8 {
+			val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes)))
+			reflect.Copy(val, reflect.ValueOf(innerBytes))
+			return
+		}
+		newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem())
+		if err1 == nil {
+			val.Set(newSlice)
+		}
+		err = err1
+		return
+	case reflect.String:
+		var v string
+		switch universalTag {
+		case TagPrintableString:
+			v, err = parsePrintableString(innerBytes)
+		case TagIA5String:
+			v, err = parseIA5String(innerBytes)
+		case TagT61String:
+			v, err = parseT61String(innerBytes)
+		case TagUTF8String:
+			v, err = parseUTF8String(innerBytes)
+		case TagGeneralString:
+			// GeneralString is specified in ISO-2022/ECMA-35.
+			// A brief review suggests that it includes structures
+			// that allow the encoding to change midstring and
+			// such. We give up and pass it as an 8-bit string.
+			v, err = parseT61String(innerBytes)
+		default:
+			err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag)}
+		}
+		if err == nil {
+			val.SetString(v)
+		}
+		return
+	}
+	err = StructuralError{"unsupported: " + v.Type().String()}
+	return
+}
+
+// canHaveDefaultValue reports whether k is a Kind that we will set a default
+// value for. (A signed integer, essentially.)
+func canHaveDefaultValue(k reflect.Kind) bool {
+	switch k {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return true
+	}
+
+	return false
+}
+
+// setDefaultValue is used to install a default value, from a tag string, into
+// a Value. It is successful if the field was optional, even if a default value
+// wasn't provided or it failed to install it into the Value.
+func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) {
+	if !params.optional {
+		return
+	}
+	ok = true
+	if params.defaultValue == nil {
+		return
+	}
+	if canHaveDefaultValue(v.Kind()) {
+		v.SetInt(*params.defaultValue)
+	}
+	return
+}
+
+// Unmarshal parses the DER-encoded ASN.1 data structure b
+// and uses the reflect package to fill in an arbitrary value pointed at by val.
+// Because Unmarshal uses the reflect package, the structs
+// being written to must use upper case field names.
+//
+// An ASN.1 INTEGER can be written to an int, int32, int64,
+// or *big.Int (from the math/big package).
+// If the encoded value does not fit in the Go type,
+// Unmarshal returns a parse error.
+//
+// An ASN.1 BIT STRING can be written to a BitString.
+//
+// An ASN.1 OCTET STRING can be written to a []byte.
+//
+// An ASN.1 OBJECT IDENTIFIER can be written to an
+// ObjectIdentifier.
+//
+// An ASN.1 ENUMERATED can be written to an Enumerated.
+//
+// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a time.Time.
+//
+// An ASN.1 PrintableString or IA5String can be written to a string.
+//
+// Any of the above ASN.1 values can be written to an interface{}.
+// The value stored in the interface has the corresponding Go type.
+// For integers, that type is int64.
+//
+// An ASN.1 SEQUENCE OF x or SET OF x can be written
+// to a slice if an x can be written to the slice's element type.
+//
+// An ASN.1 SEQUENCE or SET can be written to a struct
+// if each of the elements in the sequence can be
+// written to the corresponding element in the struct.
+//
+// The following tags on struct fields have special meaning to Unmarshal:
+//
+//	application	specifies that an APPLICATION tag is used
+//	default:x	sets the default value for optional integer fields
+//	explicit	specifies that an additional, explicit tag wraps the implicit one
+//	optional	marks the field as ASN.1 OPTIONAL
+//	set		causes a SET rather than a SEQUENCE type to be expected
+//	tag:x		specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC
+//
+// If the type of the first field of a structure is RawContent then the raw
+// ASN.1 contents of the struct will be stored in it.
+//
+// If the type name of a slice element ends with "SET" then it's treated as if
+// the "set" tag was set on it. This can be used with nested slices where a
+// struct tag cannot be given.
+//
+// Other ASN.1 types are not supported; if it encounters them,
+// Unmarshal returns a parse error.
+func Unmarshal(b []byte, val interface{}) (rest []byte, err error) {
+	return UnmarshalWithParams(b, val, "")
+}
+
+// UnmarshalWithParams allows field parameters to be specified for the
+// top-level element. The form of the params is the same as the field tags.
+func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) {
+	v := reflect.ValueOf(val).Elem()
+	offset, err := parseField(v, b, 0, parseFieldParameters(params))
+	if err != nil {
+		return nil, err
+	}
+	return b[offset:], nil
+}
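
As a usage sketch of the API documented above (assuming this vendored fork is imported as `github.com/jcmturner/gofork/encoding/asn1`), a round trip through Marshal and Unmarshal looks like:

```go
package main

import (
	"fmt"

	"github.com/jcmturner/gofork/encoding/asn1"
)

// Field names must be exported (upper case) so the reflect-based
// codec can read and set them.
type message struct {
	Version int
	Name    string
}

func main() {
	der, err := asn1.Marshal(message{Version: 1, Name: "hi"})
	if err != nil {
		panic(err)
	}
	var m message
	rest, err := asn1.Unmarshal(der, &m)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v, %d trailing bytes\n", m, len(rest))
}
```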
diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go
new file mode 100644
index 0000000..7a9da49
--- /dev/null
+++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go
@@ -0,0 +1,173 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asn1
+
+import (
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// ASN.1 objects have metadata preceding them:
+//   the tag: the type of the object
+//   a flag denoting if this object is compound or not
+//   the class type: the namespace of the tag
+//   the length of the object, in bytes
+
+// Here are some standard tags and classes
+
+// ASN.1 tags represent the type of the following object.
+const (
+	TagBoolean         = 1
+	TagInteger         = 2
+	TagBitString       = 3
+	TagOctetString     = 4
+	TagOID             = 6
+	TagEnum            = 10
+	TagUTF8String      = 12
+	TagSequence        = 16
+	TagSet             = 17
+	TagPrintableString = 19
+	TagT61String       = 20
+	TagIA5String       = 22
+	TagUTCTime         = 23
+	TagGeneralizedTime = 24
+	TagGeneralString   = 27
+)
+
+// ASN.1 class types represent the namespace of the tag.
+const (
+	ClassUniversal       = 0
+	ClassApplication     = 1
+	ClassContextSpecific = 2
+	ClassPrivate         = 3
+)
+
+type tagAndLength struct {
+	class, tag, length int
+	isCompound         bool
+}
+
+// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead
+// of" and "in addition to". When not specified, every primitive type has a
+// default tag in the UNIVERSAL class.
+//
+// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1
+// doesn't actually have a UNIVERSAL keyword). However, tagging it [IMPLICIT
+// CONTEXT-SPECIFIC 42] replaces that default tag with another.
+//
+// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an
+// /additional/ tag would wrap the default tag. This explicit tag will have the
+// compound flag set.
+//
+// (This is used in order to remove ambiguity with optional elements.)
+//
+// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth; however, we
+// don't support that here. We support a single layer of EXPLICIT or IMPLICIT
+// tagging with tag strings on the fields of a structure.
+
+// fieldParameters is the parsed representation of tag string from a structure field.
+type fieldParameters struct {
+	optional     bool   // true iff the field is OPTIONAL
+	explicit     bool   // true iff an EXPLICIT tag is in use.
+	application  bool   // true iff an APPLICATION tag is in use.
+	defaultValue *int64 // a default value for INTEGER typed fields (may be nil).
+	tag          *int   // the EXPLICIT or IMPLICIT tag (may be nil).
+	stringType   int    // the string tag to use when marshaling.
+	timeType     int    // the time tag to use when marshaling.
+	set          bool   // true iff this should be encoded as a SET
+	omitEmpty    bool   // true iff this should be omitted if empty when marshaling.
+
+	// Invariants:
+	//   if explicit is set, tag is non-nil.
+}
+
+// Given a tag string with the format specified in the package comment,
+// parseFieldParameters will parse it into a fieldParameters structure,
+// ignoring unknown parts of the string.
+func parseFieldParameters(str string) (ret fieldParameters) {
+	for _, part := range strings.Split(str, ",") {
+		switch {
+		case part == "optional":
+			ret.optional = true
+		case part == "explicit":
+			ret.explicit = true
+			if ret.tag == nil {
+				ret.tag = new(int)
+			}
+		case part == "generalized":
+			ret.timeType = TagGeneralizedTime
+		case part == "utc":
+			ret.timeType = TagUTCTime
+		case part == "ia5":
+			ret.stringType = TagIA5String
+		// jtasn1 case below added
+		case part == "generalstring":
+			ret.stringType = TagGeneralString
+		case part == "printable":
+			ret.stringType = TagPrintableString
+		case part == "utf8":
+			ret.stringType = TagUTF8String
+		case strings.HasPrefix(part, "default:"):
+			i, err := strconv.ParseInt(part[8:], 10, 64)
+			if err == nil {
+				ret.defaultValue = new(int64)
+				*ret.defaultValue = i
+			}
+		case strings.HasPrefix(part, "tag:"):
+			i, err := strconv.Atoi(part[4:])
+			if err == nil {
+				ret.tag = new(int)
+				*ret.tag = i
+			}
+		case part == "set":
+			ret.set = true
+		case part == "application":
+			ret.application = true
+			if ret.tag == nil {
+				ret.tag = new(int)
+			}
+		case part == "omitempty":
+			ret.omitEmpty = true
+		}
+	}
+	return
+}
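
In practice these parameters arrive via `asn1` struct tags. A hypothetical declaration illustrating the grammar parseFieldParameters accepts:

```go
package example

// Illustrative only: each tag string is split on commas and mapped to
// the fieldParameters fields described above.
type Record struct {
	A int    `asn1:"optional,default:1"` // OPTIONAL INTEGER DEFAULT 1
	B string `asn1:"ia5"`                // encode/decode as IA5String
	C int    `asn1:"explicit,tag:0"`     // [0] EXPLICIT context-specific tag
	D []int  `asn1:"set"`                // SET OF rather than SEQUENCE OF
}
```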
+
+// Given a reflected Go type, getUniversalType returns the default tag number
+// and expected compound flag.
+func getUniversalType(t reflect.Type) (tagNumber int, isCompound, ok bool) {
+	switch t {
+	case objectIdentifierType:
+		return TagOID, false, true
+	case bitStringType:
+		return TagBitString, false, true
+	case timeType:
+		return TagUTCTime, false, true
+	case enumeratedType:
+		return TagEnum, false, true
+	case bigIntType:
+		return TagInteger, false, true
+	}
+	switch t.Kind() {
+	case reflect.Bool:
+		return TagBoolean, false, true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return TagInteger, false, true
+	case reflect.Struct:
+		return TagSequence, true, true
+	case reflect.Slice:
+		if t.Elem().Kind() == reflect.Uint8 {
+			return TagOctetString, false, true
+		}
+		if strings.HasSuffix(t.Name(), "SET") {
+			return TagSet, true, true
+		}
+		return TagSequence, true, true
+	case reflect.String:
+		return TagPrintableString, false, true
+	}
+	return 0, false, false
+}
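
The `"SET"` name-suffix rule in the slice case is worth calling out. A hypothetical type showing where it helps, since nested slices cannot carry struct tags:

```go
package example

// AttributeSET is treated as ASN.1 SET OF because its type name ends
// in "SET"; a struct tag could not be attached if this type appeared
// as the element type of another slice.
type AttributeSET []Attribute

type Attribute struct {
	ID    int
	Value string
}
```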
diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go
new file mode 100644
index 0000000..f52eee9
--- /dev/null
+++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go
@@ -0,0 +1,659 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asn1
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"math/big"
+	"reflect"
+	"time"
+	"unicode/utf8"
+)
+
+// A forkableWriter is an in-memory buffer that can be
+// 'forked' to create new forkableWriters that bracket the
+// original. After
+//    pre, post := w.fork()
+// the overall sequence of bytes represented is logically w+pre+post.
+type forkableWriter struct {
+	*bytes.Buffer
+	pre, post *forkableWriter
+}
+
+func newForkableWriter() *forkableWriter {
+	return &forkableWriter{new(bytes.Buffer), nil, nil}
+}
+
+func (f *forkableWriter) fork() (pre, post *forkableWriter) {
+	if f.pre != nil || f.post != nil {
+		panic("have already forked")
+	}
+	f.pre = newForkableWriter()
+	f.post = newForkableWriter()
+	return f.pre, f.post
+}
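
The payoff of forking is that a body can be serialized before its header: write the body into `post`, then place the tag and length into `pre` once the body length is known. A minimal standalone sketch of the same idea (not the vendored type itself):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// fw is a cut-down forkableWriter: output order is parent, pre, post.
type fw struct {
	*bytes.Buffer
	pre, post *fw
}

func newFW() *fw { return &fw{Buffer: new(bytes.Buffer)} }

func (f *fw) fork() (pre, post *fw) {
	f.pre, f.post = newFW(), newFW()
	return f.pre, f.post
}

func (f *fw) writeTo(out io.Writer) {
	out.Write(f.Bytes())
	if f.pre != nil {
		f.pre.writeTo(out)
	}
	if f.post != nil {
		f.post.writeTo(out)
	}
}

func main() {
	w := newFW()
	w.WriteString("A")
	pre, post := w.fork()
	post.WriteString("C") // the body can be written first...
	pre.WriteString("B")  // ...and its header filled in afterwards
	var out bytes.Buffer
	w.writeTo(&out)
	fmt.Println(out.String()) // ABC
}
```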
+
+func (f *forkableWriter) Len() (l int) {
+	l += f.Buffer.Len()
+	if f.pre != nil {
+		l += f.pre.Len()
+	}
+	if f.post != nil {
+		l += f.post.Len()
+	}
+	return
+}
+
+func (f *forkableWriter) writeTo(out io.Writer) (n int, err error) {
+	n, err = out.Write(f.Bytes())
+	if err != nil {
+		return
+	}
+
+	var nn int
+
+	if f.pre != nil {
+		nn, err = f.pre.writeTo(out)
+		n += nn
+		if err != nil {
+			return
+		}
+	}
+
+	if f.post != nil {
+		nn, err = f.post.writeTo(out)
+		n += nn
+	}
+	return
+}
+
+func marshalBase128Int(out *forkableWriter, n int64) (err error) {
+	if n == 0 {
+		err = out.WriteByte(0)
+		return
+	}
+
+	l := 0
+	for i := n; i > 0; i >>= 7 {
+		l++
+	}
+
+	for i := l - 1; i >= 0; i-- {
+		o := byte(n >> uint(i*7))
+		o &= 0x7f
+		if i != 0 {
+			o |= 0x80
+		}
+		err = out.WriteByte(o)
+		if err != nil {
+			return
+		}
+	}
+
+	return nil
+}
+
+func marshalInt64(out *forkableWriter, i int64) (err error) {
+	n := int64Length(i)
+
+	for ; n > 0; n-- {
+		err = out.WriteByte(byte(i >> uint((n-1)*8)))
+		if err != nil {
+			return
+		}
+	}
+
+	return nil
+}
+
+func int64Length(i int64) (numBytes int) {
+	numBytes = 1
+
+	for i > 127 {
+		numBytes++
+		i >>= 8
+	}
+
+	for i < -128 {
+		numBytes++
+		i >>= 8
+	}
+
+	return
+}
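
int64Length computes the minimal big-endian two's-complement width, and the boundary cases are the interesting ones. A standalone copy with a few probes:

```go
package main

import "fmt"

// Mirrors int64Length above: minimal number of big-endian
// two's-complement bytes needed to represent i.
func int64Length(i int64) int {
	n := 1
	for i > 127 {
		n++
		i >>= 8
	}
	for i < -128 {
		n++
		i >>= 8
	}
	return n
}

func main() {
	fmt.Println(int64Length(127))  // 1: fits in one signed byte
	fmt.Println(int64Length(128))  // 2: needs a leading 0x00
	fmt.Println(int64Length(-128)) // 1
	fmt.Println(int64Length(-129)) // 2
}
```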
+
+func marshalBigInt(out *forkableWriter, n *big.Int) (err error) {
+	if n.Sign() < 0 {
+		// A negative number has to be converted to two's-complement
+		// form. So we'll subtract 1 and invert. If the
+		// most-significant-bit isn't set then we'll need to pad the
+		// beginning with 0xff in order to keep the number negative.
+		nMinus1 := new(big.Int).Neg(n)
+		nMinus1.Sub(nMinus1, bigOne)
+		bytes := nMinus1.Bytes()
+		for i := range bytes {
+			bytes[i] ^= 0xff
+		}
+		if len(bytes) == 0 || bytes[0]&0x80 == 0 {
+			err = out.WriteByte(0xff)
+			if err != nil {
+				return
+			}
+		}
+		_, err = out.Write(bytes)
+	} else if n.Sign() == 0 {
+		// Zero is written as a single zero byte rather than no bytes.
+		err = out.WriteByte(0x00)
+	} else {
+		bytes := n.Bytes()
+		if len(bytes) > 0 && bytes[0]&0x80 != 0 {
+			// We'll have to pad this with 0x00 in order to stop it
+			// looking like a negative number.
+			err = out.WriteByte(0)
+			if err != nil {
+				return
+			}
+		}
+		_, err = out.Write(bytes)
+	}
+	return
+}
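
The negative branch is subtle: the two's complement of n is the bitwise inverse of |n| - 1, padded with 0xff when the top bit would otherwise read as positive. A standalone sketch of just that branch, with two worked values:

```go
package main

import (
	"fmt"
	"math/big"
)

// twosComplement mirrors the negative branch above.
func twosComplement(n *big.Int) []byte {
	m := new(big.Int).Neg(n)
	m.Sub(m, big.NewInt(1))
	b := m.Bytes()
	for i := range b {
		b[i] ^= 0xff
	}
	// Pad with 0xff so the most significant bit stays set (negative).
	if len(b) == 0 || b[0]&0x80 == 0 {
		b = append([]byte{0xff}, b...)
	}
	return b
}

func main() {
	fmt.Printf("% x\n", twosComplement(big.NewInt(-1)))   // ff
	fmt.Printf("% x\n", twosComplement(big.NewInt(-256))) // ff 00
}
```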
+
+func marshalLength(out *forkableWriter, i int) (err error) {
+	n := lengthLength(i)
+
+	for ; n > 0; n-- {
+		err = out.WriteByte(byte(i >> uint((n-1)*8)))
+		if err != nil {
+			return
+		}
+	}
+
+	return nil
+}
+
+func lengthLength(i int) (numBytes int) {
+	numBytes = 1
+	for i > 255 {
+		numBytes++
+		i >>= 8
+	}
+	return
+}
+
+func marshalTagAndLength(out *forkableWriter, t tagAndLength) (err error) {
+	b := uint8(t.class) << 6
+	if t.isCompound {
+		b |= 0x20
+	}
+	if t.tag >= 31 {
+		b |= 0x1f
+		err = out.WriteByte(b)
+		if err != nil {
+			return
+		}
+		err = marshalBase128Int(out, int64(t.tag))
+		if err != nil {
+			return
+		}
+	} else {
+		b |= uint8(t.tag)
+		err = out.WriteByte(b)
+		if err != nil {
+			return
+		}
+	}
+
+	if t.length >= 128 {
+		l := lengthLength(t.length)
+		err = out.WriteByte(0x80 | byte(l))
+		if err != nil {
+			return
+		}
+		err = marshalLength(out, t.length)
+		if err != nil {
+			return
+		}
+	} else {
+		err = out.WriteByte(byte(t.length))
+		if err != nil {
+			return
+		}
+	}
+
+	return nil
+}
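
To make the header layout concrete: the identifier byte packs class, constructed flag, and tag; lengths of 128 or more use the long form. A standalone sketch of the same DER rules (short-form tags only, i.e. tag < 31):

```go
package main

import "fmt"

// derHeader mirrors marshalTagAndLength above for tag < 31:
// identifier is class<<6 | 0x20 (if constructed) | tag; a length
// >= 128 is written as 0x80|numBytes followed by big-endian bytes.
func derHeader(class, tag, length int, compound bool) []byte {
	b := byte(class)<<6 | byte(tag)
	if compound {
		b |= 0x20
	}
	out := []byte{b}
	if length < 128 {
		return append(out, byte(length))
	}
	var lenBytes []byte
	for l := length; l > 0; l >>= 8 {
		lenBytes = append([]byte{byte(l)}, lenBytes...)
	}
	out = append(out, 0x80|byte(len(lenBytes)))
	return append(out, lenBytes...)
}

func main() {
	// SEQUENCE (universal, constructed, tag 16) of 300 content bytes.
	fmt.Printf("% X\n", derHeader(0, 16, 300, true)) // 30 82 01 2C
}
```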
+
+func marshalBitString(out *forkableWriter, b BitString) (err error) {
+	paddingBits := byte((8 - b.BitLength%8) % 8)
+	err = out.WriteByte(paddingBits)
+	if err != nil {
+		return
+	}
+	_, err = out.Write(b.Bytes)
+	return
+}
+
+func marshalObjectIdentifier(out *forkableWriter, oid []int) (err error) {
+	if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) {
+		return StructuralError{"invalid object identifier"}
+	}
+
+	err = marshalBase128Int(out, int64(oid[0]*40+oid[1]))
+	if err != nil {
+		return
+	}
+	for i := 2; i < len(oid); i++ {
+		err = marshalBase128Int(out, int64(oid[i]))
+		if err != nil {
+			return
+		}
+	}
+
+	return
+}
+
+func marshalPrintableString(out *forkableWriter, s string) (err error) {
+	b := []byte(s)
+	for _, c := range b {
+		if !isPrintable(c) {
+			return StructuralError{"PrintableString contains invalid character"}
+		}
+	}
+
+	_, err = out.Write(b)
+	return
+}
+
+func marshalIA5String(out *forkableWriter, s string) (err error) {
+	b := []byte(s)
+	for _, c := range b {
+		if c > 127 {
+			return StructuralError{"IA5String contains invalid character"}
+		}
+	}
+
+	_, err = out.Write(b)
+	return
+}
+
+func marshalUTF8String(out *forkableWriter, s string) (err error) {
+	_, err = out.Write([]byte(s))
+	return
+}
+
+func marshalTwoDigits(out *forkableWriter, v int) (err error) {
+	err = out.WriteByte(byte('0' + (v/10)%10))
+	if err != nil {
+		return
+	}
+	return out.WriteByte(byte('0' + v%10))
+}
+
+func marshalFourDigits(out *forkableWriter, v int) (err error) {
+	var bytes [4]byte
+	for i := range bytes {
+		bytes[3-i] = '0' + byte(v%10)
+		v /= 10
+	}
+	_, err = out.Write(bytes[:])
+	return
+}
+
+func outsideUTCRange(t time.Time) bool {
+	year := t.Year()
+	return year < 1950 || year >= 2050
+}
+
+func marshalUTCTime(out *forkableWriter, t time.Time) (err error) {
+	year := t.Year()
+
+	switch {
+	case 1950 <= year && year < 2000:
+		err = marshalTwoDigits(out, year-1900)
+	case 2000 <= year && year < 2050:
+		err = marshalTwoDigits(out, year-2000)
+	default:
+		return StructuralError{"cannot represent time as UTCTime"}
+	}
+	if err != nil {
+		return
+	}
+
+	return marshalTimeCommon(out, t)
+}
+
+func marshalGeneralizedTime(out *forkableWriter, t time.Time) (err error) {
+	year := t.Year()
+	if year < 0 || year > 9999 {
+		return StructuralError{"cannot represent time as GeneralizedTime"}
+	}
+	if err = marshalFourDigits(out, year); err != nil {
+		return
+	}
+
+	return marshalTimeCommon(out, t)
+}
+
+func marshalTimeCommon(out *forkableWriter, t time.Time) (err error) {
+	_, month, day := t.Date()
+
+	err = marshalTwoDigits(out, int(month))
+	if err != nil {
+		return
+	}
+
+	err = marshalTwoDigits(out, day)
+	if err != nil {
+		return
+	}
+
+	hour, min, sec := t.Clock()
+
+	err = marshalTwoDigits(out, hour)
+	if err != nil {
+		return
+	}
+
+	err = marshalTwoDigits(out, min)
+	if err != nil {
+		return
+	}
+
+	err = marshalTwoDigits(out, sec)
+	if err != nil {
+		return
+	}
+
+	_, offset := t.Zone()
+
+	switch {
+	case offset/60 == 0:
+		err = out.WriteByte('Z')
+		return
+	case offset > 0:
+		err = out.WriteByte('+')
+	case offset < 0:
+		err = out.WriteByte('-')
+	}
+
+	if err != nil {
+		return
+	}
+
+	offsetMinutes := offset / 60
+	if offsetMinutes < 0 {
+		offsetMinutes = -offsetMinutes
+	}
+
+	err = marshalTwoDigits(out, offsetMinutes/60)
+	if err != nil {
+		return
+	}
+
+	err = marshalTwoDigits(out, offsetMinutes%60)
+	return
+}
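
For UTCTime the digits written above match Go's reference layout, which makes a quick sanity check easy. A sketch using only the standard library:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// UTCTime is YYMMDDHHMMSS followed by Z or a +/-HHMM offset,
	// the same text "060102150405Z0700" produces.
	t := time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC)
	fmt.Println(t.Format("060102150405Z0700")) // 091110230000Z
}
```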
+
+func stripTagAndLength(in []byte) []byte {
+	_, offset, err := parseTagAndLength(in, 0)
+	if err != nil {
+		return in
+	}
+	return in[offset:]
+}
+
+func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameters) (err error) {
+	switch value.Type() {
+	case flagType:
+		return nil
+	case timeType:
+		t := value.Interface().(time.Time)
+		if params.timeType == TagGeneralizedTime || outsideUTCRange(t) {
+			return marshalGeneralizedTime(out, t)
+		} else {
+			return marshalUTCTime(out, t)
+		}
+	case bitStringType:
+		return marshalBitString(out, value.Interface().(BitString))
+	case objectIdentifierType:
+		return marshalObjectIdentifier(out, value.Interface().(ObjectIdentifier))
+	case bigIntType:
+		return marshalBigInt(out, value.Interface().(*big.Int))
+	}
+
+	switch v := value; v.Kind() {
+	case reflect.Bool:
+		if v.Bool() {
+			return out.WriteByte(255)
+		} else {
+			return out.WriteByte(0)
+		}
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return marshalInt64(out, v.Int())
+	case reflect.Struct:
+		t := v.Type()
+
+		startingField := 0
+
+		// If the first element of the structure is a non-empty
+		// RawContents, then we don't bother serializing the rest.
+		if t.NumField() > 0 && t.Field(0).Type == rawContentsType {
+			s := v.Field(0)
+			if s.Len() > 0 {
+				bytes := make([]byte, s.Len())
+				for i := 0; i < s.Len(); i++ {
+					bytes[i] = uint8(s.Index(i).Uint())
+				}
+				/* The RawContents will contain the tag and
+				 * length fields but we'll also be writing
+				 * those ourselves, so we strip them out of
+				 * bytes */
+				_, err = out.Write(stripTagAndLength(bytes))
+				return
+			} else {
+				startingField = 1
+			}
+		}
+
+		for i := startingField; i < t.NumField(); i++ {
+			var pre *forkableWriter
+			pre, out = out.fork()
+			err = marshalField(pre, v.Field(i), parseFieldParameters(t.Field(i).Tag.Get("asn1")))
+			if err != nil {
+				return
+			}
+		}
+		return
+	case reflect.Slice:
+		sliceType := v.Type()
+		if sliceType.Elem().Kind() == reflect.Uint8 {
+			bytes := make([]byte, v.Len())
+			for i := 0; i < v.Len(); i++ {
+				bytes[i] = uint8(v.Index(i).Uint())
+			}
+			_, err = out.Write(bytes)
+			return
+		}
+
+		// jtasn1: pass the tags on to the members, but clear the
+		// explicit flag and implicit tag value first.
+		params.explicit = false
+		params.tag = nil
+		for i := 0; i < v.Len(); i++ {
+			var pre *forkableWriter
+			pre, out = out.fork()
+			err = marshalField(pre, v.Index(i), params)
+			if err != nil {
+				return
+			}
+		}
+		return
+	case reflect.String:
+		switch params.stringType {
+		case TagIA5String:
+			return marshalIA5String(out, v.String())
+		case TagPrintableString:
+			return marshalPrintableString(out, v.String())
+		default:
+			return marshalUTF8String(out, v.String())
+		}
+	}
+
+	return StructuralError{"unknown Go type"}
+}
+
+func marshalField(out *forkableWriter, v reflect.Value, params fieldParameters) (err error) {
+	if !v.IsValid() {
+		return fmt.Errorf("asn1: cannot marshal nil value")
+	}
+	// If the field is an interface{} then recurse into it.
+	if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 {
+		return marshalField(out, v.Elem(), params)
+	}
+
+	if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty {
+		return
+	}
+
+	if params.optional && params.defaultValue != nil && canHaveDefaultValue(v.Kind()) {
+		defaultValue := reflect.New(v.Type()).Elem()
+		defaultValue.SetInt(*params.defaultValue)
+
+		if reflect.DeepEqual(v.Interface(), defaultValue.Interface()) {
+			return
+		}
+	}
+
+	// If no default value is given then the zero value for the type is
+	// assumed to be the default value. This isn't obviously the correct
+	// behaviour, but it's what Go has traditionally done.
+	if params.optional && params.defaultValue == nil {
+		if reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) {
+			return
+		}
+	}
+
+	if v.Type() == rawValueType {
+		rv := v.Interface().(RawValue)
+		if len(rv.FullBytes) != 0 {
+			_, err = out.Write(rv.FullBytes)
+		} else {
+			err = marshalTagAndLength(out, tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound})
+			if err != nil {
+				return
+			}
+			_, err = out.Write(rv.Bytes)
+		}
+		return
+	}
+
+	tag, isCompound, ok := getUniversalType(v.Type())
+	if !ok {
+		err = StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type())}
+		return
+	}
+	class := ClassUniversal
+
+	if params.timeType != 0 && tag != TagUTCTime {
+		return StructuralError{"explicit time type given to non-time member"}
+	}
+
+	// jtasn1 updated to allow slices of strings
+	if params.stringType != 0 && !(tag == TagPrintableString || (v.Kind() == reflect.Slice && tag == TagSequence && v.Type().Elem().Kind() == reflect.String)) {
+		return StructuralError{"explicit string type given to non-string member"}
+	}
+
+	switch tag {
+	case TagPrintableString:
+		if params.stringType == 0 {
+			// This is a string without an explicit string type. We'll use
+			// a PrintableString if the character set in the string is
+			// sufficiently limited, otherwise we'll use a UTF8String.
+			for _, r := range v.String() {
+				if r >= utf8.RuneSelf || !isPrintable(byte(r)) {
+					if !utf8.ValidString(v.String()) {
+						return errors.New("asn1: string not valid UTF-8")
+					}
+					tag = TagUTF8String
+					break
+				}
+			}
+		} else {
+			tag = params.stringType
+		}
+	case TagUTCTime:
+		if params.timeType == TagGeneralizedTime || outsideUTCRange(v.Interface().(time.Time)) {
+			tag = TagGeneralizedTime
+		}
+	}
+
+	if params.set {
+		if tag != TagSequence {
+			return StructuralError{"non sequence tagged as set"}
+		}
+		tag = TagSet
+	}
+
+	tags, body := out.fork()
+
+	err = marshalBody(body, v, params)
+	if err != nil {
+		return
+	}
+
+	bodyLen := body.Len()
+
+	var explicitTag *forkableWriter
+	if params.explicit {
+		explicitTag, tags = tags.fork()
+	}
+
+	if !params.explicit && params.tag != nil {
+		// implicit tag.
+		tag = *params.tag
+		class = ClassContextSpecific
+	}
+
+	err = marshalTagAndLength(tags, tagAndLength{class, tag, bodyLen, isCompound})
+	if err != nil {
+		return
+	}
+
+	if params.explicit {
+		err = marshalTagAndLength(explicitTag, tagAndLength{
+			class:      ClassContextSpecific,
+			tag:        *params.tag,
+			length:     bodyLen + tags.Len(),
+			isCompound: true,
+		})
+	}
+
+	return err
+}
+
+// Marshal returns the ASN.1 encoding of val.
+//
+// In addition to the struct tags recognised by Unmarshal, the following can be
+// used:
+//
+//	ia5:		causes strings to be marshaled as ASN.1, IA5 strings
+//	omitempty:	causes empty slices to be skipped
+//	printable:	causes strings to be marshaled as ASN.1, PrintableString strings.
+//	utf8:		causes strings to be marshaled as ASN.1, UTF8 strings
+func Marshal(val interface{}) ([]byte, error) {
+	var out bytes.Buffer
+	v := reflect.ValueOf(val)
+	f := newForkableWriter()
+	err := marshalField(f, v, fieldParameters{})
+	if err != nil {
+		return nil, err
+	}
+	_, err = f.writeTo(&out)
+	return out.Bytes(), err
+}
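
A short usage sketch of the marshaling tags listed above (again assuming the vendored import path; the struct is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/jcmturner/gofork/encoding/asn1"
)

// The tags below select which ASN.1 string type is emitted.
type labels struct {
	Host string `asn1:"ia5"`       // IA5String, tag 22 (0x16)
	Name string `asn1:"printable"` // PrintableString, tag 19 (0x13)
}

func main() {
	der, err := asn1.Marshal(labels{Host: "example.com", Name: "hi"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", der) // der[2] is 0x16: the IA5String tag
}
```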
diff --git a/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go b/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go
new file mode 100644
index 0000000..75d4187
--- /dev/null
+++ b/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go
@@ -0,0 +1,98 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC
+2898 / PKCS #5 v2.0.
+
+A key derivation function is useful when encrypting data based on a password
+or any other not-fully-random data. It uses a pseudorandom function to derive
+a secure encryption key based on the password.
+
+While v2.0 of the standard defines only one pseudorandom function to use,
+HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved
+Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
+choose, you can pass the `New` functions from the different SHA packages to
+pbkdf2.Key.
+*/
+package pbkdf2
+
+import (
+	"crypto/hmac"
+	"hash"
+)
+
+// Key derives a key from the password, salt and iteration count, returning a
+// []byte of length keylen that can be used as cryptographic key. The key is
+// derived based on the method described as PBKDF2 with the HMAC variant using
+// the supplied hash function.
+//
+// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you
+// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by
+// doing:
+//
+// 	dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New)
+//
+// Remember to get a good random salt. At least 8 bytes is recommended by the
+// RFC.
+//
+// Using a higher iteration count will increase the cost of an exhaustive
+// search but will also make derivation proportionally slower.
+func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte {
+	return Key64(password, salt, int64(iter), int64(keyLen), h)
+}
+
+// Key64 derives a key from the password, salt and iteration count, returning a
+// []byte of length keylen that can be used as cryptographic key. Key64 uses
+// int64 for the iteration count and key length to allow larger values.
+// The key is derived based on the method described as PBKDF2 with the HMAC
+// variant using the supplied hash function.
+//
+// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you
+// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by
+// doing:
+//
+// 	dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New)
+//
+// Remember to get a good random salt. At least 8 bytes is recommended by the
+// RFC.
+//
+// Using a higher iteration count will increase the cost of an exhaustive
+// search but will also make derivation proportionally slower.
+func Key64(password, salt []byte, iter, keyLen int64, h func() hash.Hash) []byte {
+	prf := hmac.New(h, password)
+	hashLen := int64(prf.Size())
+	numBlocks := (keyLen + hashLen - 1) / hashLen
+
+	var buf [4]byte
+	dk := make([]byte, 0, numBlocks*hashLen)
+	U := make([]byte, hashLen)
+	for block := int64(1); block <= numBlocks; block++ {
+		// N.B.: || means concatenation, ^ means XOR
+		// for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter
+		// U_1 = PRF(password, salt || uint(i))
+		prf.Reset()
+		prf.Write(salt)
+		buf[0] = byte(block >> 24)
+		buf[1] = byte(block >> 16)
+		buf[2] = byte(block >> 8)
+		buf[3] = byte(block)
+		prf.Write(buf[:4])
+		dk = prf.Sum(dk)
+		T := dk[int64(len(dk))-hashLen:]
+		copy(U, T)
+
+		// U_n = PRF(password, U_(n-1))
+		for n := int64(2); n <= iter; n++ {
+			prf.Reset()
+			prf.Write(U)
+			U = U[:0]
+			U = prf.Sum(U)
+			for x := range U {
+				T[x] ^= U[x]
+			}
+		}
+	}
+	return dk[:keyLen]
+}
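
A self-contained usage sketch following the doc comment, but with HMAC-SHA-256 as the PRF (the import path assumes this vendored copy):

```go
package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"

	"github.com/jcmturner/gofork/x/crypto/pbkdf2"
)

func main() {
	salt := make([]byte, 16) // the RFC recommends at least 8 random bytes
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}
	// Derive a 32-byte key suitable for AES-256.
	dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha256.New)
	fmt.Printf("%x\n", dk)
}
```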
diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE
new file mode 100644
index 0000000..1eb75ef
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2019 Klaus Post. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md
new file mode 100644
index 0000000..ea7324d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/README.md
@@ -0,0 +1,79 @@
+# Finite State Entropy
+
+This package provides Finite State Entropy encoding and decoding.
+
+Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS))
+encoding provides fast, near-optimal symbol encoding/decoding
+for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd).
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+It does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step for compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse)
+
+## News
+
+ * Feb 2018: First implementation released. Consider this beta software for now.
+
+# Usage
+
+This package provides a low level interface that allows compressing single independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                  |
+|---------------------|------------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                            |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                     |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated  |
+| `(error)`           | An internal error occurred.                                                  |
+
+As can be seen above, some errors will be returned even under normal operation, so it is important to handle them.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples).
+
+# Performance
+
+Many factors affect speed. Block sizes and compressibility of the material are primary factors.
+All compression functions currently run on the calling goroutine only, so only one core will be used per block.
+
+The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
+is used to reduce some of the processing, so if all your input is above byte value 64, for instance, it may be
+beneficial to transpose all your input values down by 64.
+
+With moderate block sizes around 64k, speed is typically 200MB/s per core for compression and
+around 300MB/s for decompression.
+
+The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s.
+
+# Plans
+
+At some point, more internals will be exposed to facilitate more "expert" usage of the components.
+
+A streaming interface is also likely to be implemented, likely compatible with the [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261).
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking
+changes will likely not be accepted. If in doubt, open an issue before writing the PR.
\ No newline at end of file
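
A minimal sketch of the workflow the README describes, including the expected non-nil sentinel errors and the `Out` buffer caveat:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/fse"
)

func main() {
	in := bytes.Repeat([]byte("abcabcab"), 1024) // skewed, compressible input
	var s fse.Scratch
	comp, err := fse.Compress(in, &s)
	switch err {
	case nil:
	case fse.ErrIncompressible, fse.ErrUseRLE:
		fmt.Println("store the block raw:", err) // normal outcomes, not failures
		return
	default:
		panic(err)
	}
	// comp aliases s.Out; detach it before re-using the scratch.
	s.Out = nil
	dec, err := fse.Decompress(comp, &s)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(dec, in), len(in), "->", len(comp))
}
```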
diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go
new file mode 100644
index 0000000..b9db204
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/bitreader.go
@@ -0,0 +1,107 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import (
+	"errors"
+	"io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	b.fill()
+	b.fill()
+	b.bitsRead += 8 - uint8(highBits(uint32(v)))
+	return nil
+}
+
+// getBits will return n bits. n can be 0.
+func (b *bitReader) getBits(n uint8) uint16 {
+	if n == 0 || b.bitsRead >= 64 {
+		return 0
+	}
+	return b.getBitsFast(n)
+}
+
+// getBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) getBitsFast(n uint8) uint16 {
+	const regMask = 64 - 1
+	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+	b.bitsRead += n
+	return v
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+	// Do a single re-slice to avoid bounds checks.
+	v := b.in[b.off-4 : b.off]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value = (b.value << 32) | uint64(low)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReader) fill() {
+	if b.bitsRead < 32 {
+		return
+	}
+	if b.off > 4 {
+		v := b.in[b.off-4 : b.off]
+		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		b.value = (b.value << 32) | uint64(low)
+		b.bitsRead -= 32
+		b.off -= 4
+		return
+	}
+	for b.off > 0 {
+		b.value = (b.value << 8) | uint64(b.in[b.off-1])
+		b.bitsRead -= 8
+		b.off--
+	}
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReader) finished() bool {
+	return b.off == 0 && b.bitsRead >= 64
+}
+
+// close closes the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReader) close() error {
+	// Release reference.
+	b.in = nil
+	if b.bitsRead > 64 {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go
new file mode 100644
index 0000000..43e4636
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go
@@ -0,0 +1,168 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import "fmt"
+
+// bitWriter will write bits.
+// First bit will be LSB of the first byte of output.
+type bitWriter struct {
+	bitContainer uint64
+	nBits        uint8
+	out          []byte
+}
+
+// bitMask16 contains bitmasks, with extra entries to avoid bounds checks.
+var bitMask16 = [32]uint16{
+	0, 1, 3, 7, 0xF, 0x1F,
+	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF} /* up to 16 bits */
+
+// addBits16NC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// addBits16ZeroNC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+// This is fastest if bits can be zero.
+func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
+	if bits == 0 {
+		return
+	}
+	value <<= (16 - bits) & 15
+	value >>= (16 - bits) & 15
+	b.bitContainer |= uint64(value) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// flush will flush all pending full bytes.
+// There will be at least 56 bits available for writing when this has been called.
+// Using flush32 is faster, but leaves less space for writing.
+func (b *bitWriter) flush() {
+	v := b.nBits >> 3
+	switch v {
+	case 0:
+	case 1:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+		)
+	case 2:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+		)
+	case 3:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+		)
+	case 4:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+		)
+	case 5:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+		)
+	case 6:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+		)
+	case 7:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+		)
+	case 8:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+			byte(b.bitContainer>>56),
+		)
+	default:
+		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
+	}
+	b.bitContainer >>= v << 3
+	b.nBits &= 7
+}
+
+// flush32 will flush out, so there are at least 32 bits available for writing.
+func (b *bitWriter) flush32() {
+	if b.nBits < 32 {
+		return
+	}
+	b.out = append(b.out,
+		byte(b.bitContainer),
+		byte(b.bitContainer>>8),
+		byte(b.bitContainer>>16),
+		byte(b.bitContainer>>24))
+	b.nBits -= 32
+	b.bitContainer >>= 32
+}
+
+// flushAlign will flush remaining full bytes and align to next byte boundary.
+func (b *bitWriter) flushAlign() {
+	nbBytes := (b.nBits + 7) >> 3
+	for i := uint8(0); i < nbBytes; i++ {
+		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
+	}
+	b.nBits = 0
+	b.bitContainer = 0
+}
+
+// close will write the alignment bit and write the final byte(s)
+// to the output.
+func (b *bitWriter) close() error {
+	// End mark
+	b.addBits16Clean(1, 1)
+	// flush until next byte.
+	b.flushAlign()
+	return nil
+}
+
+// reset and continue writing by appending to out.
+func (b *bitWriter) reset(out []byte) {
+	b.bitContainer = 0
+	b.nBits = 0
+	b.out = out
+}
diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go
new file mode 100644
index 0000000..f228a46
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/bytereader.go
@@ -0,0 +1,56 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+	b   []byte
+	off int
+}
+
+// init will initialize the reader and set the input.
+func (b *byteReader) init(in []byte) {
+	b.b = in
+	b.off = 0
+}
+
+// advance moves the stream b ahead by n bytes.
+func (b *byteReader) advance(n uint) {
+	b.off += int(n)
+}
+
+// Int32 returns a little endian int32 starting at current offset.
+func (b byteReader) Int32() int32 {
+	b2 := b.b[b.off : b.off+4 : b.off+4]
+	v3 := int32(b2[3])
+	v2 := int32(b2[2])
+	v1 := int32(b2[1])
+	v0 := int32(b2[0])
+	return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 {
+	b2 := b.b[b.off : b.off+4 : b.off+4]
+	v3 := uint32(b2[3])
+	v2 := uint32(b2[2])
+	v1 := uint32(b2[1])
+	v0 := uint32(b2[0])
+	return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+	return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+	return len(b.b) - b.off
+}
diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go
new file mode 100644
index 0000000..b69237c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/compress.go
@@ -0,0 +1,684 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import (
+	"errors"
+	"fmt"
+)
+
+// Compress the input bytes. Input must be < 2GB.
+// Provide a Scratch buffer to avoid memory allocations.
+// Note that the output is also kept in the scratch buffer.
+// If the input is too hard to compress, ErrIncompressible is returned.
+// If the input is a single repeated byte value, ErrUseRLE is returned.
+func Compress(in []byte, s *Scratch) ([]byte, error) {
+	if len(in) <= 1 {
+		return nil, ErrIncompressible
+	}
+	if len(in) > (2<<30)-1 {
+		return nil, errors.New("input too big, must be < 2GB")
+	}
+	s, err := s.prepare(in)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create histogram, if none was provided.
+	maxCount := s.maxCount
+	if maxCount == 0 {
+		maxCount = s.countSimple(in)
+	}
+	// Reset for next run.
+	s.clearCount = true
+	s.maxCount = 0
+	if maxCount == len(in) {
+		// One symbol, use RLE
+		return nil, ErrUseRLE
+	}
+	if maxCount == 1 || maxCount < (len(in)>>7) {
+		// Each symbol is present at most once, or the input is too evenly distributed.
+		return nil, ErrIncompressible
+	}
+	s.optimalTableLog()
+	err = s.normalizeCount()
+	if err != nil {
+		return nil, err
+	}
+	err = s.writeCount()
+	if err != nil {
+		return nil, err
+	}
+
+	if false {
+		err = s.validateNorm()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	err = s.buildCTable()
+	if err != nil {
+		return nil, err
+	}
+	err = s.compress(in)
+	if err != nil {
+		return nil, err
+	}
+	s.Out = s.bw.out
+	// Check if we compressed.
+	if len(s.Out) >= len(in) {
+		return nil, ErrIncompressible
+	}
+	return s.Out, nil
+}
+
+// cState contains the compression state of a stream.
+type cState struct {
+	bw         *bitWriter
+	stateTable []uint16
+	state      uint16
+}
+
+// init will initialize the compression state to the first symbol of the stream.
+func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) {
+	c.bw = bw
+	c.stateTable = ct.stateTable
+
+	nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16
+	im := int32((nbBitsOut << 16) - first.deltaNbBits)
+	lu := (im >> nbBitsOut) + first.deltaFindState
+	c.state = c.stateTable[lu]
+	return
+}
+
+// encode the output symbol provided and write it to the bitstream.
+func (c *cState) encode(symbolTT symbolTransform) {
+	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
+	dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState
+	c.bw.addBits16NC(c.state, uint8(nbBitsOut))
+	c.state = c.stateTable[dstState]
+}
+
+// encode the output symbol provided and write it to the bitstream.
+func (c *cState) encodeZero(symbolTT symbolTransform) {
+	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
+	dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState
+	c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut))
+	c.state = c.stateTable[dstState]
+}
+
+// flush will write the tablelog to the output and flush the remaining full bytes.
+func (c *cState) flush(tableLog uint8) {
+	c.bw.flush32()
+	c.bw.addBits16NC(c.state, tableLog)
+	c.bw.flush()
+}
+
+// compress is the main compression loop that will encode the input from the last byte to the first.
+func (s *Scratch) compress(src []byte) error {
+	if len(src) <= 2 {
+		return errors.New("compress: src too small")
+	}
+	tt := s.ct.symbolTT[:256]
+	s.bw.reset(s.Out)
+
+	// Our two states each encode every second byte.
+	// Last byte encoded (first byte decoded) will always be encoded by c1.
+	var c1, c2 cState
+
+	// Encode so remaining size is divisible by 4.
+	ip := len(src)
+	if ip&1 == 1 {
+		c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]])
+		c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]])
+		c1.encodeZero(tt[src[ip-3]])
+		ip -= 3
+	} else {
+		c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]])
+		c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]])
+		ip -= 2
+	}
+	if ip&2 != 0 {
+		c2.encodeZero(tt[src[ip-1]])
+		c1.encodeZero(tt[src[ip-2]])
+		ip -= 2
+	}
+
+	// Main compression loop.
+	switch {
+	case !s.zeroBits && s.actualTableLog <= 8:
+		// We can encode 4 symbols without requiring a flush.
+		// We do not need to check if any output is 0 bits.
+		for ip >= 4 {
+			s.bw.flush32()
+			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			c2.encode(tt[v0])
+			c1.encode(tt[v1])
+			c2.encode(tt[v2])
+			c1.encode(tt[v3])
+			ip -= 4
+		}
+	case !s.zeroBits:
+		// We do not need to check if any output is 0 bits.
+		for ip >= 4 {
+			s.bw.flush32()
+			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			c2.encode(tt[v0])
+			c1.encode(tt[v1])
+			s.bw.flush32()
+			c2.encode(tt[v2])
+			c1.encode(tt[v3])
+			ip -= 4
+		}
+	case s.actualTableLog <= 8:
+		// We can encode 4 symbols without requiring a flush
+		for ip >= 4 {
+			s.bw.flush32()
+			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			c2.encodeZero(tt[v0])
+			c1.encodeZero(tt[v1])
+			c2.encodeZero(tt[v2])
+			c1.encodeZero(tt[v3])
+			ip -= 4
+		}
+	default:
+		for ip >= 4 {
+			s.bw.flush32()
+			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			c2.encodeZero(tt[v0])
+			c1.encodeZero(tt[v1])
+			s.bw.flush32()
+			c2.encodeZero(tt[v2])
+			c1.encodeZero(tt[v3])
+			ip -= 4
+		}
+	}
+
+	// Flush final state.
+	// Used to initialize state when decoding.
+	c2.flush(s.actualTableLog)
+	c1.flush(s.actualTableLog)
+
+	return s.bw.close()
+}
+
+// writeCount will write the normalized histogram count to header.
+// This is read back by readNCount.
+func (s *Scratch) writeCount() error {
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+	)
+	if cap(s.Out) < maxHeaderSize {
+		s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize)
+	}
+	outP := uint(0)
+	out := s.Out[:maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
+		}
+		bitStream += uint32(count) << bitCount
+		bitCount += nbBits
+		if count < max {
+			bitCount--
+		}
+
+		previous0 = count == 1
+		if remaining < 1 {
+			return errors.New("internal error: remaining<1")
+		}
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if bitCount > 16 {
+			out[outP] = byte(bitStream)
+			out[outP+1] = byte(bitStream >> 8)
+			outP += 2
+			bitStream >>= 16
+			bitCount -= 16
+		}
+	}
+
+	out[outP] = byte(bitStream)
+	out[outP+1] = byte(bitStream >> 8)
+	outP += (bitCount + 7) / 8
+
+	if uint16(charnum) > s.symbolLen {
+		return errors.New("internal error: charnum > s.symbolLen")
+	}
+	s.Out = out[:outP]
+	return nil
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+	deltaFindState int32
+	deltaNbBits    uint32
+}
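+
+// The packed form lets encode find the bit count branch-free: deltaNbBits holds
+// (maxBitsOut << 16) - minStatePlus (see buildCTable), so adding the current
+// state and shifting right by 16 yields maxBitsOut when state >= minStatePlus,
+// and maxBitsOut-1 otherwise.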
+
+// String prints the values as a human-readable string.
+func (s symbolTransform) String() string {
+	return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState)
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+	tableSymbol []byte
+	stateTable  []uint16
+	symbolTT    []symbolTransform
+}
+
+// allocCtable will allocate tables needed for compression.
+// If the existing tables are big enough, they are simply re-used.
+func (s *Scratch) allocCtable() {
+	tableSize := 1 << s.actualTableLog
+	// get tableSymbol that is big enough.
+	if cap(s.ct.tableSymbol) < int(tableSize) {
+		s.ct.tableSymbol = make([]byte, tableSize)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+	ctSize := tableSize
+	if cap(s.ct.stateTable) < ctSize {
+		s.ct.stateTable = make([]uint16, ctSize)
+	}
+	s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+	if cap(s.ct.symbolTT) < 256 {
+		s.ct.symbolTT = make([]symbolTransform, 256)
+	}
+	s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *Scratch) buildCTable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	var cumul [maxSymbolValue + 2]int16
+
+	s.allocCtable()
+	tableSymbol := s.ct.tableSymbol[:tableSize]
+	// symbol start positions
+	{
+		cumul[0] = 0
+		for ui, v := range s.norm[:s.symbolLen-1] {
+			u := byte(ui) // one less than reference
+			if v == -1 {
+				// Low proba symbol
+				cumul[u+1] = cumul[u] + 1
+				tableSymbol[highThreshold] = u
+				highThreshold--
+			} else {
+				cumul[u+1] = cumul[u] + v
+			}
+		}
+		// Encode last symbol separately to avoid overflowing u
+		u := int(s.symbolLen - 1)
+		v := s.norm[s.symbolLen-1]
+		if v == -1 {
+			// Low proba symbol
+			cumul[u+1] = cumul[u] + 1
+			tableSymbol[highThreshold] = byte(u)
+			highThreshold--
+		} else {
+			cumul[u+1] = cumul[u] + v
+		}
+		if uint32(cumul[s.symbolLen]) != tableSize {
+			return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
+		}
+		cumul[s.symbolLen] = int16(tableSize) + 1
+	}
+	// Spread symbols
+	s.zeroBits = false
+	{
+		step := tableStep(tableSize)
+		tableMask := tableSize - 1
+		var position uint32
+		// if any symbol > largeLimit, we may have 0 bits output.
+		largeLimit := int16(1 << (s.actualTableLog - 1))
+		for ui, v := range s.norm[:s.symbolLen] {
+			symbol := byte(ui)
+			if v > largeLimit {
+				s.zeroBits = true
+			}
+			for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
+				tableSymbol[position] = symbol
+				position = (position + step) & tableMask
+				for position > highThreshold {
+					position = (position + step) & tableMask
+				} /* Low proba area */
+			}
+		}
+
+		// Check if we have gone through all positions
+		if position != 0 {
+			return errors.New("position!=0")
+		}
+	}
+
+	// Build table
+	table := s.ct.stateTable
+	{
+		tsi := int(tableSize)
+		for u, v := range tableSymbol {
+			// TableU16 : sorted by symbol order; gives next state value
+			table[cumul[v]] = uint16(tsi + u)
+			cumul[v]++
+		}
+	}
+
+	// Build Symbol Transformation Table
+	{
+		total := int16(0)
+		symbolTT := s.ct.symbolTT[:s.symbolLen]
+		tableLog := s.actualTableLog
+		tl := (uint32(tableLog) << 16) - (1 << tableLog)
+		for i, v := range s.norm[:s.symbolLen] {
+			switch v {
+			case 0:
+			case -1, 1:
+				symbolTT[i].deltaNbBits = tl
+				symbolTT[i].deltaFindState = int32(total - 1)
+				total++
+			default:
+				maxBitsOut := uint32(tableLog) - highBits(uint32(v-1))
+				minStatePlus := uint32(v) << maxBitsOut
+				symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
+				symbolTT[i].deltaFindState = int32(total - v)
+				total += v
+			}
+		}
+		if total != int16(tableSize) {
+			return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize)
+		}
+	}
+	return nil
+}
+
+// countSimple will create a simple histogram in s.count.
+// Returns the biggest count.
+// Does not update s.clearCount.
+func (s *Scratch) countSimple(in []byte) (max int) {
+	for _, v := range in {
+		s.count[v]++
+	}
+	m := uint32(0)
+	for i, v := range s.count[:] {
+		if v > m {
+			m = v
+		}
+		if v > 0 {
+			s.symbolLen = uint16(i) + 1
+		}
+	}
+	return int(m)
+}
+
+// minTableLog provides the minimum logSize to safely represent a distribution.
+func (s *Scratch) minTableLog() uint8 {
+	minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1
+	minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2
+	if minBitsSrc < minBitsSymbols {
+		return uint8(minBitsSrc)
+	}
+	return uint8(minBitsSymbols)
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *Scratch) optimalTableLog() {
+	tableLog := s.TableLog
+	minBits := s.minTableLog()
+	maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2
+	if maxBitsSrc < tableLog {
+		// Accuracy can be reduced
+		tableLog = maxBitsSrc
+	}
+	if minBits > tableLog {
+		tableLog = minBits
+	}
+	// Need a minimum to safely represent all symbol values
+	if tableLog < minTablelog {
+		tableLog = minTablelog
+	}
+	if tableLog > maxTableLog {
+		tableLog = maxTableLog
+	}
+	s.actualTableLog = tableLog
+}
+
+var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}
+
+// normalizeCount will normalize the count of the symbols so
+// the total is equal to the table size.
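+// Each count maps to roughly count*tableSize/total slots; e.g. with tableLog 8
+// (a 256-entry table), a symbol making up 25% of the input gets about 64 slots
+// and is therefore coded in close to log2(256/64) = 2 bits.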
+func (s *Scratch) normalizeCount() error {
+	var (
+		tableLog          = s.actualTableLog
+		scale             = 62 - uint64(tableLog)
+		step              = (1 << 62) / uint64(s.br.remain())
+		vStep             = uint64(1) << (scale - 20)
+		stillToDistribute = int16(1 << tableLog)
+		largest           int
+		largestP          int16
+		lowThreshold      = (uint32)(s.br.remain() >> tableLog)
+	)
+
+	for i, cnt := range s.count[:s.symbolLen] {
+		// already handled
+		// if (count[s] == s.length) return 0;   /* rle special case */
+
+		if cnt == 0 {
+			s.norm[i] = 0
+			continue
+		}
+		if cnt <= lowThreshold {
+			s.norm[i] = -1
+			stillToDistribute--
+		} else {
+			proba := (int16)((uint64(cnt) * step) >> scale)
+			if proba < 8 {
+				restToBeat := vStep * uint64(rtbTable[proba])
+				v := uint64(cnt)*step - (uint64(proba) << scale)
+				if v > restToBeat {
+					proba++
+				}
+			}
+			if proba > largestP {
+				largestP = proba
+				largest = i
+			}
+			s.norm[i] = proba
+			stillToDistribute -= proba
+		}
+	}
+
+	if -stillToDistribute >= (s.norm[largest] >> 1) {
+		// corner case, need another normalization method
+		return s.normalizeCount2()
+	}
+	s.norm[largest] += stillToDistribute
+	return nil
+}
+
+// Secondary normalization method.
+// To be used when primary method fails.
+func (s *Scratch) normalizeCount2() error {
+	const notYetAssigned = -2
+	var (
+		distributed  uint32
+		total        = uint32(s.br.remain())
+		tableLog     = s.actualTableLog
+		lowThreshold = uint32(total >> tableLog)
+		lowOne       = uint32((total * 3) >> (tableLog + 1))
+	)
+	for i, cnt := range s.count[:s.symbolLen] {
+		if cnt == 0 {
+			s.norm[i] = 0
+			continue
+		}
+		if cnt <= lowThreshold {
+			s.norm[i] = -1
+			distributed++
+			total -= cnt
+			continue
+		}
+		if cnt <= lowOne {
+			s.norm[i] = 1
+			distributed++
+			total -= cnt
+			continue
+		}
+		s.norm[i] = notYetAssigned
+	}
+	toDistribute := (1 << tableLog) - distributed
+
+	if (total / toDistribute) > lowOne {
+		// risk of rounding to zero
+		lowOne = uint32((total * 3) / (toDistribute * 2))
+		for i, cnt := range s.count[:s.symbolLen] {
+			if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
+				s.norm[i] = 1
+				distributed++
+				total -= cnt
+				continue
+			}
+		}
+		toDistribute = (1 << tableLog) - distributed
+	}
+	if distributed == uint32(s.symbolLen)+1 {
+		// all values are pretty poor;
+		//   probably incompressible data (should have already been detected);
+		//   find max, then give all remaining points to max
+		var maxV int
+		var maxC uint32
+		for i, cnt := range s.count[:s.symbolLen] {
+			if cnt > maxC {
+				maxV = i
+				maxC = cnt
+			}
+		}
+		s.norm[maxV] += int16(toDistribute)
+		return nil
+	}
+
+	if total == 0 {
+		// all of the symbols were low enough for the lowOne or lowThreshold
+		for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) {
+			if s.norm[i] > 0 {
+				toDistribute--
+				s.norm[i]++
+			}
+		}
+		return nil
+	}
+
+	var (
+		vStepLog = 62 - uint64(tableLog)
+		mid      = uint64((1 << (vStepLog - 1)) - 1)
+		rStep    = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining
+		tmpTotal = mid
+	)
+	for i, cnt := range s.count[:s.symbolLen] {
+		if s.norm[i] == notYetAssigned {
+			var (
+				end    = tmpTotal + uint64(cnt)*rStep
+				sStart = uint32(tmpTotal >> vStepLog)
+				sEnd   = uint32(end >> vStepLog)
+				weight = sEnd - sStart
+			)
+			if weight < 1 {
+				return errors.New("weight < 1")
+			}
+			s.norm[i] = int16(weight)
+			tmpTotal = end
+		}
+	}
+	return nil
+}
+
+// validateNorm validates the normalized histogram table.
+func (s *Scratch) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	for i, v := range s.count[s.symbolLen:] {
+		if v != 0 {
+			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go
new file mode 100644
index 0000000..413ec3b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/decompress.go
@@ -0,0 +1,374 @@
+package fse
+
+import (
+	"errors"
+	"fmt"
+)
+
+const (
+	tablelogAbsoluteMax = 15
+)
+
+// Decompress a block of data.
+// You can provide a scratch buffer to avoid allocations.
+// If nil is provided a temporary one will be allocated.
+// It is possible, but by no means guaranteed, that corrupt data will
+// return an error.
+// It is up to the caller to verify the integrity of the returned data.
+// Use a predefined Scratch to set the maximum acceptable output size.
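+//
+// A minimal usage sketch (error handling elided; the input must have been
+// produced by Compress from this package):
+//
+//	var s fse.Scratch
+//	s.DecompressLimit = 1 << 20 // optional: cap decoded output at 1 MiB
+//	out, err := fse.Decompress(block, &s)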
+func Decompress(b []byte, s *Scratch) ([]byte, error) {
+	s, err := s.prepare(b)
+	if err != nil {
+		return nil, err
+	}
+	s.Out = s.Out[:0]
+	err = s.readNCount()
+	if err != nil {
+		return nil, err
+	}
+	err = s.buildDtable()
+	if err != nil {
+		return nil, err
+	}
+	err = s.decompress()
+	if err != nil {
+		return nil, err
+	}
+
+	return s.Out, nil
+}
+
+// readNCount will read the symbol distribution so decoding tables can be constructed.
+func (s *Scratch) readNCount() error {
+	var (
+		charnum   uint16
+		previous0 bool
+		b         = &s.br
+	)
+	iend := b.remain()
+	if iend < 4 {
+		return errors.New("input too small")
+	}
+	bitStream := b.Uint32()
+	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+	if nbBits > tablelogAbsoluteMax {
+		return errors.New("tableLog too large")
+	}
+	bitStream >>= 4
+	bitCount := uint(4)
+
+	s.actualTableLog = uint8(nbBits)
+	remaining := int32((1 << nbBits) + 1)
+	threshold := int32(1 << nbBits)
+	gotTotal := int32(0)
+	nbBits++
+
+	for remaining > 1 {
+		if previous0 {
+			n0 := charnum
+			for (bitStream & 0xFFFF) == 0xFFFF {
+				n0 += 24
+				if b.off < iend-5 {
+					b.advance(2)
+					bitStream = b.Uint32() >> bitCount
+				} else {
+					bitStream >>= 16
+					bitCount += 16
+				}
+			}
+			for (bitStream & 3) == 3 {
+				n0 += 3
+				bitStream >>= 2
+				bitCount += 2
+			}
+			n0 += uint16(bitStream & 3)
+			bitCount += 2
+			if n0 > maxSymbolValue {
+				return errors.New("maxSymbolValue too small")
+			}
+			for charnum < n0 {
+				s.norm[charnum&0xff] = 0
+				charnum++
+			}
+
+			if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+				b.advance(bitCount >> 3)
+				bitCount &= 7
+				bitStream = b.Uint32() >> bitCount
+			} else {
+				bitStream >>= 2
+			}
+		}
+
+		max := (2*(threshold) - 1) - (remaining)
+		var count int32
+
+		if (int32(bitStream) & (threshold - 1)) < max {
+			count = int32(bitStream) & (threshold - 1)
+			bitCount += nbBits - 1
+		} else {
+			count = int32(bitStream) & (2*threshold - 1)
+			if count >= threshold {
+				count -= max
+			}
+			bitCount += nbBits
+		}
+
+		count-- // extra accuracy
+		if count < 0 {
+			// -1 means +1
+			remaining += count
+			gotTotal -= count
+		} else {
+			remaining -= count
+			gotTotal += count
+		}
+		s.norm[charnum&0xff] = int16(count)
+		charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+		if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+		}
+		bitStream = b.Uint32() >> (bitCount & 31)
+	}
+	s.symbolLen = charnum
+
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	return nil
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+type decSymbol struct {
+	newState uint16
+	symbol   uint8
+	nbBits   uint8
+}
+
+// allocDtable will allocate decoding tables if they are not big enough.
+func (s *Scratch) allocDtable() {
+	tableSize := 1 << s.actualTableLog
+	if cap(s.decTable) < int(tableSize) {
+		s.decTable = make([]decSymbol, tableSize)
+	}
+	s.decTable = s.decTable[:tableSize]
+
+	if cap(s.ct.tableSymbol) < 256 {
+		s.ct.tableSymbol = make([]byte, 256)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:256]
+
+	if cap(s.ct.stateTable) < 256 {
+		s.ct.stateTable = make([]uint16, 256)
+	}
+	s.ct.stateTable = s.ct.stateTable[:256]
+}
+
+// buildDtable will build the decoding table.
+func (s *Scratch) buildDtable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	s.allocDtable()
+	symbolNext := s.ct.stateTable[:256]
+
+	// Init, lay down lowprob symbols
+	s.zeroBits = false
+	{
+		largeLimit := int16(1 << (s.actualTableLog - 1))
+		for i, v := range s.norm[:s.symbolLen] {
+			if v == -1 {
+				s.decTable[highThreshold].symbol = uint8(i)
+				highThreshold--
+				symbolNext[i] = 1
+			} else {
+				if v >= largeLimit {
+					s.zeroBits = true
+				}
+				symbolNext[i] = uint16(v)
+			}
+		}
+	}
+	// Spread symbols
+	{
+		tableMask := tableSize - 1
+		step := tableStep(tableSize)
+		position := uint32(0)
+		for ss, v := range s.norm[:s.symbolLen] {
+			for i := 0; i < int(v); i++ {
+				s.decTable[position].symbol = uint8(ss)
+				position = (position + step) & tableMask
+				for position > highThreshold {
+					// lowprob area
+					position = (position + step) & tableMask
+				}
+			}
+		}
+		if position != 0 {
+			// position must reach all cells once, otherwise normalizedCounter is incorrect
+			return errors.New("corrupted input (position != 0)")
+		}
+	}
+
+	// Build Decoding table
+	{
+		tableSize := uint16(1 << s.actualTableLog)
+		for u, v := range s.decTable {
+			symbol := v.symbol
+			nextState := symbolNext[symbol]
+			symbolNext[symbol] = nextState + 1
+			nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
+			s.decTable[u].nbBits = nBits
+			newState := (nextState << nBits) - tableSize
+			if newState >= tableSize {
+				return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
+			}
+			if newState == uint16(u) && nBits == 0 {
+				// Seems weird that this is possible with nbits > 0.
+				return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
+			}
+			s.decTable[u].newState = newState
+		}
+	}
+	return nil
+}
+
+// decompress will decompress the bitstream.
+// If the buffer is over-read an error is returned.
+func (s *Scratch) decompress() error {
+	br := &s.bits
+	br.init(s.br.unread())
+
+	var s1, s2 decoder
+	// Initialize and decode first state and symbol.
+	s1.init(br, s.decTable, s.actualTableLog)
+	s2.init(br, s.decTable, s.actualTableLog)
+
+	// Use temp table to avoid bound checks/append penalty.
+	var tmp = s.ct.tableSymbol[:256]
+	var off uint8
+
+	// Main part
+	if !s.zeroBits {
+		for br.off >= 8 {
+			br.fillFast()
+			tmp[off+0] = s1.nextFast()
+			tmp[off+1] = s2.nextFast()
+			br.fillFast()
+			tmp[off+2] = s1.nextFast()
+			tmp[off+3] = s2.nextFast()
+			off += 4
+			// When off is 0, we have overflowed and should write.
+			if off == 0 {
+				s.Out = append(s.Out, tmp...)
+				if len(s.Out) >= s.DecompressLimit {
+					return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
+				}
+			}
+		}
+	} else {
+		for br.off >= 8 {
+			br.fillFast()
+			tmp[off+0] = s1.next()
+			tmp[off+1] = s2.next()
+			br.fillFast()
+			tmp[off+2] = s1.next()
+			tmp[off+3] = s2.next()
+			off += 4
+			// When off is 0, we have overflowed and should write.
+			if off == 0 {
+				s.Out = append(s.Out, tmp...)
+				if len(s.Out) >= s.DecompressLimit {
+					return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
+				}
+			}
+		}
+	}
+	s.Out = append(s.Out, tmp[:off]...)
+
+	// Final bits, a bit more expensive check
+	for {
+		if s1.finished() {
+			s.Out = append(s.Out, s1.final(), s2.final())
+			break
+		}
+		br.fill()
+		s.Out = append(s.Out, s1.next())
+		if s2.finished() {
+			s.Out = append(s.Out, s2.final(), s1.final())
+			break
+		}
+		s.Out = append(s.Out, s2.next())
+		if len(s.Out) >= s.DecompressLimit {
+			return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
+		}
+	}
+	return br.close()
+}
+
+// decoder keeps track of the current state and updates it from the bitstream.
+type decoder struct {
+	state uint16
+	br    *bitReader
+	dt    []decSymbol
+}
+
+// init will initialize the decoder and read the first state from the stream.
+func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) {
+	d.dt = dt
+	d.br = in
+	d.state = uint16(in.getBits(tableLog))
+}
+
+// next returns the next symbol and sets the next state.
+// At least tablelog bits must be available in the bit reader.
+func (d *decoder) next() uint8 {
+	n := &d.dt[d.state]
+	lowBits := d.br.getBits(n.nbBits)
+	d.state = n.newState + lowBits
+	return n.symbol
+}
+
+// finished returns true if all bits have been read from the bitstream
+// and the next state would require reading bits from the input.
+func (d *decoder) finished() bool {
+	return d.br.finished() && d.dt[d.state].nbBits > 0
+}
+
+// final returns the current state symbol without decoding the next.
+func (d *decoder) final() uint8 {
+	return d.dt[d.state].symbol
+}
+
+// nextFast returns the next symbol and sets the next state.
+// This can only be used if no symbols are 0 bits.
+// At least tablelog bits must be available in the bit reader.
+func (d *decoder) nextFast() uint8 {
+	n := d.dt[d.state]
+	lowBits := d.br.getBitsFast(n.nbBits)
+	d.state = n.newState + lowBits
+	return n.symbol
+}
diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go
new file mode 100644
index 0000000..075357b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/fse.go
@@ -0,0 +1,143 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+// Package fse provides Finite State Entropy encoding and decoding.
+//
+// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding
+// for byte blocks as implemented in zstd.
+//
+// See https://github.com/klauspost/compress/tree/master/fse for more information.
+package fse
+
+import (
+	"errors"
+	"fmt"
+	"math/bits"
+)
+
+const (
+	/*!MEMORY_USAGE :
+	 *  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+	 *  Increasing memory usage improves compression ratio
+	 *  Reduced memory usage can improve speed, due to cache effect
+	 *  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+	maxMemoryUsage     = 14
+	defaultMemoryUsage = 13
+
+	maxTableLog     = maxMemoryUsage - 2
+	maxTablesize    = 1 << maxTableLog
+	defaultTablelog = defaultMemoryUsage - 2
+	minTablelog     = 5
+	maxSymbolValue  = 255
+)
+
+var (
+	// ErrIncompressible is returned when input is judged to be too hard to compress.
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+)
+
+// Scratch provides temporary storage for compression and decompression.
+type Scratch struct {
+	// Private
+	count          [maxSymbolValue + 1]uint32
+	norm           [maxSymbolValue + 1]int16
+	symbolLen      uint16 // Length of active part of the symbol table.
+	actualTableLog uint8  // Selected tablelog.
+	br             byteReader
+	bits           bitReader
+	bw             bitWriter
+	ct             cTable      // Compression tables.
+	decTable       []decSymbol // Decompression table.
+	zeroBits       bool        // zero-bit encodings are possible (some symbol has prob > 50%).
+	clearCount     bool        // clear count on next use.
+	maxCount       int         // count of the most probable symbol
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for next Compression/Decompression step
+	// and allocation will be avoided.
+	Out []byte
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	TableLog uint8
+
+	// DecompressLimit limits the maximum decoded size acceptable.
+	// If > 0 decompression will stop when approximately this many bytes
+	// has been decoded.
+	// If 0, maximum size will be 2GB.
+	DecompressLimit int
+}
+
+// Histogram returns the histogram slice, allowing the caller to populate it
+// and skip that step in the compression, or to inspect it when compression is done.
+// To indicate that you have populated the histogram call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+// The returned slice will always be length 256.
+func (s *Scratch) Histogram() []uint32 {
+	return s.count[:]
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
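+//
+// A minimal sketch of supplying a precomputed histogram, assuming the counts
+// exactly match the data that is compressed next:
+//
+//	hist := s.Histogram() // always length 256
+//	for _, b := range data {
+//		hist[b]++
+//	}
+//	s.HistogramFinished(maxSymbol, maxCount) // highest used symbol and its count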
+func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) {
+	s.maxCount = maxCount
+	s.symbolLen = uint16(maxSymbol) + 1
+	s.clearCount = maxCount != 0
+}
+
+// prepare will prepare and allocate scratch tables used for both compression and decompression.
+func (s *Scratch) prepare(in []byte) (*Scratch, error) {
+	if s == nil {
+		s = &Scratch{}
+	}
+	if s.MaxSymbolValue == 0 {
+		s.MaxSymbolValue = 255
+	}
+	if s.TableLog == 0 {
+		s.TableLog = defaultTablelog
+	}
+	if s.TableLog > maxTableLog {
+		return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog)
+	}
+	if cap(s.Out) == 0 {
+		s.Out = make([]byte, 0, len(in))
+	}
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	s.br.init(in)
+	if s.DecompressLimit == 0 {
+		// Max size 2GB.
+		s.DecompressLimit = (2 << 30) - 1
+	}
+
+	return s, nil
+}
+
+// tableStep returns the next table index.
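+// The step, tableSize*5/8 + 3, is odd and therefore coprime to the power-of-two
+// table size, so repeatedly adding it modulo tableSize visits every slot once.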
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
new file mode 100644
index 0000000..b3d2629
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/.gitignore
@@ -0,0 +1 @@
+/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
new file mode 100644
index 0000000..0a8448c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/README.md
@@ -0,0 +1,87 @@
+# Huff0 entropy compression
+
+This package provides Huff0 encoding and decoding as used in zstd.
+
+[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders) is
+a Huffman codec designed for modern CPUs, featuring OoO (Out of Order) operations on multiple ALUs
+(Arithmetic Logic Units), achieving extremely fast compression and decompression speeds.
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+It does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step for compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
+
+THIS PACKAGE IS NOT CONSIDERED STABLE AND API OR ENCODING MAY CHANGE IN THE FUTURE.
+
+## News
+
+ * Mar 2018: First implementation released. Consider this beta software for now.
+
+# Usage
+
+This package provides a low level interface that allows compressing single independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
+[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                  |
+|---------------------|------------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                            |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                     |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated  |
+| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)       |
+| `(error)`           | An internal error occurred                                                   |
+
+As can be seen above, some of these errors will be returned even under normal operation, so it is important to handle them.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+The `Scratch` object will retain state that allows re-using previous tables for encoding and decoding.
+
+## Tables and re-use
+
+Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results.
+
+The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
+that controls this behaviour. See the documentation for details. This can be altered between each block.
+
+Do however note that this information is *not* stored in the output block and it is up to the users of the package to
+record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
+based on the boolean reported back from the CompressXX call.
+
+If you want to store the table separately from the data, you can access them as `OutData` and `OutTable` on the
+[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
+
+## Decompressing
+
+The first step of decoding is to initialize the decoding tables through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
+You can supply the complete block to `ReadTable` and it will return the data part of the block
+which can be given to the decompressor.
+
+Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
+or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
+
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
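+
+## Example
+
+A minimal round-trip sketch (error handling abbreviated; real callers must also
+handle `ErrIncompressible` and `ErrUseRLE`):
+
+```Go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+func main() {
+	in := bytes.Repeat([]byte("abbcccdddd"), 100) // skewed data compresses well
+	var s huff0.Scratch
+
+	comp, _, err := huff0.Compress1X(in, &s)
+	if err != nil {
+		panic(err) // ErrIncompressible / ErrUseRLE also land here
+	}
+
+	s.Out = nil // comp aliases s.Out; detach it before re-using the scratch
+	s2, data, err := huff0.ReadTable(comp, &s)
+	if err != nil {
+		panic(err)
+	}
+	out, err := s2.Decompress1X(data)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(bytes.Equal(in, out)) // true
+}
+```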
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
+changes will likely not be accepted. If in doubt open an issue before writing the PR.
\ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
new file mode 100644
index 0000000..7d0903c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -0,0 +1,115 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import (
+	"errors"
+	"io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
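+	// e.g. a final byte of 0b00010110 has its marker at bit 4, so the four
+	// bits from the top down to and including the marker are skipped.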
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	b.fill()
+	b.fill()
+	b.bitsRead += 8 - uint8(highBit32(uint32(v)))
+	return nil
+}
+
+// getBits will return n bits. n can be 0.
+func (b *bitReader) getBits(n uint8) uint16 {
+	if n == 0 || b.bitsRead >= 64 {
+		return 0
+	}
+	return b.getBitsFast(n)
+}
+
+// getBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) getBitsFast(n uint8) uint16 {
+	const regMask = 64 - 1
+	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+	b.bitsRead += n
+	return v
+}
+
+// peekBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) peekBitsFast(n uint8) uint16 {
+	const regMask = 64 - 1
+	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+	return v
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+	// Do single re-slice to avoid bounds checks.
+	v := b.in[b.off-4 : b.off]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value = (b.value << 32) | uint64(low)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReader) fill() {
+	if b.bitsRead < 32 {
+		return
+	}
+	if b.off > 4 {
+		v := b.in[b.off-4 : b.off]
+		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		b.value = (b.value << 32) | uint64(low)
+		b.bitsRead -= 32
+		b.off -= 4
+		return
+	}
+	for b.off > 0 {
+		b.value = (b.value << 8) | uint64(b.in[b.off-1])
+		b.bitsRead -= 8
+		b.off--
+	}
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReader) finished() bool {
+	return b.off == 0 && b.bitsRead >= 64
+}
+
+// close will close the bitstream and return an error if out-of-buffer reads occurred.
+func (b *bitReader) close() error {
+	// Release reference.
+	b.in = nil
+	if b.bitsRead > 64 {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
new file mode 100644
index 0000000..bda4021
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
@@ -0,0 +1,197 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import "fmt"
+
+// bitWriter will write bits.
+// First bit will be LSB of the first byte of output.
+type bitWriter struct {
+	bitContainer uint64
+	nBits        uint8
+	out          []byte
+}
+
+// bitMask16 contains bitmasks. It has extra entries to avoid bounds checks.
+var bitMask16 = [32]uint16{
+	0, 1, 3, 7, 0xF, 0x1F,
+	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF} /* up to 16 bits */
+
+// addBits16NC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// encSymbol will add up to 16 bits. value may not contain more set bits than indicated.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) encSymbol(ct cTable, symbol byte) {
+	enc := ct[symbol]
+	b.bitContainer |= uint64(enc.val) << (b.nBits & 63)
+	b.nBits += enc.nBits
+}
+
+// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
+	encA := ct[av]
+	encB := ct[bv]
+	sh := b.nBits & 63
+	combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63))
+	b.bitContainer |= combined << sh
+	b.nBits += encA.nBits + encB.nBits
+}
+
+// addBits16ZeroNC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+// Use this variant when the bit count may be zero.
+func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
+	if bits == 0 {
+		return
+	}
+	value <<= (16 - bits) & 15
+	value >>= (16 - bits) & 15
+	b.bitContainer |= uint64(value) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// flush will flush all pending full bytes.
+// There will be at least 56 bits available for writing when this has been called.
+// Using flush32 is faster, but leaves less space for writing.
+func (b *bitWriter) flush() {
+	v := b.nBits >> 3
+	switch v {
+	case 0:
+		return
+	case 1:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+		)
+		b.bitContainer >>= 1 << 3
+	case 2:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+		)
+		b.bitContainer >>= 2 << 3
+	case 3:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+		)
+		b.bitContainer >>= 3 << 3
+	case 4:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+		)
+		b.bitContainer >>= 4 << 3
+	case 5:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+		)
+		b.bitContainer >>= 5 << 3
+	case 6:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+		)
+		b.bitContainer >>= 6 << 3
+	case 7:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+		)
+		b.bitContainer >>= 7 << 3
+	case 8:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+			byte(b.bitContainer>>56),
+		)
+		b.bitContainer = 0
+		b.nBits = 0
+		return
+	default:
+		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
+	}
+	b.nBits &= 7
+}
+
+// flush32 will flush out, so there are at least 32 bits available for writing.
+func (b *bitWriter) flush32() {
+	if b.nBits < 32 {
+		return
+	}
+	b.out = append(b.out,
+		byte(b.bitContainer),
+		byte(b.bitContainer>>8),
+		byte(b.bitContainer>>16),
+		byte(b.bitContainer>>24))
+	b.nBits -= 32
+	b.bitContainer >>= 32
+}
+
+// flushAlign will flush remaining full bytes and align to next byte boundary.
+func (b *bitWriter) flushAlign() {
+	nbBytes := (b.nBits + 7) >> 3
+	for i := uint8(0); i < nbBytes; i++ {
+		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
+	}
+	b.nBits = 0
+	b.bitContainer = 0
+}
+
+// close will write the alignment bit and write the final byte(s)
+// to the output.
+func (b *bitWriter) close() error {
+	// End mark
+	b.addBits16Clean(1, 1)
+	// flush until next byte.
+	b.flushAlign()
+	return nil
+}
+
+// reset and continue writing by appending to out.
+func (b *bitWriter) reset(out []byte) {
+	b.bitContainer = 0
+	b.nBits = 0
+	b.out = out
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go
new file mode 100644
index 0000000..50bcdf6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go
@@ -0,0 +1,54 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+	b   []byte
+	off int
+}
+
+// init will initialize the reader and set the input.
+func (b *byteReader) init(in []byte) {
+	b.b = in
+	b.off = 0
+}
+
+// advance the stream by n bytes.
+func (b *byteReader) advance(n uint) {
+	b.off += int(n)
+}
+
+// Int32 returns a little endian int32 starting at current offset.
+func (b byteReader) Int32() int32 {
+	v3 := int32(b.b[b.off+3])
+	v2 := int32(b.b[b.off+2])
+	v1 := int32(b.b[b.off+1])
+	v0 := int32(b.b[b.off])
+	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 {
+	v3 := uint32(b.b[b.off+3])
+	v2 := uint32(b.b[b.off+2])
+	v1 := uint32(b.b[b.off+1])
+	v0 := uint32(b.b[b.off])
+	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+	return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+	return len(b.b) - b.off
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
new file mode 100644
index 0000000..0843cb0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -0,0 +1,651 @@
+package huff0
+
+import (
+	"fmt"
+	"runtime"
+	"sync"
+)
+
+// Compress1X will compress the input.
+// The output can be decoded using Decompress1X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
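+//
+// A minimal sketch, assuming the ErrIncompressible/ErrUseRLE outcomes are handled:
+//
+//	var s Scratch
+//	s.Reuse = ReusePolicyNone // fresh table for every block
+//	out, _, err := Compress1X(in, &s)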
+func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
+	s, err = s.prepare(in)
+	if err != nil {
+		return nil, false, err
+	}
+	return compress(in, s, s.compress1X)
+}
+
+// Compress4X will compress the input. The input is split into 4 independent blocks
+// and compressed similar to Compress1X.
+// The output can be decoded using Decompress4X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
+func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
+	s, err = s.prepare(in)
+	if err != nil {
+		return nil, false, err
+	}
+	if false {
+		// TODO: compress4Xp only slightly faster.
+		const parallelThreshold = 8 << 10
+		if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 {
+			return compress(in, s, s.compress4X)
+		}
+		return compress(in, s, s.compress4Xp)
+	}
+	return compress(in, s, s.compress4X)
+}
+
+func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) {
+	// Nuke previous table if we cannot reuse anyway.
+	if s.Reuse == ReusePolicyNone {
+		s.prevTable = s.prevTable[:0]
+	}
+
+	// Create histogram, if none was provided.
+	maxCount := s.maxCount
+	var canReuse = false
+	if maxCount == 0 {
+		maxCount, canReuse = s.countSimple(in)
+	} else {
+		canReuse = s.canUseTable(s.prevTable)
+	}
+
+	// We want the output size to be less than this:
+	wantSize := len(in)
+	if s.WantLogLess > 0 {
+		wantSize -= wantSize >> s.WantLogLess
+	}
+
+	// Reset for next run.
+	s.clearCount = true
+	s.maxCount = 0
+	if maxCount >= len(in) {
+		if maxCount > len(in) {
+			return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in))
+		}
+		if len(in) == 1 {
+			return nil, false, ErrIncompressible
+		}
+		// One symbol, use RLE
+		return nil, false, ErrUseRLE
+	}
+	if maxCount == 1 || maxCount < (len(in)>>7) {
+		// Each symbol present maximum once or too well distributed.
+		return nil, false, ErrIncompressible
+	}
+
+	if s.Reuse == ReusePolicyPrefer && canReuse {
+		keepTable := s.cTable
+		keepTL := s.actualTableLog
+		s.cTable = s.prevTable
+		s.actualTableLog = s.prevTableLog
+		s.Out, err = compressor(in)
+		s.cTable = keepTable
+		s.actualTableLog = keepTL
+		if err == nil && len(s.Out) < wantSize {
+			s.OutData = s.Out
+			return s.Out, true, nil
+		}
+		// Do not attempt to re-use later.
+		s.prevTable = s.prevTable[:0]
+	}
+
+	// Calculate new table.
+	err = s.buildCTable()
+	if err != nil {
+		return nil, false, err
+	}
+
+	if false && !s.canUseTable(s.cTable) {
+		panic("invalid table generated")
+	}
+
+	if s.Reuse == ReusePolicyAllow && canReuse {
+		hSize := len(s.Out)
+		oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen])
+		newSize := s.cTable.estimateSize(s.count[:s.symbolLen])
+		if oldSize <= hSize+newSize || hSize+12 >= wantSize {
+			// Retain cTable even if we re-use.
+			keepTable := s.cTable
+			keepTL := s.actualTableLog
+
+			s.cTable = s.prevTable
+			s.actualTableLog = s.prevTableLog
+			s.Out, err = compressor(in)
+
+			// Restore ctable.
+			s.cTable = keepTable
+			s.actualTableLog = keepTL
+			if err != nil {
+				return nil, false, err
+			}
+			if len(s.Out) >= wantSize {
+				return nil, false, ErrIncompressible
+			}
+			s.OutData = s.Out
+			return s.Out, true, nil
+		}
+	}
+
+	// Use new table
+	err = s.cTable.write(s)
+	if err != nil {
+		s.OutTable = nil
+		return nil, false, err
+	}
+	s.OutTable = s.Out
+
+	// Compress using new table
+	s.Out, err = compressor(in)
+	if err != nil {
+		s.OutTable = nil
+		return nil, false, err
+	}
+	if len(s.Out) >= wantSize {
+		s.OutTable = nil
+		return nil, false, ErrIncompressible
+	}
+	// Move current table into previous.
+	s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0]
+	s.OutData = s.Out[len(s.OutTable):]
+	return s.Out, false, nil
+}
+
+func (s *Scratch) compress1X(src []byte) ([]byte, error) {
+	return s.compress1xDo(s.Out, src)
+}
+
+func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
+	var bw = bitWriter{out: dst}
+
+	// N is length divisible by 4.
+	n := len(src)
+	n -= n & 3
+	cTable := s.cTable[:256]
+
+	// Encode last bytes.
+	for i := len(src) & 3; i > 0; i-- {
+		bw.encSymbol(cTable, src[n+i-1])
+	}
+	n -= 4
+	if s.actualTableLog <= 8 {
+		for ; n >= 0; n -= 4 {
+			tmp := src[n : n+4]
+			// tmp should be len 4
+			bw.flush32()
+			bw.encTwoSymbols(cTable, tmp[3], tmp[2])
+			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
+		}
+	} else {
+		for ; n >= 0; n -= 4 {
+			tmp := src[n : n+4]
+			// tmp should be len 4
+			bw.flush32()
+			bw.encTwoSymbols(cTable, tmp[3], tmp[2])
+			bw.flush32()
+			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
+		}
+	}
+	err := bw.close()
+	return bw.out, err
+}
+
+var sixZeros [6]byte
+
+func (s *Scratch) compress4X(src []byte) ([]byte, error) {
+	if len(src) < 12 {
+		return nil, ErrIncompressible
+	}
+	segmentSize := (len(src) + 3) / 4
+
+	// Add placeholder for output length
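+	// (6 bytes: the little-endian 2-byte lengths of the first three streams;
+	// the fourth stream's length is implied by the remaining output size.)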
+	offsetIdx := len(s.Out)
+	s.Out = append(s.Out, sixZeros[:]...)
+
+	for i := 0; i < 4; i++ {
+		toDo := src
+		if len(toDo) > segmentSize {
+			toDo = toDo[:segmentSize]
+		}
+		src = src[len(toDo):]
+
+		var err error
+		idx := len(s.Out)
+		s.Out, err = s.compress1xDo(s.Out, toDo)
+		if err != nil {
+			return nil, err
+		}
+		// Write compressed length as little endian before block.
+		if i < 3 {
+			// Last length is not written.
+			length := len(s.Out) - idx
+			s.Out[i*2+offsetIdx] = byte(length)
+			s.Out[i*2+offsetIdx+1] = byte(length >> 8)
+		}
+	}
+
+	return s.Out, nil
+}
+
+// compress4Xp will compress 4 streams using separate goroutines.
+func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
+	if len(src) < 12 {
+		return nil, ErrIncompressible
+	}
+	// Add placeholder for output length
+	s.Out = s.Out[:6]
+
+	segmentSize := (len(src) + 3) / 4
+	var wg sync.WaitGroup
+	var errs [4]error
+	wg.Add(4)
+	for i := 0; i < 4; i++ {
+		toDo := src
+		if len(toDo) > segmentSize {
+			toDo = toDo[:segmentSize]
+		}
+		src = src[len(toDo):]
+
+		// Separate goroutine for each block.
+		go func(i int) {
+			s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
+			wg.Done()
+		}(i)
+	}
+	wg.Wait()
+	for i := 0; i < 4; i++ {
+		if errs[i] != nil {
+			return nil, errs[i]
+		}
+		o := s.tmpOut[i]
+		// Write compressed length as little endian before block.
+		if i < 3 {
+			// Last length is not written.
+			s.Out[i*2] = byte(len(o))
+			s.Out[i*2+1] = byte(len(o) >> 8)
+		}
+
+		// Write output.
+		s.Out = append(s.Out, o...)
+	}
+	return s.Out, nil
+}
+
+// countSimple will create a simple histogram in s.count.
+// Returns the biggest count.
+// Does not update s.clearCount.
+func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
+	reuse = true
+	for _, v := range in {
+		s.count[v]++
+	}
+	m := uint32(0)
+	if len(s.prevTable) > 0 {
+		for i, v := range s.count[:] {
+			if v > m {
+				m = v
+			}
+			if v > 0 {
+				s.symbolLen = uint16(i) + 1
+				if i >= len(s.prevTable) {
+					reuse = false
+				} else {
+					if s.prevTable[i].nBits == 0 {
+						reuse = false
+					}
+				}
+			}
+		}
+		return int(m), reuse
+	}
+	for i, v := range s.count[:] {
+		if v > m {
+			m = v
+		}
+		if v > 0 {
+			s.symbolLen = uint16(i) + 1
+		}
+	}
+	return int(m), false
+}
+
+func (s *Scratch) canUseTable(c cTable) bool {
+	if len(c) < int(s.symbolLen) {
+		return false
+	}
+	for i, v := range s.count[:s.symbolLen] {
+		if v != 0 && c[i].nBits == 0 {
+			return false
+		}
+	}
+	return true
+}
+
+func (s *Scratch) validateTable(c cTable) bool {
+	if len(c) < int(s.symbolLen) {
+		return false
+	}
+	for i, v := range s.count[:s.symbolLen] {
+		if v != 0 {
+			if c[i].nBits == 0 {
+				return false
+			}
+			if c[i].nBits > s.actualTableLog {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// minTableLog provides the minimum logSize to safely represent a distribution.
+func (s *Scratch) minTableLog() uint8 {
+	minBitsSrc := highBit32(uint32(s.br.remain())) + 1
+	minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
+	if minBitsSrc < minBitsSymbols {
+		return uint8(minBitsSrc)
+	}
+	return uint8(minBitsSymbols)
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *Scratch) optimalTableLog() {
+	tableLog := s.TableLog
+	minBits := s.minTableLog()
+	maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1
+	if maxBitsSrc < tableLog {
+		// Accuracy can be reduced
+		tableLog = maxBitsSrc
+	}
+	if minBits > tableLog {
+		tableLog = minBits
+	}
+	// Need a minimum to safely represent all symbol values
+	if tableLog < minTablelog {
+		tableLog = minTablelog
+	}
+	if tableLog > tableLogMax {
+		tableLog = tableLogMax
+	}
+	s.actualTableLog = tableLog
+}
+
+type cTableEntry struct {
+	val   uint16
+	nBits uint8
+	// We have 8 bits extra
+}
+
+const huffNodesMask = huffNodesLen - 1
+
+func (s *Scratch) buildCTable() error {
+	s.optimalTableLog()
+	s.huffSort()
+	if cap(s.cTable) < maxSymbolValue+1 {
+		s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1)
+	} else {
+		s.cTable = s.cTable[:s.symbolLen]
+		for i := range s.cTable {
+			s.cTable[i] = cTableEntry{}
+		}
+	}
+
+	var startNode = int16(s.symbolLen)
+	nonNullRank := s.symbolLen - 1
+
+	nodeNb := int16(startNode)
+	huffNode := s.nodes[1 : huffNodesLen+1]
+
+	// This overlays the slice above, but allows "-1" index lookups.
+	// Different from reference implementation.
+	huffNode0 := s.nodes[0 : huffNodesLen+1]
+
+	for huffNode[nonNullRank].count == 0 {
+		nonNullRank--
+	}
+
+	lowS := int16(nonNullRank)
+	nodeRoot := nodeNb + lowS - 1
+	lowN := nodeNb
+	huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count
+	huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb)
+	nodeNb++
+	lowS -= 2
+	for n := nodeNb; n <= nodeRoot; n++ {
+		huffNode[n].count = 1 << 30
+	}
+	// fake entry, strong barrier
+	huffNode0[0].count = 1 << 31
+
+	// create parents
+	for nodeNb <= nodeRoot {
+		var n1, n2 int16
+		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+			n1 = lowS
+			lowS--
+		} else {
+			n1 = lowN
+			lowN++
+		}
+		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+			n2 = lowS
+			lowS--
+		} else {
+			n2 = lowN
+			lowN++
+		}
+
+		huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count
+		huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb)
+		nodeNb++
+	}
+
+	// distribute weights (unlimited tree height)
+	huffNode[nodeRoot].nbBits = 0
+	for n := nodeRoot - 1; n >= startNode; n-- {
+		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+	}
+	for n := uint16(0); n <= nonNullRank; n++ {
+		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+	}
+	s.actualTableLog = s.setMaxHeight(int(nonNullRank))
+	maxNbBits := s.actualTableLog
+
+	// fill result into tree (val, nbBits)
+	if maxNbBits > tableLogMax {
+		return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax)
+	}
+	var nbPerRank [tableLogMax + 1]uint16
+	var valPerRank [16]uint16
+	for _, v := range huffNode[:nonNullRank+1] {
+		nbPerRank[v.nbBits]++
+	}
+	// determine starting value per rank
+	{
+		min := uint16(0)
+		for n := maxNbBits; n > 0; n-- {
+			// get starting value within each rank
+			valPerRank[n] = min
+			min += nbPerRank[n]
+			min >>= 1
+		}
+	}
+
+	// push nbBits per symbol, symbol order
+	for _, v := range huffNode[:nonNullRank+1] {
+		s.cTable[v.symbol].nBits = v.nbBits
+	}
+
+	// assign value within rank, symbol order
+	t := s.cTable[:s.symbolLen]
+	for n, val := range t {
+		nbits := val.nBits & 15
+		v := valPerRank[nbits]
+		t[n].val = v
+		valPerRank[nbits] = v + 1
+	}
+
+	return nil
+}
+
+// huffSort will sort symbols in decreasing order of count.
+func (s *Scratch) huffSort() {
+	type rankPos struct {
+		base    uint32
+		current uint32
+	}
+
+	// Clear nodes
+	nodes := s.nodes[:huffNodesLen+1]
+	s.nodes = nodes
+	nodes = nodes[1 : huffNodesLen+1]
+
+	// Sort into buckets based on length of symbol count.
+	var rank [32]rankPos
+	for _, v := range s.count[:s.symbolLen] {
+		r := highBit32(v+1) & 31
+		rank[r].base++
+	}
+	// maxBitLength is log2(BlockSizeMax) + 1
+	const maxBitLength = 18 + 1
+	for n := maxBitLength; n > 0; n-- {
+		rank[n-1].base += rank[n].base
+	}
+	for n := range rank[:maxBitLength] {
+		rank[n].current = rank[n].base
+	}
+	for n, c := range s.count[:s.symbolLen] {
+		r := (highBit32(c+1) + 1) & 31
+		pos := rank[r].current
+		rank[r].current++
+		prev := nodes[(pos-1)&huffNodesMask]
+		for pos > rank[r].base && c > prev.count {
+			nodes[pos&huffNodesMask] = prev
+			pos--
+			prev = nodes[(pos-1)&huffNodesMask]
+		}
+		nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)}
+	}
+}
+
+func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
+	maxNbBits := s.actualTableLog
+	huffNode := s.nodes[1 : huffNodesLen+1]
+
+	largestBits := huffNode[lastNonNull].nbBits
+
+	// early exit: no element uses more than maxNbBits
+	if largestBits <= maxNbBits {
+		return largestBits
+	}
+	totalCost := int(0)
+	baseCost := int(1) << (largestBits - maxNbBits)
+	n := uint32(lastNonNull)
+
+	for huffNode[n].nbBits > maxNbBits {
+		totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits))
+		huffNode[n].nbBits = maxNbBits
+		n--
+	}
+	// n stops at huffNode[n].nbBits <= maxNbBits
+
+	for huffNode[n].nbBits == maxNbBits {
+		n--
+	}
+	// n ends at the index of the smallest symbol using < maxNbBits
+
+	// renorm totalCost
+	totalCost >>= largestBits - maxNbBits /* note: totalCost is necessarily a multiple of baseCost */
+
+	// repay normalized cost
+	{
+		const noSymbol = 0xF0F0F0F0
+		var rankLast [tableLogMax + 2]uint32
+
+		for i := range rankLast[:] {
+			rankLast[i] = noSymbol
+		}
+
+		// Get pos of last (smallest) symbol per rank
+		{
+			currentNbBits := uint8(maxNbBits)
+			for pos := int(n); pos >= 0; pos-- {
+				if huffNode[pos].nbBits >= currentNbBits {
+					continue
+				}
+				currentNbBits = huffNode[pos].nbBits // < maxNbBits
+				rankLast[maxNbBits-currentNbBits] = uint32(pos)
+			}
+		}
+
+		for totalCost > 0 {
+			nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1
+
+			for ; nBitsToDecrease > 1; nBitsToDecrease-- {
+				highPos := rankLast[nBitsToDecrease]
+				lowPos := rankLast[nBitsToDecrease-1]
+				if highPos == noSymbol {
+					continue
+				}
+				if lowPos == noSymbol {
+					break
+				}
+				highTotal := huffNode[highPos].count
+				lowTotal := 2 * huffNode[lowPos].count
+				if highTotal <= lowTotal {
+					break
+				}
+			}
+			// Only triggered when no rank-1 symbols are left => find the closest one
+			// (there is necessarily at least one).
+			// The HUF_MAX_TABLELOG test is just to please gcc 5+; it should not be necessary.
+			// FIXME: try to remove
+			for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) {
+				nBitsToDecrease++
+			}
+			totalCost -= 1 << (nBitsToDecrease - 1)
+			if rankLast[nBitsToDecrease-1] == noSymbol {
+				// this rank is no longer empty
+				rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]
+			}
+			huffNode[rankLast[nBitsToDecrease]].nbBits++
+			if rankLast[nBitsToDecrease] == 0 {
+				/* special case, reached largest symbol */
+				rankLast[nBitsToDecrease] = noSymbol
+			} else {
+				rankLast[nBitsToDecrease]--
+				if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease {
+					rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */
+				}
+			}
+		}
+
+		for totalCost < 0 { /* sometimes the cost correction overshoots */
+			if rankLast[1] == noSymbol { /* special case: no rank-1 symbol (using maxNbBits-1); create one from the largest rank-0 symbol (using maxNbBits) */
+				for huffNode[n].nbBits == maxNbBits {
+					n--
+				}
+				huffNode[n+1].nbBits--
+				rankLast[1] = n + 1
+				totalCost++
+				continue
+			}
+			huffNode[rankLast[1]+1].nbBits--
+			rankLast[1]++
+			totalCost++
+		}
+	}
+	return maxNbBits
+}
+
+type nodeElt struct {
+	count  uint32
+	parent uint16
+	symbol byte
+	nbBits uint8
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
new file mode 100644
index 0000000..97ae66a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -0,0 +1,472 @@
+package huff0
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/klauspost/compress/fse"
+)
+
+type dTable struct {
+	single []dEntrySingle
+	double []dEntryDouble
+}
+
+// single-symbols decoding
+type dEntrySingle struct {
+	entry uint16
+}
+
+// double-symbols decoding
+type dEntryDouble struct {
+	seq   uint16
+	nBits uint8
+	len   uint8
+}
+
+// ReadTable will read a table from the input.
+// The size of the input may be larger than the table definition.
+// Any content remaining after the table definition will be returned.
+// If no Scratch is provided, a new one is allocated.
+// The returned Scratch can be used for decoding input using this table.
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
+	s, err = s.prepare(in)
+	if err != nil {
+		return s, nil, err
+	}
+	if len(in) <= 1 {
+		return s, nil, errors.New("input too small for table")
+	}
+	iSize := in[0]
+	in = in[1:]
+	if iSize >= 128 {
+		// Uncompressed
+		oSize := iSize - 127
+		iSize = (oSize + 1) / 2
+		if int(iSize) > len(in) {
+			return s, nil, errors.New("input too small for table")
+		}
+		for n := uint8(0); n < oSize; n += 2 {
+			v := in[n/2]
+			s.huffWeight[n] = v >> 4
+			s.huffWeight[n+1] = v & 15
+		}
+		s.symbolLen = uint16(oSize)
+		in = in[iSize:]
+	} else {
+		if len(in) <= int(iSize) {
+			return s, nil, errors.New("input too small for table")
+		}
+		// FSE compressed weights
+		s.fse.DecompressLimit = 255
+		hw := s.huffWeight[:]
+		s.fse.Out = hw
+		b, err := fse.Decompress(in[:iSize], s.fse)
+		s.fse.Out = nil
+		if err != nil {
+			return s, nil, err
+		}
+		if len(b) > 255 {
+			return s, nil, errors.New("corrupt input: output table too large")
+		}
+		s.symbolLen = uint16(len(b))
+		in = in[iSize:]
+	}
+
+	// collect weight stats
+	var rankStats [16]uint32
+	weightTotal := uint32(0)
+	for _, v := range s.huffWeight[:s.symbolLen] {
+		if v > tableLogMax {
+			return s, nil, errors.New("corrupt input: weight too large")
+		}
+		v2 := v & 15
+		rankStats[v2]++
+		weightTotal += (1 << v2) >> 1
+	}
+	if weightTotal == 0 {
+		return s, nil, errors.New("corrupt input: weights zero")
+	}
+
+	// get last non-null symbol weight (implied, total must be 2^n)
+	{
+		tableLog := highBit32(weightTotal) + 1
+		if tableLog > tableLogMax {
+			return s, nil, errors.New("corrupt input: tableLog too big")
+		}
+		s.actualTableLog = uint8(tableLog)
+		// determine last weight
+		{
+			total := uint32(1) << tableLog
+			rest := total - weightTotal
+			verif := uint32(1) << highBit32(rest)
+			lastWeight := highBit32(rest) + 1
+			if verif != rest {
+				// last value must be a clean power of 2
+				return s, nil, errors.New("corrupt input: last value not power of two")
+			}
+			s.huffWeight[s.symbolLen] = uint8(lastWeight)
+			s.symbolLen++
+			rankStats[lastWeight]++
+		}
+	}
+
+	if (rankStats[1] < 2) || (rankStats[1]&1 != 0) {
+		// by construction: at least 2 elements of rank 1, and the count must be even
+		return s, nil, errors.New("corrupt input: min elt size, even check failed")
+	}
+
+	// TODO: Choose between single/double symbol decoding
+
+	// Calculate starting value for each rank
+	{
+		var nextRankStart uint32
+		for n := uint8(1); n < s.actualTableLog+1; n++ {
+			current := nextRankStart
+			nextRankStart += rankStats[n] << (n - 1)
+			rankStats[n] = current
+		}
+	}
+
+	// fill DTable (always full size)
+	tSize := 1 << tableLogMax
+	if len(s.dt.single) != tSize {
+		s.dt.single = make([]dEntrySingle, tSize)
+	}
+	for n, w := range s.huffWeight[:s.symbolLen] {
+		if w == 0 {
+			continue
+		}
+		length := (uint32(1) << w) >> 1
+		d := dEntrySingle{
+			entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8),
+		}
+		single := s.dt.single[rankStats[w] : rankStats[w]+length]
+		for i := range single {
+			single[i] = d
+		}
+		rankStats[w] += length
+	}
+	return s, in, nil
+}
+
+// Decompress1X will decompress a 1X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// Before this is called, the table must be initialized with ReadTable unless
+// the encoder re-used the table.
+func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) {
+	if len(s.dt.single) == 0 {
+		return nil, errors.New("no table loaded")
+	}
+	var br bitReader
+	err = br.init(in)
+	if err != nil {
+		return nil, err
+	}
+	s.Out = s.Out[:0]
+
+	decode := func() byte {
+		val := br.peekBitsFast(s.actualTableLog) /* note: actualTableLog >= 1 */
+		v := s.dt.single[val]
+		br.bitsRead += uint8(v.entry)
+		return uint8(v.entry >> 8)
+	}
+	hasDec := func(v dEntrySingle) byte {
+		br.bitsRead += uint8(v.entry)
+		return uint8(v.entry >> 8)
+	}
+
+	// Avoid bounds check by always having full sized table.
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+	dt := s.dt.single[:tlSize]
+
+	// Use temp table to avoid bound checks/append penalty.
+	var tmp = s.huffWeight[:256]
+	var off uint8
+
+	for br.off >= 8 {
+		br.fillFast()
+		tmp[off+0] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask])
+		tmp[off+1] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask])
+		br.fillFast()
+		tmp[off+2] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask])
+		tmp[off+3] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask])
+		off += 4
+		if off == 0 {
+			if len(s.Out)+256 > s.MaxDecodedSize {
+				br.close()
+				return nil, ErrMaxDecodedSizeExceeded
+			}
+			s.Out = append(s.Out, tmp...)
+		}
+	}
+
+	if len(s.Out)+int(off) > s.MaxDecodedSize {
+		br.close()
+		return nil, ErrMaxDecodedSizeExceeded
+	}
+	s.Out = append(s.Out, tmp[:off]...)
+
+	for !br.finished() {
+		br.fill()
+		if len(s.Out) >= s.MaxDecodedSize {
+			br.close()
+			return nil, ErrMaxDecodedSizeExceeded
+		}
+		s.Out = append(s.Out, decode())
+	}
+	return s.Out, br.close()
+}
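+
+// Illustrative usage (a sketch added for documentation; `compressed` is an
+// assumed input holding a serialized table followed by a 1X-encoded stream):
+//
+//	s, data, err := ReadTable(compressed, nil)
+//	if err != nil {
+//		return err
+//	}
+//	out, err := s.Decompress1X(data)
+//	// out now holds the decoded bytes, capped by s.MaxDecodedSize.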
+
+// Decompress4X will decompress a 4X encoded stream.
+// Before this is called, the table must be initialized with ReadTable unless
+// the encoder re-used the table.
+// The length of the supplied input must match the end of a block exactly.
+// The destination size of the uncompressed data must be known and provided.
+func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
+	if len(s.dt.single) == 0 {
+		return nil, errors.New("no table loaded")
+	}
+	if len(in) < 6+(4*1) {
+		return nil, errors.New("input too small")
+	}
+	if dstSize > s.MaxDecodedSize {
+		return nil, ErrMaxDecodedSizeExceeded
+	}
+	// TODO: We do not detect when we overrun a buffer, except when the last one overruns.
+
+	var br [4]bitReader
+	start := 6
+	for i := 0; i < 3; i++ {
+		length := int(in[i*2]) | (int(in[i*2+1]) << 8)
+		if start+length >= len(in) {
+			return nil, errors.New("truncated input (or invalid offset)")
+		}
+		err = br[i].init(in[start : start+length])
+		if err != nil {
+			return nil, err
+		}
+		start += length
+	}
+	err = br[3].init(in[start:])
+	if err != nil {
+		return nil, err
+	}
+
+	// Prepare output
+	if cap(s.Out) < dstSize {
+		s.Out = make([]byte, 0, dstSize)
+	}
+	s.Out = s.Out[:dstSize]
+	// destination, offset to match first output
+	dstOut := s.Out
+	dstEvery := (dstSize + 3) / 4
+
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+	single := s.dt.single[:tlSize]
+
+	decode := func(br *bitReader) byte {
+		val := br.peekBitsFast(s.actualTableLog) /* note: actualTableLog >= 1 */
+		v := single[val&tlMask]
+		br.bitsRead += uint8(v.entry)
+		return uint8(v.entry >> 8)
+	}
+
+	// Use temp table to avoid bound checks/append penalty.
+	var tmp = s.huffWeight[:256]
+	var off uint8
+	var decoded int
+
+	// Decode 2 values from each decoder/loop.
+	const bufoff = 256 / 4
+bigloop:
+	for {
+		for i := range br {
+			br := &br[i]
+			if br.off < 4 {
+				break bigloop
+			}
+			br.fillFast()
+		}
+
+		{
+			const stream = 0
+			val := br[stream].peekBitsFast(s.actualTableLog)
+			v := single[val&tlMask]
+			br[stream].bitsRead += uint8(v.entry)
+
+			val2 := br[stream].peekBitsFast(s.actualTableLog)
+			v2 := single[val2&tlMask]
+			tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
+			tmp[off+bufoff*stream] = uint8(v.entry >> 8)
+			br[stream].bitsRead += uint8(v2.entry)
+		}
+
+		{
+			const stream = 1
+			val := br[stream].peekBitsFast(s.actualTableLog)
+			v := single[val&tlMask]
+			br[stream].bitsRead += uint8(v.entry)
+
+			val2 := br[stream].peekBitsFast(s.actualTableLog)
+			v2 := single[val2&tlMask]
+			tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
+			tmp[off+bufoff*stream] = uint8(v.entry >> 8)
+			br[stream].bitsRead += uint8(v2.entry)
+		}
+
+		{
+			const stream = 2
+			val := br[stream].peekBitsFast(s.actualTableLog)
+			v := single[val&tlMask]
+			br[stream].bitsRead += uint8(v.entry)
+
+			val2 := br[stream].peekBitsFast(s.actualTableLog)
+			v2 := single[val2&tlMask]
+			tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
+			tmp[off+bufoff*stream] = uint8(v.entry >> 8)
+			br[stream].bitsRead += uint8(v2.entry)
+		}
+
+		{
+			const stream = 3
+			val := br[stream].peekBitsFast(s.actualTableLog)
+			v := single[val&tlMask]
+			br[stream].bitsRead += uint8(v.entry)
+
+			val2 := br[stream].peekBitsFast(s.actualTableLog)
+			v2 := single[val2&tlMask]
+			tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
+			tmp[off+bufoff*stream] = uint8(v.entry >> 8)
+			br[stream].bitsRead += uint8(v2.entry)
+		}
+
+		off += 2
+
+		if off == bufoff {
+			if bufoff > dstEvery {
+				return nil, errors.New("corruption detected: stream overrun 1")
+			}
+			copy(dstOut, tmp[:bufoff])
+			copy(dstOut[dstEvery:], tmp[bufoff:bufoff*2])
+			copy(dstOut[dstEvery*2:], tmp[bufoff*2:bufoff*3])
+			copy(dstOut[dstEvery*3:], tmp[bufoff*3:bufoff*4])
+			off = 0
+			dstOut = dstOut[bufoff:]
+			decoded += 256
+			// There must be at least 3 buffers left.
+			if len(dstOut) < dstEvery*3 {
+				return nil, errors.New("corruption detected: stream overrun 2")
+			}
+		}
+	}
+	if off > 0 {
+		ioff := int(off)
+		if len(dstOut) < dstEvery*3+ioff {
+			return nil, errors.New("corruption detected: stream overrun 3")
+		}
+		copy(dstOut, tmp[:off])
+		copy(dstOut[dstEvery:dstEvery+ioff], tmp[bufoff:bufoff*2])
+		copy(dstOut[dstEvery*2:dstEvery*2+ioff], tmp[bufoff*2:bufoff*3])
+		copy(dstOut[dstEvery*3:dstEvery*3+ioff], tmp[bufoff*3:bufoff*4])
+		decoded += int(off) * 4
+		dstOut = dstOut[off:]
+	}
+
+	// Decode remaining.
+	for i := range br {
+		offset := dstEvery * i
+		br := &br[i]
+		for !br.finished() {
+			br.fill()
+			if offset >= len(dstOut) {
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+			dstOut[offset] = decode(br)
+			offset++
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return s.Out, nil
+}
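+
+// Illustrative usage (a sketch, not upstream source): unlike Decompress1X,
+// the caller must supply the uncompressed size, here the assumed `dstSize`,
+// which zstd-style containers carry out of band:
+//
+//	s, data, err := ReadTable(compressed, nil)
+//	if err != nil {
+//		return err
+//	}
+//	out, err := s.Decompress4X(data, dstSize)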
+
+// matches compares a decoding table to a coding table.
+// Errors are written to the writer.
+// Nothing is written if the table is ok.
+func (s *Scratch) matches(ct cTable, w io.Writer) {
+	if s == nil || len(s.dt.single) == 0 {
+		return
+	}
+	dt := s.dt.single[:1<<s.actualTableLog]
+	tablelog := s.actualTableLog
+	ok := 0
+	broken := 0
+	for sym, enc := range ct {
+		errs := 0
+		broken++
+		if enc.nBits == 0 {
+			for _, dec := range dt {
+				if uint8(dec.entry>>8) == byte(sym) {
+					fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
+					errs++
+					break
+				}
+			}
+			if errs == 0 {
+				broken--
+			}
+			continue
+		}
+		// Unused bits in input
+		ub := tablelog - enc.nBits
+		top := enc.val << ub
+		// decoder looks at top bits.
+		dec := dt[top]
+		if uint8(dec.entry) != enc.nBits {
+			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
+			errs++
+		}
+		if uint8(dec.entry>>8) != uint8(sym) {
+			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
+			errs++
+		}
+		if errs > 0 {
+			fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
+			continue
+		}
+		// Ensure that all combinations are covered.
+		for i := uint16(0); i < (1 << ub); i++ {
+			vval := top | i
+			dec := dt[vval]
+			if uint8(dec.entry) != enc.nBits {
+				fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
+				errs++
+			}
+			if uint8(dec.entry>>8) != uint8(sym) {
+				fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
+				errs++
+			}
+			if errs > 20 {
+				fmt.Fprintf(w, "%d errros, stopping\n", errs)
+				break
+			}
+		}
+		if errs == 0 {
+			ok++
+			broken--
+		}
+	}
+	if broken > 0 {
+		fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok)
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
new file mode 100644
index 0000000..53249df
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -0,0 +1,259 @@
+// Package huff0 provides fast Huffman encoding as used in zstd.
+//
+// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details.
+package huff0
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"math/bits"
+
+	"github.com/klauspost/compress/fse"
+)
+
+const (
+	maxSymbolValue = 255
+
+	// zstandard limits tablelog to 11, see:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description
+	tableLogMax     = 11
+	tableLogDefault = 11
+	minTablelog     = 5
+	huffNodesLen    = 512
+
+	// BlockSizeMax is maximum input size for a single block uncompressed.
+	BlockSizeMax = 1<<18 - 1
+)
+
+var (
+	// ErrIncompressible is returned when input is judged to be too hard to compress.
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+
+	// ErrTooBig is returned if the input is too large for a single block.
+	ErrTooBig = errors.New("input too big")
+
+	// ErrMaxDecodedSizeExceeded is returned if the decoded output exceeds MaxDecodedSize.
+	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
+)
+
+// ReusePolicy controls whether a table from a previous block may be re-used.
+type ReusePolicy uint8
+
+const (
+	// ReusePolicyAllow will allow reuse if it produces smaller output.
+	ReusePolicyAllow ReusePolicy = iota
+
+	// ReusePolicyPrefer will re-use aggressively if possible.
+	// This will not check if a new table will produce smaller output,
+	// except if the current table is impossible to use or
+	// compressed output is bigger than input.
+	ReusePolicyPrefer
+
+	// ReusePolicyNone will disable re-use of tables.
+	// This is slightly faster than ReusePolicyAllow but may produce larger output.
+	ReusePolicyNone
+)
+
+// Scratch holds state and scratch buffers that can be re-used between
+// compression and decompression calls to avoid allocations.
+type Scratch struct {
+	count [maxSymbolValue + 1]uint32
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for the next compression/decompression step
+	// and allocation will be avoided.
+	Out []byte
+
+	// OutTable will contain the table data only, if a new table has been generated.
+	// Slice of the returned data.
+	OutTable []byte
+
+	// OutData will contain the compressed data.
+	// Slice of the returned data.
+	OutData []byte
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	// Must be <= 11 and >= 5.
+	TableLog uint8
+
+	// Reuse will specify the reuse policy
+	Reuse ReusePolicy
+
+	// WantLogLess allows specifying a log2 reduction that must at least be achieved,
+	// otherwise the block will be returned as incompressible.
+	// The reduction must then be at least (input size >> WantLogLess).
+	// If WantLogLess == 0, any improvement will do.
+	WantLogLess uint8
+
+	// MaxDecodedSize will set the maximum allowed output size.
+	// This value will automatically be set to BlockSizeMax if not set.
+	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
+	MaxDecodedSize int
+
+	br             byteReader
+	symbolLen      uint16 // Length of active part of the symbol table.
+	maxCount       int    // count of the most probable symbol
+	clearCount     bool   // clear count
+	actualTableLog uint8  // Selected tablelog.
+	prevTableLog   uint8  // Tablelog for previous table
+	prevTable      cTable // Table used for previous compression.
+	cTable         cTable // compression table
+	dt             dTable // decompression table
+	nodes          []nodeElt
+	tmpOut         [4][]byte
+	fse            *fse.Scratch
+	huffWeight     [maxSymbolValue + 1]byte
+}
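+
+// Example configuration (an illustrative sketch; the compression entry points
+// such as Compress1X live in compress.go and are assumed here):
+//
+//	s := &Scratch{
+//		TableLog:    11,               // maximum accuracy
+//		Reuse:       ReusePolicyAllow, // reuse a previous table when smaller
+//		WantLogLess: 4,                // demand at least ~6% size reduction
+//	}
+//	out, reUsed, err := Compress1X(input, s)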
+
+func (s *Scratch) prepare(in []byte) (*Scratch, error) {
+	if len(in) > BlockSizeMax {
+		return nil, ErrTooBig
+	}
+	if s == nil {
+		s = &Scratch{}
+	}
+	if s.MaxSymbolValue == 0 {
+		s.MaxSymbolValue = maxSymbolValue
+	}
+	if s.TableLog == 0 {
+		s.TableLog = tableLogDefault
+	}
+	if s.TableLog > tableLogMax || s.TableLog < minTablelog {
+		return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax)
+	}
+	if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
+		s.MaxDecodedSize = BlockSizeMax
+	}
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	if cap(s.Out) == 0 {
+		s.Out = make([]byte, 0, len(in))
+	}
+	s.Out = s.Out[:0]
+
+	s.OutTable = nil
+	s.OutData = nil
+	if cap(s.nodes) < huffNodesLen+1 {
+		s.nodes = make([]nodeElt, 0, huffNodesLen+1)
+	}
+	s.nodes = s.nodes[:0]
+	if s.fse == nil {
+		s.fse = &fse.Scratch{}
+	}
+	s.br.init(in)
+
+	return s, nil
+}
+
+type cTable []cTableEntry
+
+func (c cTable) write(s *Scratch) error {
+	var (
+		// precomputed conversion table
+		bitsToWeight [tableLogMax + 1]byte
+		huffLog      = s.actualTableLog
+		// last weight is not saved.
+		maxSymbolValue = uint8(s.symbolLen - 1)
+		huffWeight     = s.huffWeight[:256]
+	)
+	const (
+		maxFSETableLog = 6
+	)
+	// convert to weight
+	bitsToWeight[0] = 0
+	for n := uint8(1); n < huffLog+1; n++ {
+		bitsToWeight[n] = huffLog + 1 - n
+	}
+
+	// Acquire histogram for FSE.
+	hist := s.fse.Histogram()
+	hist = hist[:256]
+	for i := range hist[:16] {
+		hist[i] = 0
+	}
+	for n := uint8(0); n < maxSymbolValue; n++ {
+		v := bitsToWeight[c[n].nBits] & 15
+		huffWeight[n] = v
+		hist[v]++
+	}
+
+	// FSE compress if feasible.
+	if maxSymbolValue >= 2 {
+		huffMaxCnt := uint32(0)
+		huffMax := uint8(0)
+		for i, v := range hist[:16] {
+			if v == 0 {
+				continue
+			}
+			huffMax = byte(i)
+			if v > huffMaxCnt {
+				huffMaxCnt = v
+			}
+		}
+		s.fse.HistogramFinished(huffMax, int(huffMaxCnt))
+		s.fse.TableLog = maxFSETableLog
+		b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse)
+		if err == nil && len(b) < int(s.symbolLen>>1) {
+			s.Out = append(s.Out, uint8(len(b)))
+			s.Out = append(s.Out, b...)
+			return nil
+		}
+		// Unable to compress (RLE/incompressible)
+	}
+	// write raw values as 4 bits (max: 15)
+	if maxSymbolValue > (256 - 128) {
+		// should not happen: likely means the source cannot be compressed
+		return ErrIncompressible
+	}
+	op := s.Out
+	// special case, pack weights 4 bits/weight.
+	op = append(op, 128|(maxSymbolValue-1))
+	// be sure it doesn't cause an msan issue in the final combination
+	huffWeight[maxSymbolValue] = 0
+	for n := uint16(0); n < uint16(maxSymbolValue); n += 2 {
+		op = append(op, (huffWeight[n]<<4)|huffWeight[n+1])
+	}
+	s.Out = op
+	return nil
+}
+
+// estimateSize returns the estimated size in bytes of the input represented in the
+// histogram supplied.
+func (c cTable) estimateSize(hist []uint32) int {
+	nbBits := uint32(7)
+	for i, v := range c[:len(hist)] {
+		nbBits += uint32(v.nBits) * hist[i]
+	}
+	return int(nbBits >> 3)
+}
+
+// minSize returns the minimum possible size considering the Shannon limit.
+func (s *Scratch) minSize(total int) int {
+	nbBits := float64(7)
+	fTotal := float64(total)
+	for _, v := range s.count[:s.symbolLen] {
+		n := float64(v)
+		if n > 0 {
+			nbBits += math.Log2(fTotal/n) * n
+		}
+	}
+	return int(nbBits) >> 3
+}
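+
+// Numeric example (added for clarity): for total=100 with two symbol counts
+// {60, 40}, nbBits = 7 + 60*log2(100/60) + 40*log2(100/40) ≈ 7 + 44.2 + 52.9,
+// so minSize returns int(104.1) >> 3 = 13 bytes; the leading 7 bits round the
+// Shannon bound up to whole bytes.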
+
+func highBit32(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/snappy/.gitignore b/vendor/github.com/klauspost/compress/snappy/.gitignore
new file mode 100644
index 0000000..042091d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/.gitignore
@@ -0,0 +1,16 @@
+cmd/snappytool/snappytool
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/klauspost/compress/snappy/AUTHORS b/vendor/github.com/klauspost/compress/snappy/AUTHORS
new file mode 100644
index 0000000..bcfa195
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+#	Name or Organization <email address>
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski <dgryski@gmail.com>
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Sebastien Binet <seb.binet@gmail.com>
diff --git a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
new file mode 100644
index 0000000..931ae31
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people.  For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+#     http://code.google.com/legal/individual-cla-v1.0.html
+#     http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+#     Name <email address>
+
+# Please keep the list sorted.
+
+Damian Gryski <dgryski@gmail.com>
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman <kaib@golang.org>
+Marc-Antoine Ruel <maruel@chromium.org>
+Nigel Tao <nigeltao@golang.org>
+Rob Pike <r@golang.org>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Russ Cox <rsc@golang.org>
+Sebastien Binet <seb.binet@gmail.com>
diff --git a/vendor/github.com/klauspost/compress/snappy/LICENSE b/vendor/github.com/klauspost/compress/snappy/LICENSE
new file mode 100644
index 0000000..6050c10
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/klauspost/compress/snappy/README b/vendor/github.com/klauspost/compress/snappy/README
new file mode 100644
index 0000000..cea1287
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/README
@@ -0,0 +1,107 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
+
+
+
+Benchmarks.
+
+The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
+or so files, the same set used by the C++ Snappy code (github.com/google/snappy
+and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
+3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
+
+"go test -test.bench=."
+
+_UFlat0-8         2.19GB/s ± 0%  html
+_UFlat1-8         1.41GB/s ± 0%  urls
+_UFlat2-8         23.5GB/s ± 2%  jpg
+_UFlat3-8         1.91GB/s ± 0%  jpg_200
+_UFlat4-8         14.0GB/s ± 1%  pdf
+_UFlat5-8         1.97GB/s ± 0%  html4
+_UFlat6-8          814MB/s ± 0%  txt1
+_UFlat7-8          785MB/s ± 0%  txt2
+_UFlat8-8          857MB/s ± 0%  txt3
+_UFlat9-8          719MB/s ± 1%  txt4
+_UFlat10-8        2.84GB/s ± 0%  pb
+_UFlat11-8        1.05GB/s ± 0%  gaviota
+
+_ZFlat0-8         1.04GB/s ± 0%  html
+_ZFlat1-8          534MB/s ± 0%  urls
+_ZFlat2-8         15.7GB/s ± 1%  jpg
+_ZFlat3-8          740MB/s ± 3%  jpg_200
+_ZFlat4-8         9.20GB/s ± 1%  pdf
+_ZFlat5-8          991MB/s ± 0%  html4
+_ZFlat6-8          379MB/s ± 0%  txt1
+_ZFlat7-8          352MB/s ± 0%  txt2
+_ZFlat8-8          396MB/s ± 1%  txt3
+_ZFlat9-8          327MB/s ± 1%  txt4
+_ZFlat10-8        1.33GB/s ± 1%  pb
+_ZFlat11-8         605MB/s ± 1%  gaviota
+
+
+
+"go test -test.bench=. -tags=noasm"
+
+_UFlat0-8          621MB/s ± 2%  html
+_UFlat1-8          494MB/s ± 1%  urls
+_UFlat2-8         23.2GB/s ± 1%  jpg
+_UFlat3-8         1.12GB/s ± 1%  jpg_200
+_UFlat4-8         4.35GB/s ± 1%  pdf
+_UFlat5-8          609MB/s ± 0%  html4
+_UFlat6-8          296MB/s ± 0%  txt1
+_UFlat7-8          288MB/s ± 0%  txt2
+_UFlat8-8          309MB/s ± 1%  txt3
+_UFlat9-8          280MB/s ± 1%  txt4
+_UFlat10-8         753MB/s ± 0%  pb
+_UFlat11-8         400MB/s ± 0%  gaviota
+
+_ZFlat0-8          409MB/s ± 1%  html
+_ZFlat1-8          250MB/s ± 1%  urls
+_ZFlat2-8         12.3GB/s ± 1%  jpg
+_ZFlat3-8          132MB/s ± 0%  jpg_200
+_ZFlat4-8         2.92GB/s ± 0%  pdf
+_ZFlat5-8          405MB/s ± 1%  html4
+_ZFlat6-8          179MB/s ± 1%  txt1
+_ZFlat7-8          170MB/s ± 1%  txt2
+_ZFlat8-8          189MB/s ± 1%  txt3
+_ZFlat9-8          164MB/s ± 1%  txt4
+_ZFlat10-8         479MB/s ± 1%  pb
+_ZFlat11-8         270MB/s ± 1%  gaviota
+
+
+
+For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
+are the numbers from C++ Snappy's
+
+make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
+
+BM_UFlat/0     2.4GB/s  html
+BM_UFlat/1     1.4GB/s  urls
+BM_UFlat/2    21.8GB/s  jpg
+BM_UFlat/3     1.5GB/s  jpg_200
+BM_UFlat/4    13.3GB/s  pdf
+BM_UFlat/5     2.1GB/s  html4
+BM_UFlat/6     1.0GB/s  txt1
+BM_UFlat/7   959.4MB/s  txt2
+BM_UFlat/8     1.0GB/s  txt3
+BM_UFlat/9   864.5MB/s  txt4
+BM_UFlat/10    2.9GB/s  pb
+BM_UFlat/11    1.2GB/s  gaviota
+
+BM_ZFlat/0   944.3MB/s  html (22.31 %)
+BM_ZFlat/1   501.6MB/s  urls (47.78 %)
+BM_ZFlat/2    14.3GB/s  jpg (99.95 %)
+BM_ZFlat/3   538.3MB/s  jpg_200 (73.00 %)
+BM_ZFlat/4     8.3GB/s  pdf (83.30 %)
+BM_ZFlat/5   903.5MB/s  html4 (22.52 %)
+BM_ZFlat/6   336.0MB/s  txt1 (57.88 %)
+BM_ZFlat/7   312.3MB/s  txt2 (61.91 %)
+BM_ZFlat/8   353.1MB/s  txt3 (54.99 %)
+BM_ZFlat/9   289.9MB/s  txt4 (66.26 %)
+BM_ZFlat/10    1.2GB/s  pb (19.68 %)
+BM_ZFlat/11  527.4MB/s  gaviota (37.72 %)
diff --git a/vendor/github.com/klauspost/compress/snappy/decode.go b/vendor/github.com/klauspost/compress/snappy/decode.go
new file mode 100644
index 0000000..72efb03
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/decode.go
@@ -0,0 +1,237 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+var (
+	// ErrCorrupt reports that the input is invalid.
+	ErrCorrupt = errors.New("snappy: corrupt input")
+	// ErrTooLarge reports that the uncompressed length is too large.
+	ErrTooLarge = errors.New("snappy: decoded block is too large")
+	// ErrUnsupported reports that the input isn't supported.
+	ErrUnsupported = errors.New("snappy: unsupported input")
+
+	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+	v, _, err := decodedLen(src)
+	return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+	v, n := binary.Uvarint(src)
+	if n <= 0 || v > 0xffffffff {
+		return 0, 0, ErrCorrupt
+	}
+
+	const wordSize = 32 << (^uint(0) >> 32 & 1)
+	if wordSize == 32 && v > 0x7fffffff {
+		return 0, 0, ErrTooLarge
+	}
+	return int(v), n, nil
+}
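+
+// Example (added for clarity): a block whose decompressed length is 65536
+// starts with the uvarint bytes 0x80 0x80 0x04, so decodedLen returns
+// blockLen=65536 and headerLen=3.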
+
+const (
+	decodeErrCodeCorrupt                  = 1
+	decodeErrCodeUnsupportedLiteralLength = 2
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+	dLen, s, err := decodedLen(src)
+	if err != nil {
+		return nil, err
+	}
+	if dLen <= len(dst) {
+		dst = dst[:dLen]
+	} else {
+		dst = make([]byte, dLen)
+	}
+	switch decode(dst, src[s:]) {
+	case 0:
+		return dst, nil
+	case decodeErrCodeUnsupportedLiteralLength:
+		return nil, errUnsupportedLiteralLength
+	}
+	return nil, ErrCorrupt
+}
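+
+// Illustrative usage (a sketch, not upstream source): passing a nil dst lets
+// Decode allocate a buffer of exactly the decoded length:
+//
+//	decoded, err := Decode(nil, encoded)
+//	if err != nil {
+//		// encoded was not a valid Snappy block
+//	}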
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+	return &Reader{
+		r:       r,
+		decoded: make([]byte, maxBlockSize),
+		buf:     make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+	}
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+	r       io.Reader
+	err     error
+	decoded []byte
+	buf     []byte
+	// decoded[i:j] contains decoded bytes that have not yet been passed on.
+	i, j       int
+	readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+	r.r = reader
+	r.err = nil
+	r.i = 0
+	r.j = 0
+	r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+		if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+			r.err = ErrCorrupt
+		}
+		return false
+	}
+	return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	for {
+		if r.i < r.j {
+			n := copy(p, r.decoded[r.i:r.j])
+			r.i += n
+			return n, nil
+		}
+		if !r.readFull(r.buf[:4], true) {
+			return 0, r.err
+		}
+		chunkType := r.buf[0]
+		if !r.readHeader {
+			if chunkType != chunkTypeStreamIdentifier {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.readHeader = true
+		}
+		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+		if chunkLen > len(r.buf) {
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+
+		// The chunk types are specified at
+		// https://github.com/google/snappy/blob/master/framing_format.txt
+		switch chunkType {
+		case chunkTypeCompressedData:
+			// Section 4.2. Compressed data (chunk type 0x00).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:chunkLen]
+			if !r.readFull(buf, false) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			buf = buf[checksumSize:]
+
+			n, err := DecodedLen(buf)
+			if err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if _, err := Decode(r.decoded, buf); err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeUncompressedData:
+			// Section 4.3. Uncompressed data (chunk type 0x01).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:checksumSize]
+			if !r.readFull(buf, false) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			// Read directly into r.decoded instead of via r.buf.
+			n := chunkLen - checksumSize
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.decoded[:n], false) {
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeStreamIdentifier:
+			// Section 4.1. Stream identifier (chunk type 0xff).
+			if chunkLen != len(magicBody) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.buf[:len(magicBody)], false) {
+				return 0, r.err
+			}
+			for i := 0; i < len(magicBody); i++ {
+				if r.buf[i] != magicBody[i] {
+					r.err = ErrCorrupt
+					return 0, r.err
+				}
+			}
+			continue
+		}
+
+		if chunkType <= 0x7f {
+			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+		// Section 4.4 Padding (chunk type 0xfe).
+		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+		if !r.readFull(r.buf[:chunkLen], false) {
+			return 0, r.err
+		}
+	}
+}
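+
+// Illustrative usage (a sketch, not upstream source): decompressing the
+// framing format as a stream, where `src` is an io.Reader over framed
+// Snappy data and `dst` is any io.Writer:
+//
+//	if _, err := io.Copy(dst, NewReader(src)); err != nil {
+//		// stream was corrupt, truncated or unsupported
+//	}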
diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.go b/vendor/github.com/klauspost/compress/snappy/decode_amd64.go
new file mode 100644
index 0000000..fcd192b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/decode_amd64.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// decode has the same semantics as in decode_other.go.
+//
+//go:noescape
+func decode(dst, src []byte) int
diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s
new file mode 100644
index 0000000..1c66e37
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s
@@ -0,0 +1,482 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+//	- AX	scratch
+//	- BX	scratch
+//	- CX	length or x
+//	- DX	offset
+//	- SI	&src[s]
+//	- DI	&dst[d]
+//	+ R8	dst_base
+//	+ R9	dst_len
+//	+ R10	dst_base + dst_len
+//	+ R11	src_base
+//	+ R12	src_len
+//	+ R13	src_base + src_len
+//	- R14	used by doCopy
+//	- R15	used by doCopy
+//
+// The registers R8-R13 (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly DI - R8,  and len(dst)-d is R10 - DI.
+// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
+TEXT ·decode(SB), NOSPLIT, $48-56
+	// Initialize SI, DI and R8-R13.
+	MOVQ dst_base+0(FP), R8
+	MOVQ dst_len+8(FP), R9
+	MOVQ R8, DI
+	MOVQ R8, R10
+	ADDQ R9, R10
+	MOVQ src_base+24(FP), R11
+	MOVQ src_len+32(FP), R12
+	MOVQ R11, SI
+	MOVQ R11, R13
+	ADDQ R12, R13
+
+loop:
+	// for s < len(src)
+	CMPQ SI, R13
+	JEQ  end
+
+	// CX = uint32(src[s])
+	//
+	// switch src[s] & 0x03
+	MOVBLZX (SI), CX
+	MOVL    CX, BX
+	ANDL    $3, BX
+	CMPL    BX, $1
+	JAE     tagCopy
+
+	// ----------------------------------------
+	// The code below handles literal tags.
+
+	// case tagLiteral:
+	// x := uint32(src[s] >> 2)
+	// switch
+	SHRL $2, CX
+	CMPL CX, $60
+	JAE  tagLit60Plus
+
+	// case x < 60:
+	// s++
+	INCQ SI
+
+doLit:
+	// This is the end of the inner "switch", when we have a literal tag.
+	//
+	// We assume that CX == x and x fits in a uint32, where x is the variable
+	// used in the pure Go decode_other.go code.
+
+	// length = int(x) + 1
+	//
+	// Unlike the pure Go code, we don't need to check if length <= 0 because
+	// CX can hold 64 bits, so the increment cannot overflow.
+	INCQ CX
+
+	// Prepare to check if copying length bytes will run past the end of dst or
+	// src.
+	//
+	// AX = len(dst) - d
+	// BX = len(src) - s
+	MOVQ R10, AX
+	SUBQ DI, AX
+	MOVQ R13, BX
+	SUBQ SI, BX
+
+	// !!! Try a faster technique for short (16 or fewer bytes) copies.
+	//
+	// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+	//   goto callMemmove // Fall back on calling runtime·memmove.
+	// }
+	//
+	// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+	// against 21 instead of 16, because it cannot assume that all of its input
+	// is contiguous in memory and so it needs to leave enough source bytes to
+	// read the next tag without refilling buffers, but Go's Decode assumes
+	// contiguousness (the src argument is a []byte).
+	CMPQ CX, $16
+	JGT  callMemmove
+	CMPQ AX, $16
+	JLT  callMemmove
+	CMPQ BX, $16
+	JLT  callMemmove
+
+	// !!! Implement the copy from src to dst as a 16-byte load and store.
+	// (Decode's documentation says that dst and src must not overlap.)
+	//
+	// This always copies 16 bytes, instead of only length bytes, but that's
+	// OK. If the input is a valid Snappy encoding then subsequent iterations
+	// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+	// non-nil error), so the overrun will be ignored.
+	//
+	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+	// 16-byte loads and stores. This technique probably wouldn't be as
+	// effective on architectures that are fussier about alignment.
+	MOVOU 0(SI), X0
+	MOVOU X0, 0(DI)
+
+	// d += length
+	// s += length
+	ADDQ CX, DI
+	ADDQ CX, SI
+	JMP  loop
+
+callMemmove:
+	// if length > len(dst)-d || length > len(src)-s { etc }
+	CMPQ CX, AX
+	JGT  errCorrupt
+	CMPQ CX, BX
+	JGT  errCorrupt
+
+	// copy(dst[d:], src[s:s+length])
+	//
+	// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+	// DI, SI and CX as arguments. Coincidentally, we also need to spill those
+	// three registers to the stack, to save local variables across the CALL.
+	MOVQ DI, 0(SP)
+	MOVQ SI, 8(SP)
+	MOVQ CX, 16(SP)
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+	MOVQ CX, 40(SP)
+	CALL runtime·memmove(SB)
+
+	// Restore local variables: unspill registers from the stack and
+	// re-calculate R8-R13.
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+	MOVQ 40(SP), CX
+	MOVQ dst_base+0(FP), R8
+	MOVQ dst_len+8(FP), R9
+	MOVQ R8, R10
+	ADDQ R9, R10
+	MOVQ src_base+24(FP), R11
+	MOVQ src_len+32(FP), R12
+	MOVQ R11, R13
+	ADDQ R12, R13
+
+	// d += length
+	// s += length
+	ADDQ CX, DI
+	ADDQ CX, SI
+	JMP  loop
+
+tagLit60Plus:
+	// !!! This fragment does the
+	//
+	// s += x - 58; if uint(s) > uint(len(src)) { etc }
+	//
+	// checks. In the asm version, we code it once instead of once per switch case.
+	ADDQ CX, SI
+	SUBQ $58, SI
+	CMPQ SI, R13
+	JA   errCorrupt
+
+	// case x == 60:
+	CMPL CX, $61
+	JEQ  tagLit61
+	JA   tagLit62Plus
+
+	// x = uint32(src[s-1])
+	MOVBLZX -1(SI), CX
+	JMP     doLit
+
+tagLit61:
+	// case x == 61:
+	// x = uint32(src[s-2]) | uint32(src[s-1])<<8
+	MOVWLZX -2(SI), CX
+	JMP     doLit
+
+tagLit62Plus:
+	CMPL CX, $62
+	JA   tagLit63
+
+	// case x == 62:
+	// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+	MOVWLZX -3(SI), CX
+	MOVBLZX -1(SI), BX
+	SHLL    $16, BX
+	ORL     BX, CX
+	JMP     doLit
+
+tagLit63:
+	// case x == 63:
+	// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+	MOVL -4(SI), CX
+	JMP  doLit
+
+// The code above handles literal tags.
+// ----------------------------------------
+// The code below handles copy tags.
+
+tagCopy4:
+	// case tagCopy4:
+	// s += 5
+	ADDQ $5, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	CMPQ SI, R13
+	JA   errCorrupt
+
+	// length = 1 + int(src[s-5])>>2
+	SHRQ $2, CX
+	INCQ CX
+
+	// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+	MOVLQZX -4(SI), DX
+	JMP     doCopy
+
+tagCopy2:
+	// case tagCopy2:
+	// s += 3
+	ADDQ $3, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	CMPQ SI, R13
+	JA   errCorrupt
+
+	// length = 1 + int(src[s-3])>>2
+	SHRQ $2, CX
+	INCQ CX
+
+	// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+	MOVWQZX -2(SI), DX
+	JMP     doCopy
+
+tagCopy:
+	// We have a copy tag. We assume that:
+	//	- BX == src[s] & 0x03
+	//	- CX == src[s]
+	CMPQ BX, $2
+	JEQ  tagCopy2
+	JA   tagCopy4
+
+	// case tagCopy1:
+	// s += 2
+	ADDQ $2, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	CMPQ SI, R13
+	JA   errCorrupt
+
+	// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+	MOVQ    CX, DX
+	ANDQ    $0xe0, DX
+	SHLQ    $3, DX
+	MOVBQZX -1(SI), BX
+	ORQ     BX, DX
+
+	// length = 4 + int(src[s-2])>>2&0x7
+	SHRQ $2, CX
+	ANDQ $7, CX
+	ADDQ $4, CX
+
+doCopy:
+	// This is the end of the outer "switch", when we have a copy tag.
+	//
+	// We assume that:
+	//	- CX == length && CX > 0
+	//	- DX == offset
+
+	// if offset <= 0 { etc }
+	CMPQ DX, $0
+	JLE  errCorrupt
+
+	// if d < offset { etc }
+	MOVQ DI, BX
+	SUBQ R8, BX
+	CMPQ BX, DX
+	JLT  errCorrupt
+
+	// if length > len(dst)-d { etc }
+	MOVQ R10, BX
+	SUBQ DI, BX
+	CMPQ CX, BX
+	JGT  errCorrupt
+
+	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+	//
+	// Set:
+	//	- R14 = len(dst)-d
+	//	- R15 = &dst[d-offset]
+	MOVQ R10, R14
+	SUBQ DI, R14
+	MOVQ DI, R15
+	SUBQ DX, R15
+
+	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+	//
+	// First, try using two 8-byte load/stores, similar to the doLit technique
+	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+	// and not one 16-byte load/store, and the first store has to be before the
+	// second load, due to the overlap if offset is in the range [8, 16).
+	//
+	// if length > 16 || offset < 8 || len(dst)-d < 16 {
+	//   goto slowForwardCopy
+	// }
+	// copy 16 bytes
+	// d += length
+	CMPQ CX, $16
+	JGT  slowForwardCopy
+	CMPQ DX, $8
+	JLT  slowForwardCopy
+	CMPQ R14, $16
+	JLT  slowForwardCopy
+	MOVQ 0(R15), AX
+	MOVQ AX, 0(DI)
+	MOVQ 8(R15), BX
+	MOVQ BX, 8(DI)
+	ADDQ CX, DI
+	JMP  loop
+
+slowForwardCopy:
+	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes.  However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//    abxxxxxxxxxxxx
+	//    [------]           d-offset
+	//      [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//    ababxxxxxxxxxx
+	//    [------]           d-offset
+	//        [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//   goto verySlowForwardCopy
+	// }
+	SUBQ $10, R14
+	CMPQ CX, R14
+	JGT  verySlowForwardCopy
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
+	//
+	// for offset < 8 {
+	//   copy 8 bytes from dst[d-offset:] to dst[d:]
+	//   length -= offset
+	//   d      += offset
+	//   offset += offset
+	//   // The two previous lines together means that d-offset, and therefore
+	//   // R15, is unchanged.
+	// }
+	CMPQ DX, $8
+	JGE  fixUpSlowForwardCopy
+	MOVQ (R15), BX
+	MOVQ BX, (DI)
+	SUBQ DX, CX
+	ADDQ DX, DI
+	ADDQ DX, DX
+	JMP  makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+	// !!! Add length (which might be negative now) to d (implied by DI being
+	// &dst[d]) so that d ends up at the right place when we jump back to the
+	// top of the loop. Before we do that, though, we save DI to AX so that, if
+	// length is positive, copying the remaining length bytes will write to the
+	// right place.
+	MOVQ DI, AX
+	ADDQ CX, DI
+
+finishSlowForwardCopy:
+	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+	// length means that we overrun, but as above, that will be fixed up by
+	// subsequent iterations of the outermost loop.
+	CMPQ CX, $0
+	JLE  loop
+	MOVQ (R15), BX
+	MOVQ BX, (AX)
+	ADDQ $8, R15
+	ADDQ $8, AX
+	SUBQ $8, CX
+	JMP  finishSlowForwardCopy
+
+verySlowForwardCopy:
+	// verySlowForwardCopy is a simple implementation of forward copy. In C
+	// parlance, this is a do/while loop instead of a while loop, since we know
+	// that length > 0. In Go syntax:
+	//
+	// for {
+	//   dst[d] = dst[d - offset]
+	//   d++
+	//   length--
+	//   if length == 0 {
+	//     break
+	//   }
+	// }
+	MOVB (R15), BX
+	MOVB BX, (DI)
+	INCQ R15
+	INCQ DI
+	DECQ CX
+	JNZ  verySlowForwardCopy
+	JMP  loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+	// This is the end of the "for s < len(src)".
+	//
+	// if d != len(dst) { etc }
+	CMPQ DI, R10
+	JNE  errCorrupt
+
+	// return 0
+	MOVQ $0, ret+48(FP)
+	RET
+
+errCorrupt:
+	// return decodeErrCodeCorrupt
+	MOVQ $1, ret+48(FP)
+	RET
diff --git a/vendor/github.com/klauspost/compress/snappy/decode_other.go b/vendor/github.com/klauspost/compress/snappy/decode_other.go
new file mode 100644
index 0000000..94a96c5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/decode_other.go
@@ -0,0 +1,115 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func decode(dst, src []byte) int {
+	var d, s, offset, length int
+	for s < len(src) {
+		switch src[s] & 0x03 {
+		case tagLiteral:
+			x := uint32(src[s] >> 2)
+			switch {
+			case x < 60:
+				s++
+			case x == 60:
+				s += 2
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-1])
+			case x == 61:
+				s += 3
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-2]) | uint32(src[s-1])<<8
+			case x == 62:
+				s += 4
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+			case x == 63:
+				s += 5
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+			}
+			length = int(x) + 1
+			if length <= 0 {
+				return decodeErrCodeUnsupportedLiteralLength
+			}
+			if length > len(dst)-d || length > len(src)-s {
+				return decodeErrCodeCorrupt
+			}
+			copy(dst[d:], src[s:s+length])
+			d += length
+			s += length
+			continue
+
+		case tagCopy1:
+			s += 2
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 4 + int(src[s-2])>>2&0x7
+			offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+
+		case tagCopy2:
+			s += 3
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 1 + int(src[s-3])>>2
+			offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+		case tagCopy4:
+			s += 5
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 1 + int(src[s-5])>>2
+			offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+		}
+
+		if offset <= 0 || d < offset || length > len(dst)-d {
+			return decodeErrCodeCorrupt
+		}
+		// Copy from an earlier sub-slice of dst to a later sub-slice.
+		// If no overlap, use the built-in copy:
+		if offset > length {
+			copy(dst[d:d+length], dst[d-offset:])
+			d += length
+			continue
+		}
+
+		// Unlike the built-in copy function, this byte-by-byte copy always runs
+		// forwards, even if the slices overlap. Conceptually, this is:
+		//
+		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
+		//
+		// We align the slices into a and b and show the compiler they are the same size.
+		// This allows the loop to run without bounds checks.
+		a := dst[d : d+length]
+		b := dst[d-offset:]
+		b = b[:len(a)]
+		for i := range a {
+			a[i] = b[i]
+		}
+		d += length
+	}
+	if d != len(dst) {
+		return decodeErrCodeCorrupt
+	}
+	return 0
+}
diff --git a/vendor/github.com/klauspost/compress/snappy/encode.go b/vendor/github.com/klauspost/compress/snappy/encode.go
new file mode 100644
index 0000000..8d393e9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/encode.go
@@ -0,0 +1,285 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Encode(dst, src []byte) []byte {
+	if n := MaxEncodedLen(len(src)); n < 0 {
+		panic(ErrTooLarge)
+	} else if len(dst) < n {
+		dst = make([]byte, n)
+	}
+
+	// The block starts with the varint-encoded length of the decompressed bytes.
+	d := binary.PutUvarint(dst, uint64(len(src)))
+
+	for len(src) > 0 {
+		p := src
+		src = nil
+		if len(p) > maxBlockSize {
+			p, src = p[:maxBlockSize], p[maxBlockSize:]
+		}
+		if len(p) < minNonLiteralBlockSize {
+			d += emitLiteral(dst[d:], p)
+		} else {
+			d += encodeBlock(dst[d:], p)
+		}
+	}
+	return dst[:d]
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 16 - 1
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// could be encoded with a copy tag. This is the minimum with respect to the
+// algorithm used by encodeBlock, not a minimum enforced by the file format.
+//
+// The encoded output must start with at least a 1 byte literal, as there are
+// no previous bytes to copy. A minimal (1 byte) copy after that, generated
+// from an emitCopy call in encodeBlock's main loop, would require at least
+// another inputMargin bytes, for the reason above: we want any emitLiteral
+// calls inside encodeBlock's main loop to use the fast path if possible, which
+// requires being able to overrun by inputMargin bytes. Thus,
+// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
+//
+// The C++ code doesn't use this exact threshold, but it could, as discussed at
+// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
+// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
+// optimization. It should not affect the encoded form. This is tested by
+// TestSameEncodingAsCppShortCopies.
+const minNonLiteralBlockSize = 1 + 1 + inputMargin
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+	n := uint64(srcLen)
+	if n > 0xffffffff {
+		return -1
+	}
+	// Compressed data can be defined as:
+	//    compressed := item* literal*
+	//    item       := literal* copy
+	//
+	// The trailing literal sequence has a space blowup of at most 62/60
+	// since a literal of length 60 needs one tag byte + one extra byte
+	// for length information.
+	//
+	// Item blowup is trickier to measure. Suppose the "copy" op copies
+	// 4 bytes of data. Because of a special check in the encoding code,
+	// we produce a 4-byte copy only if the offset is < 65536. Therefore
+	// the copy op takes 3 bytes to encode, and this type of item leads
+	// to at most the 62/60 blowup for representing literals.
+	//
+	// Suppose the "copy" op copies 5 bytes of data. If the offset is big
+	// enough, it will take 5 bytes to encode the copy op. Therefore the
+	// worst case here is a one-byte literal followed by a five-byte copy.
+	// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+	//
+	// This last factor dominates the blowup, so the final estimate is:
+	n = 32 + n + n/6
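+	// (Worked example: n = maxBlockSize = 65536 gives
+	// 32 + 65536 + 65536/6 = 32 + 65536 + 10922 = 76490, matching the
+	// maxEncodedLenOfMaxBlockSize constant in snappy.go.)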
+	if n > 0xffffffff {
+		return -1
+	}
+	return int(n)
+}
+
+var errClosed = errors.New("snappy: Writer is closed")
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:    w,
+		obuf: make([]byte, obufLen),
+	}
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:    w,
+		ibuf: make([]byte, 0, maxBlockSize),
+		obuf: make([]byte, obufLen),
+	}
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+	w   io.Writer
+	err error
+
+	// ibuf is a buffer for the incoming (uncompressed) bytes.
+	//
+	// Its use is optional. For backwards compatibility, Writers created by the
+	// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+	// therefore do not need to be Flush'ed or Close'd.
+	ibuf []byte
+
+	// obuf is a buffer for the outgoing (compressed) bytes.
+	obuf []byte
+
+	// wroteStreamHeader is whether we have written the stream header.
+	wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+	w.w = writer
+	w.err = nil
+	if w.ibuf != nil {
+		w.ibuf = w.ibuf[:0]
+	}
+	w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+	if w.ibuf == nil {
+		// Do not buffer incoming bytes. This does not perform or compress well
+		// if the caller of Writer.Write writes many small slices. This
+		// behavior is therefore deprecated, but still supported for backwards
+		// compatibility with code that doesn't explicitly Flush or Close.
+		return w.write(p)
+	}
+
+	// The remainder of this method is based on bufio.Writer.Write from the
+	// standard library.
+
+	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+		var n int
+		if len(w.ibuf) == 0 {
+			// Large write, empty buffer.
+			// Write directly from p to avoid copy.
+			n, _ = w.write(p)
+		} else {
+			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+			w.ibuf = w.ibuf[:len(w.ibuf)+n]
+			w.Flush()
+		}
+		nRet += n
+		p = p[n:]
+	}
+	if w.err != nil {
+		return nRet, w.err
+	}
+	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+	w.ibuf = w.ibuf[:len(w.ibuf)+n]
+	nRet += n
+	return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	for len(p) > 0 {
+		obufStart := len(magicChunk)
+		if !w.wroteStreamHeader {
+			w.wroteStreamHeader = true
+			copy(w.obuf, magicChunk)
+			obufStart = 0
+		}
+
+		var uncompressed []byte
+		if len(p) > maxBlockSize {
+			uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+		} else {
+			uncompressed, p = p, nil
+		}
+		checksum := crc(uncompressed)
+
+		// Compress the buffer, discarding the result if the improvement
+		// isn't at least 12.5%.
+		compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+		chunkType := uint8(chunkTypeCompressedData)
+		chunkLen := 4 + len(compressed)
+		obufEnd := obufHeaderLen + len(compressed)
+		if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+			chunkType = chunkTypeUncompressedData
+			chunkLen = 4 + len(uncompressed)
+			obufEnd = obufHeaderLen
+		}
+
+		// Fill in the per-chunk header that comes before the body.
+		w.obuf[len(magicChunk)+0] = chunkType
+		w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
+		w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
+		w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
+		w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
+		w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
+		w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
+		w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
+
+		if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
+			w.err = err
+			return nRet, err
+		}
+		if chunkType == chunkTypeUncompressedData {
+			if _, err := w.w.Write(uncompressed); err != nil {
+				w.err = err
+				return nRet, err
+			}
+		}
+		nRet += len(uncompressed)
+	}
+	return nRet, nil
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+func (w *Writer) Flush() error {
+	if w.err != nil {
+		return w.err
+	}
+	if len(w.ibuf) == 0 {
+		return nil
+	}
+	w.write(w.ibuf)
+	w.ibuf = w.ibuf[:0]
+	return w.err
+}
+
+// Close calls Flush and then closes the Writer.
+func (w *Writer) Close() error {
+	w.Flush()
+	ret := w.err
+	if w.err == nil {
+		w.err = errClosed
+	}
+	return ret
+}
diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.go b/vendor/github.com/klauspost/compress/snappy/encode_amd64.go
new file mode 100644
index 0000000..150d91b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/encode_amd64.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// emitLiteral has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitLiteral(dst, lit []byte) int
+
+// emitCopy has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitCopy(dst []byte, offset, length int) int
+
+// extendMatch has the same semantics as in encode_other.go.
+//
+//go:noescape
+func extendMatch(src []byte, i, j int) int
+
+// encodeBlock has the same semantics as in encode_other.go.
+//
+//go:noescape
+func encodeBlock(dst, src []byte) (d int)
diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.s b/vendor/github.com/klauspost/compress/snappy/encode_amd64.s
new file mode 100644
index 0000000..adfd979
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/encode_amd64.s
@@ -0,0 +1,730 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
+// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
+// https://github.com/golang/snappy/issues/29
+//
+// As a workaround, the package was built with a known good assembler, and
+// those instructions were disassembled by "objdump -d" to yield the
+//	4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+// style comments, in AT&T asm syntax. Note that rsp here is a physical
+// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
+// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
+// fine on Go 1.6.
+
+// The asm code generally follows the pure Go code in encode_other.go, except
+// where marked with a "!!!".
+
+// ----------------------------------------------------------------------------
+
+// func emitLiteral(dst, lit []byte) int
+//
+// All local variables fit into registers. The register allocation:
+//	- AX	len(lit)
+//	- BX	n
+//	- DX	return value
+//	- DI	&dst[i]
+//	- R10	&lit[0]
+//
+// The 24 bytes of stack space is to call runtime·memmove.
+//
+// The unusual register allocation of local variables, such as R10 for the
+// source pointer, matches the allocation used at the call site in encodeBlock,
+// which makes it easier to manually inline this function.
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56
+	MOVQ dst_base+0(FP), DI
+	MOVQ lit_base+24(FP), R10
+	MOVQ lit_len+32(FP), AX
+	MOVQ AX, DX
+	MOVL AX, BX
+	SUBL $1, BX
+
+	CMPL BX, $60
+	JLT  oneByte
+	CMPL BX, $256
+	JLT  twoBytes
+
+threeBytes:
+	MOVB $0xf4, 0(DI)
+	MOVW BX, 1(DI)
+	ADDQ $3, DI
+	ADDQ $3, DX
+	JMP  memmove
+
+twoBytes:
+	MOVB $0xf0, 0(DI)
+	MOVB BX, 1(DI)
+	ADDQ $2, DI
+	ADDQ $2, DX
+	JMP  memmove
+
+oneByte:
+	SHLB $2, BX
+	MOVB BX, 0(DI)
+	ADDQ $1, DI
+	ADDQ $1, DX
+
+memmove:
+	MOVQ DX, ret+48(FP)
+
+	// copy(dst[i:], lit)
+	//
+	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+	// DI, R10 and AX as arguments.
+	MOVQ DI, 0(SP)
+	MOVQ R10, 8(SP)
+	MOVQ AX, 16(SP)
+	CALL runtime·memmove(SB)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func emitCopy(dst []byte, offset, length int) int
+//
+// All local variables fit into registers. The register allocation:
+//	- AX	length
+//	- SI	&dst[0]
+//	- DI	&dst[i]
+//	- R11	offset
+//
+// The unusual register allocation of local variables, such as R11 for the
+// offset, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+	MOVQ dst_base+0(FP), DI
+	MOVQ DI, SI
+	MOVQ offset+24(FP), R11
+	MOVQ length+32(FP), AX
+
+loop0:
+	// for length >= 68 { etc }
+	CMPL AX, $68
+	JLT  step1
+
+	// Emit a length 64 copy, encoded as 3 bytes.
+	MOVB $0xfe, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $64, AX
+	JMP  loop0
+
+step1:
+	// if length > 64 { etc }
+	CMPL AX, $64
+	JLE  step2
+
+	// Emit a length 60 copy, encoded as 3 bytes.
+	MOVB $0xee, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $60, AX
+
+step2:
+	// if length >= 12 || offset >= 2048 { goto step3 }
+	CMPL AX, $12
+	JGE  step3
+	CMPL R11, $2048
+	JGE  step3
+
+	// Emit the remaining copy, encoded as 2 bytes.
+	MOVB R11, 1(DI)
+	SHRL $8, R11
+	SHLB $5, R11
+	SUBB $4, AX
+	SHLB $2, AX
+	ORB  AX, R11
+	ORB  $1, R11
+	MOVB R11, 0(DI)
+	ADDQ $2, DI
+
+	// Return the number of bytes written.
+	SUBQ SI, DI
+	MOVQ DI, ret+40(FP)
+	RET
+
+step3:
+	// Emit the remaining copy, encoded as 3 bytes.
+	SUBL $1, AX
+	SHLB $2, AX
+	ORB  $2, AX
+	MOVB AX, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+
+	// Return the number of bytes written.
+	SUBQ SI, DI
+	MOVQ DI, ret+40(FP)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func extendMatch(src []byte, i, j int) int
+//
+// All local variables fit into registers. The register allocation:
+//	- DX	&src[0]
+//	- SI	&src[j]
+//	- R13	&src[len(src) - 8]
+//	- R14	&src[len(src)]
+//	- R15	&src[i]
+//
+// The unusual register allocation of local variables, such as R15 for a source
+// pointer, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·extendMatch(SB), NOSPLIT, $0-48
+	MOVQ src_base+0(FP), DX
+	MOVQ src_len+8(FP), R14
+	MOVQ i+24(FP), R15
+	MOVQ j+32(FP), SI
+	ADDQ DX, R14
+	ADDQ DX, R15
+	ADDQ DX, SI
+	MOVQ R14, R13
+	SUBQ $8, R13
+
+cmp8:
+	// As long as we are 8 or more bytes before the end of src, we can load and
+	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+	CMPQ SI, R13
+	JA   cmp1
+	MOVQ (R15), AX
+	MOVQ (SI), BX
+	CMPQ AX, BX
+	JNE  bsf
+	ADDQ $8, R15
+	ADDQ $8, SI
+	JMP  cmp8
+
+bsf:
+	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
+	// the index of the first byte that differs. The BSF instruction finds the
+	// least significant 1 bit, the amd64 architecture is little-endian, and
+	// the shift by 3 converts a bit index to a byte index.
+	XORQ AX, BX
+	BSFQ BX, BX
+	SHRQ $3, BX
+	ADDQ BX, SI
+
+	// Convert from &src[ret] to ret.
+	SUBQ DX, SI
+	MOVQ SI, ret+40(FP)
+	RET
+
+cmp1:
+	// In src's tail, compare 1 byte at a time.
+	CMPQ SI, R14
+	JAE  extendMatchEnd
+	MOVB (R15), AX
+	MOVB (SI), BX
+	CMPB AX, BX
+	JNE  extendMatchEnd
+	ADDQ $1, R15
+	ADDQ $1, SI
+	JMP  cmp1
+
+extendMatchEnd:
+	// Convert from &src[ret] to ret.
+	SUBQ DX, SI
+	MOVQ SI, ret+40(FP)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+//	- AX	.	.
+//	- BX	.	.
+//	- CX	56	shift (note that amd64 shifts by non-immediates must use CX).
+//	- DX	64	&src[0], tableSize
+//	- SI	72	&src[s]
+//	- DI	80	&dst[d]
+//	- R9	88	sLimit
+//	- R10	.	&src[nextEmit]
+//	- R11	96	prevHash, currHash, nextHash, offset
+//	- R12	104	&src[base], skip
+//	- R13	.	&src[nextS], &src[len(src) - 8]
+//	- R14	.	len(src), bytesBetweenHashLookups, &src[len(src)], x
+//	- R15	112	candidate
+//
+// The second column (56, 64, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
+TEXT ·encodeBlock(SB), 0, $32888-56
+	MOVQ dst_base+0(FP), DI
+	MOVQ src_base+24(FP), SI
+	MOVQ src_len+32(FP), R14
+
+	// shift, tableSize := uint32(32-8), 1<<8
+	MOVQ $24, CX
+	MOVQ $256, DX
+
+calcShift:
+	// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+	//	shift--
+	// }
+	CMPQ DX, $16384
+	JGE  varTable
+	CMPQ DX, R14
+	JGE  varTable
+	SUBQ $1, CX
+	SHLQ $1, DX
+	JMP  calcShift
+
+varTable:
+	// var table [maxTableSize]uint16
+	//
+	// In the asm code, unlike the Go code, we can zero-initialize only the
+	// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
+	// writes 16 bytes, so we can do only tableSize/8 writes instead of the
+	// 2048 writes that would zero-initialize all of table's 32768 bytes.
+	SHRQ $3, DX
+	LEAQ table-32768(SP), BX
+	PXOR X0, X0
+
+memclr:
+	MOVOU X0, 0(BX)
+	ADDQ  $16, BX
+	SUBQ  $1, DX
+	JNZ   memclr
+
+	// !!! DX = &src[0]
+	MOVQ SI, DX
+
+	// sLimit := len(src) - inputMargin
+	MOVQ R14, R9
+	SUBQ $15, R9
+
+	// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
+	// change for the rest of the function.
+	MOVQ CX, 56(SP)
+	MOVQ DX, 64(SP)
+	MOVQ R9, 88(SP)
+
+	// nextEmit := 0
+	MOVQ DX, R10
+
+	// s := 1
+	ADDQ $1, SI
+
+	// nextHash := hash(load32(src, s), shift)
+	MOVL  0(SI), R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+outer:
+	// for { etc }
+
+	// skip := 32
+	MOVQ $32, R12
+
+	// nextS := s
+	MOVQ SI, R13
+
+	// candidate := 0
+	MOVQ $0, R15
+
+inner0:
+	// for { etc }
+
+	// s := nextS
+	MOVQ R13, SI
+
+	// bytesBetweenHashLookups := skip >> 5
+	MOVQ R12, R14
+	SHRQ $5, R14
+
+	// nextS = s + bytesBetweenHashLookups
+	ADDQ R14, R13
+
+	// skip += bytesBetweenHashLookups
+	ADDQ R14, R12
+
+	// if nextS > sLimit { goto emitRemainder }
+	MOVQ R13, AX
+	SUBQ DX, AX
+	CMPQ AX, R9
+	JA   emitRemainder
+
+	// candidate = int(table[nextHash])
+	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
+	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+	BYTE $0x4e
+	BYTE $0x0f
+	BYTE $0xb7
+	BYTE $0x7c
+	BYTE $0x5c
+	BYTE $0x78
+
+	// table[nextHash] = uint16(s)
+	MOVQ SI, AX
+	SUBQ DX, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// nextHash = hash(load32(src, nextS), shift)
+	MOVL  0(R13), R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// if load32(src, s) != load32(src, candidate) { continue } break
+	MOVL 0(SI), AX
+	MOVL (DX)(R15*1), BX
+	CMPL AX, BX
+	JNE  inner0
+
+fourByteMatch:
+	// As per the encode_other.go code:
+	//
+	// A 4-byte match has been found. We'll later see etc.
+
+	// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
+	// on inputMargin in encode.go.
+	MOVQ SI, AX
+	SUBQ R10, AX
+	CMPQ AX, $16
+	JLE  emitLiteralFastPath
+
+	// ----------------------------------------
+	// Begin inline of the emitLiteral call.
+	//
+	// d += emitLiteral(dst[d:], src[nextEmit:s])
+
+	MOVL AX, BX
+	SUBL $1, BX
+
+	CMPL BX, $60
+	JLT  inlineEmitLiteralOneByte
+	CMPL BX, $256
+	JLT  inlineEmitLiteralTwoBytes
+
+inlineEmitLiteralThreeBytes:
+	MOVB $0xf4, 0(DI)
+	MOVW BX, 1(DI)
+	ADDQ $3, DI
+	JMP  inlineEmitLiteralMemmove
+
+inlineEmitLiteralTwoBytes:
+	MOVB $0xf0, 0(DI)
+	MOVB BX, 1(DI)
+	ADDQ $2, DI
+	JMP  inlineEmitLiteralMemmove
+
+inlineEmitLiteralOneByte:
+	SHLB $2, BX
+	MOVB BX, 0(DI)
+	ADDQ $1, DI
+
+inlineEmitLiteralMemmove:
+	// Spill local variables (registers) onto the stack; call; unspill.
+	//
+	// copy(dst[i:], lit)
+	//
+	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+	// DI, R10 and AX as arguments.
+	MOVQ DI, 0(SP)
+	MOVQ R10, 8(SP)
+	MOVQ AX, 16(SP)
+	ADDQ AX, DI              // Finish the "d +=" part of "d += emitLiteral(etc)".
+	MOVQ SI, 72(SP)
+	MOVQ DI, 80(SP)
+	MOVQ R15, 112(SP)
+	CALL runtime·memmove(SB)
+	MOVQ 56(SP), CX
+	MOVQ 64(SP), DX
+	MOVQ 72(SP), SI
+	MOVQ 80(SP), DI
+	MOVQ 88(SP), R9
+	MOVQ 112(SP), R15
+	JMP  inner1
+
+inlineEmitLiteralEnd:
+	// End inline of the emitLiteral call.
+	// ----------------------------------------
+
+emitLiteralFastPath:
+	// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
+	MOVB AX, BX
+	SUBB $1, BX
+	SHLB $2, BX
+	MOVB BX, (DI)
+	ADDQ $1, DI
+
+	// !!! Implement the copy from lit to dst as a 16-byte load and store.
+	// (Encode's documentation says that dst and src must not overlap.)
+	//
+	// This always copies 16 bytes, instead of only len(lit) bytes, but that's
+	// OK. Subsequent iterations will fix up the overrun.
+	//
+	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+	// 16-byte loads and stores. This technique probably wouldn't be as
+	// effective on architectures that are fussier about alignment.
+	MOVOU 0(R10), X0
+	MOVOU X0, 0(DI)
+	ADDQ  AX, DI
+
+inner1:
+	// for { etc }
+
+	// base := s
+	MOVQ SI, R12
+
+	// !!! offset := base - candidate
+	MOVQ R12, R11
+	SUBQ R15, R11
+	SUBQ DX, R11
+
+	// ----------------------------------------
+	// Begin inline of the extendMatch call.
+	//
+	// s = extendMatch(src, candidate+4, s+4)
+
+	// !!! R14 = &src[len(src)]
+	MOVQ src_len+32(FP), R14
+	ADDQ DX, R14
+
+	// !!! R13 = &src[len(src) - 8]
+	MOVQ R14, R13
+	SUBQ $8, R13
+
+	// !!! R15 = &src[candidate + 4]
+	ADDQ $4, R15
+	ADDQ DX, R15
+
+	// !!! s += 4
+	ADDQ $4, SI
+
+inlineExtendMatchCmp8:
+	// As long as we are 8 or more bytes before the end of src, we can load and
+	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+	CMPQ SI, R13
+	JA   inlineExtendMatchCmp1
+	MOVQ (R15), AX
+	MOVQ (SI), BX
+	CMPQ AX, BX
+	JNE  inlineExtendMatchBSF
+	ADDQ $8, R15
+	ADDQ $8, SI
+	JMP  inlineExtendMatchCmp8
+
+inlineExtendMatchBSF:
+	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
+	// the index of the first byte that differs. The BSF instruction finds the
+	// least significant 1 bit, the amd64 architecture is little-endian, and
+	// the shift by 3 converts a bit index to a byte index.
+	XORQ AX, BX
+	BSFQ BX, BX
+	SHRQ $3, BX
+	ADDQ BX, SI
+	JMP  inlineExtendMatchEnd
+
+inlineExtendMatchCmp1:
+	// In src's tail, compare 1 byte at a time.
+	CMPQ SI, R14
+	JAE  inlineExtendMatchEnd
+	MOVB (R15), AX
+	MOVB (SI), BX
+	CMPB AX, BX
+	JNE  inlineExtendMatchEnd
+	ADDQ $1, R15
+	ADDQ $1, SI
+	JMP  inlineExtendMatchCmp1
+
+inlineExtendMatchEnd:
+	// End inline of the extendMatch call.
+	// ----------------------------------------
+
+	// ----------------------------------------
+	// Begin inline of the emitCopy call.
+	//
+	// d += emitCopy(dst[d:], base-candidate, s-base)
+
+	// !!! length := s - base
+	MOVQ SI, AX
+	SUBQ R12, AX
+
+inlineEmitCopyLoop0:
+	// for length >= 68 { etc }
+	CMPL AX, $68
+	JLT  inlineEmitCopyStep1
+
+	// Emit a length 64 copy, encoded as 3 bytes.
+	MOVB $0xfe, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $64, AX
+	JMP  inlineEmitCopyLoop0
+
+inlineEmitCopyStep1:
+	// if length > 64 { etc }
+	CMPL AX, $64
+	JLE  inlineEmitCopyStep2
+
+	// Emit a length 60 copy, encoded as 3 bytes.
+	MOVB $0xee, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $60, AX
+
+inlineEmitCopyStep2:
+	// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
+	CMPL AX, $12
+	JGE  inlineEmitCopyStep3
+	CMPL R11, $2048
+	JGE  inlineEmitCopyStep3
+
+	// Emit the remaining copy, encoded as 2 bytes.
+	MOVB R11, 1(DI)
+	SHRL $8, R11
+	SHLB $5, R11
+	SUBB $4, AX
+	SHLB $2, AX
+	ORB  AX, R11
+	ORB  $1, R11
+	MOVB R11, 0(DI)
+	ADDQ $2, DI
+	JMP  inlineEmitCopyEnd
+
+inlineEmitCopyStep3:
+	// Emit the remaining copy, encoded as 3 bytes.
+	SUBL $1, AX
+	SHLB $2, AX
+	ORB  $2, AX
+	MOVB AX, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+
+inlineEmitCopyEnd:
+	// End inline of the emitCopy call.
+	// ----------------------------------------
+
+	// nextEmit = s
+	MOVQ SI, R10
+
+	// if s >= sLimit { goto emitRemainder }
+	MOVQ SI, AX
+	SUBQ DX, AX
+	CMPQ AX, R9
+	JAE  emitRemainder
+
+	// As per the encode_other.go code:
+	//
+	// We could immediately etc.
+
+	// x := load64(src, s-1)
+	MOVQ -1(SI), R14
+
+	// prevHash := hash(uint32(x>>0), shift)
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// table[prevHash] = uint16(s-1)
+	MOVQ SI, AX
+	SUBQ DX, AX
+	SUBQ $1, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// currHash := hash(uint32(x>>8), shift)
+	SHRQ  $8, R14
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// candidate = int(table[currHash])
+	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
+	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+	BYTE $0x4e
+	BYTE $0x0f
+	BYTE $0xb7
+	BYTE $0x7c
+	BYTE $0x5c
+	BYTE $0x78
+
+	// table[currHash] = uint16(s)
+	ADDQ $1, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// if uint32(x>>8) == load32(src, candidate) { continue }
+	MOVL (DX)(R15*1), BX
+	CMPL R14, BX
+	JEQ  inner1
+
+	// nextHash = hash(uint32(x>>16), shift)
+	SHRQ  $8, R14
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// s++
+	ADDQ $1, SI
+
+	// break out of the inner1 for loop, i.e. continue the outer loop.
+	JMP outer
+
+emitRemainder:
+	// if nextEmit < len(src) { etc }
+	MOVQ src_len+32(FP), AX
+	ADDQ DX, AX
+	CMPQ R10, AX
+	JEQ  encodeBlockEnd
+
+	// d += emitLiteral(dst[d:], src[nextEmit:])
+	//
+	// Push args.
+	MOVQ DI, 0(SP)
+	MOVQ $0, 8(SP)   // Unnecessary, as the callee ignores it, but conservative.
+	MOVQ $0, 16(SP)  // Unnecessary, as the callee ignores it, but conservative.
+	MOVQ R10, 24(SP)
+	SUBQ R10, AX
+	MOVQ AX, 32(SP)
+	MOVQ AX, 40(SP)  // Unnecessary, as the callee ignores it, but conservative.
+
+	// Spill local variables (registers) onto the stack; call; unspill.
+	MOVQ DI, 80(SP)
+	CALL ·emitLiteral(SB)
+	MOVQ 80(SP), DI
+
+	// Finish the "d +=" part of "d += emitLiteral(etc)".
+	ADDQ 48(SP), DI
+
+encodeBlockEnd:
+	MOVQ dst_base+0(FP), AX
+	SUBQ AX, DI
+	MOVQ DI, d+48(FP)
+	RET
diff --git a/vendor/github.com/klauspost/compress/snappy/encode_other.go b/vendor/github.com/klauspost/compress/snappy/encode_other.go
new file mode 100644
index 0000000..dbcae90
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+	i, n := 0, uint(len(lit)-1)
+	switch {
+	case n < 60:
+		dst[0] = uint8(n)<<2 | tagLiteral
+		i = 1
+	case n < 1<<8:
+		dst[0] = 60<<2 | tagLiteral
+		dst[1] = uint8(n)
+		i = 2
+	default:
+		dst[0] = 61<<2 | tagLiteral
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		i = 3
+	}
+	return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= 65535
+//	4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+	for length >= 68 {
+		// Emit a length 64 copy, encoded as 3 bytes.
+		dst[i+0] = 63<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 64
+	}
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		dst[i+0] = 59<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 60
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		return i + 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	dst[i+1] = uint8(offset)
+	return i + 2
+}
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+//	0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+	}
+	return j
+}
+
+func hash(u, shift uint32) uint32 {
+	return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src)) &&
+// 	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+	// The table element type is uint16, as s < sLimit and sLimit < len(src)
+	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
+	const (
+		maxTableSize = 1 << 14
+		// tableMask is redundant, but helps the compiler eliminate bounds
+		// checks.
+		tableMask = maxTableSize - 1
+	)
+	shift := uint32(32 - 8)
+	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+		shift--
+	}
+	// In Go, all array elements are zero-initialized, so there is no advantage
+	// to a smaller tableSize per se. However, it matches the C++ algorithm,
+	// and in the asm versions of this code, we can get away with zeroing only
+	// the first tableSize elements.
+	var table [maxTableSize]uint16
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := len(src) - inputMargin
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := 0
+
+	// The encoded form must start with a literal, as there are no previous
+	// bytes to copy, so we start looking for hash matches at s == 1.
+	s := 1
+	nextHash := hash(load32(src, s), shift)
+
+	for {
+		// Copied from the C++ snappy implementation:
+		//
+		// Heuristic match skipping: If 32 bytes are scanned with no matches
+		// found, start looking only at every other byte. If 32 more bytes are
+		// scanned (or skipped), look at every third byte, etc.. When a match
+		// is found, immediately go back to looking at every byte. This is a
+		// small loss (~5% performance, ~0.1% density) for compressible data
+		// due to more bookkeeping, but for non-compressible data (such as
+		// JPEG) it's a huge win since the compressor quickly "realizes" the
+		// data is incompressible and doesn't bother looking for matches
+		// everywhere.
+		//
+		// The "skip" variable keeps track of how many bytes there are since
+		// the last match; dividing it by 32 (ie. right-shifting by five) gives
+		// the number of bytes to move ahead for each iteration.
+		skip := 32
+
+		nextS := s
+		candidate := 0
+		for {
+			s = nextS
+			bytesBetweenHashLookups := skip >> 5
+			nextS = s + bytesBetweenHashLookups
+			skip += bytesBetweenHashLookups
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			candidate = int(table[nextHash&tableMask])
+			table[nextHash&tableMask] = uint16(s)
+			nextHash = hash(load32(src, nextS), shift)
+			if load32(src, s) == load32(src, candidate) {
+				break
+			}
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		d += emitLiteral(dst[d:], src[nextEmit:s])
+
+		// Call emitCopy, and then see if another emitCopy could be our next
+		// move. Repeat until we find no match for the input immediately after
+		// what was consumed by the last emitCopy call.
+		//
+		// If we exit this loop normally then we need to call emitLiteral next,
+		// though we don't yet know how big the literal will be. We handle that
+		// by proceeding to the next iteration of the main loop. We also can
+		// exit this loop via goto if we get close to exhausting the input.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+			base := s
+
+			// Extend the 4-byte match as long as possible.
+			//
+			// This is an inlined version of:
+			//	s = extendMatch(src, candidate+4, s+4)
+			s += 4
+			for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+			}
+
+			d += emitCopy(dst[d:], base-candidate, s-base)
+			nextEmit = s
+			if s >= sLimit {
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-1 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
+			x := load64(src, s-1)
+			prevHash := hash(uint32(x>>0), shift)
+			table[prevHash&tableMask] = uint16(s - 1)
+			currHash := hash(uint32(x>>8), shift)
+			candidate = int(table[currHash&tableMask])
+			table[currHash&tableMask] = uint16(s)
+			if uint32(x>>8) != load32(src, candidate) {
+				nextHash = hash(uint32(x>>16), shift)
+				s++
+				break
+			}
+		}
+	}
+
+emitRemainder:
+	if nextEmit < len(src) {
+		d += emitLiteral(dst[d:], src[nextEmit:])
+	}
+	return d
+}
diff --git a/vendor/github.com/klauspost/compress/snappy/runbench.cmd b/vendor/github.com/klauspost/compress/snappy/runbench.cmd
new file mode 100644
index 0000000..d24eb4b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/runbench.cmd
@@ -0,0 +1,2 @@
+del old.txt
+go test -bench=. >>old.txt && go test -bench=. >>old.txt && go test -bench=. >>old.txt && benchstat -delta-test=ttest old.txt new.txt
diff --git a/vendor/github.com/klauspost/compress/snappy/snappy.go b/vendor/github.com/klauspost/compress/snappy/snappy.go
new file mode 100644
index 0000000..74a3668
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/snappy.go
@@ -0,0 +1,98 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the Snappy compression format. It aims for very
+// high speeds and reasonable compression.
+//
+// There are actually two Snappy formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as a Snappy stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// The canonical, C++ implementation is at https://github.com/google/snappy and
+// it only implements the block format.
+package snappy
+
+import (
+	"hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+  - If m < 60, the next 1 + m bytes are literal bytes.
+  - Otherwise, let n be the little-endian unsigned integer denoted by the next
+    m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+    of the offset. The next byte is bits 0-7 of the offset.
+  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+    The length is 1 + m. The offset is the little-endian unsigned integer
+    denoted by the next 2 bytes.
+  - For l == 3, this tag is a legacy format that is no longer issued by most
+    encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+    integer denoted by the next 4 bytes.
+*/
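+
+// Illustrative sketch (hypothetical, for the description above): given the
+// first byte c of a chunk, the chunk tag l and the value m can be read as
+//
+//	l := c & 0x03 // the 2 least significant bits; matches the tagXxx constants
+//	m := c >> 2   // the 6 most significant bits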
+const (
+	tagLiteral = 0x00
+	tagCopy1   = 0x01
+	tagCopy2   = 0x02
+	tagCopy4   = 0x03
+)
+
+const (
+	checksumSize    = 4
+	chunkHeaderSize = 4
+	magicChunk      = "\xff\x06\x00\x00" + magicBody
+	magicBody       = "sNaPpY"
+
+	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
+	// part of the wire format per se, but some parts of the encoder assume
+	// that an offset fits into a uint16.
+	//
+	// Also, for the framing format (Writer type instead of Encode function),
+	// https://github.com/google/snappy/blob/master/framing_format.txt says
+	// that "the uncompressed data in a chunk must be no longer than 65536
+	// bytes".
+	maxBlockSize = 65536
+
+	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+	// hard coded to be a const instead of a variable, so that obufLen can also
+	// be a const. Their equivalence is confirmed by
+	// TestMaxEncodedLenOfMaxBlockSize.
+	maxEncodedLenOfMaxBlockSize = 76490
+
+	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
+	return uint32(c>>15|c<<17) + 0xa282ead8
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
new file mode 100644
index 0000000..bc977a3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -0,0 +1,393 @@
+# zstd 
+
+[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. 
+It offers a very wide range of compression / speed trade-offs, while being backed by a very fast decoder.
+A high-performance compression algorithm is implemented, for now focused on speed. 
+
+This package provides [compression](#Compressor) and [decompression](#Decompressor) of Zstandard content. 
+Note that custom dictionaries are not supported yet, so if your code relies on that, 
+you cannot use the package as-is.
+
+This package is pure Go and without use of "unsafe". 
+If a significant speedup can be achieved using "unsafe", it may be added as an option later.
+
+The `zstd` package is provided as open source software using a Go standard license.
+
+Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
+
+## Installation
+
+Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
+
+Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd
+
+
+## Compressor
+
+### Status: 
+
+STABLE - there may always be subtle bugs, but a wide variety of content has been tested and the library is actively 
+used by several projects. This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+
+There may still be specific combinations of data types/size/settings that could lead to edge cases, 
+so as always, testing is recommended.  
+
+For now, a high-speed (fastest) and a medium-fast (default) compressor have been implemented. 
+
+The "Fastest" compression ratio is roughly equivalent to zstd level 1. 
+The "Default" compression ratio is roughly equivalent to zstd level 3 (default).
+
+In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. 
+Its compression ratio is around stdlib level 3, but it is usually 3x as fast.
+
+Compared to cgo zstd, the speed is around its level 3 (default), but the compression is slightly worse, between levels 1 and 2.
+
+ 
+### Usage
+
+An Encoder can be used either for compressing a stream via the
+`io.WriteCloser` interface it supports, or for multiple independent
+tasks via the `EncodeAll` function.
+Smaller encodes are encouraged to use the EncodeAll function.
+Use `NewWriter` to create a new instance that can be used for both.
+
+To create a writer with default options, do the following:
+
+```Go
+// Compress input to output.
+func Compress(in io.Reader, out io.Writer) error {
+    w, err := NewWriter(out)
+    if err != nil {
+        return err
+    }
+    _, err = io.Copy(w, in)
+    if err != nil {
+        w.Close()
+        return err
+    }
+    return w.Close()
+}
+```
+
+Now you can encode by writing data to `w`. The output will be finished writing when `Close()` is called.
+Even if your encode fails, you should still call `Close()` to release any resources that may be held up.  
+
+The above is fine for big encodes. However, whenever possible try to *reuse* the writer.
+
+To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. 
+This will allow the encoder to reuse all resources and avoid wasteful allocations. 
+
+Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part 
+of a stream. This is independent of `WithEncoderConcurrency(n)`, but that is likely to change 
+in the future. So if you want concurrency to remain limited after future updates, specify the concurrency
+you would like.
+
+You can specify your desired compression level using the `WithEncoderLevel()` option. Currently only pre-defined 
+compression settings can be specified.
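+
+As a minimal sketch combining these options (the helper name is illustrative, and
+`zstd.SpeedFastest` is assumed to name the predefined "fastest" level mentioned above):
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// newFastWriter returns a writer limited to 2 concurrent encodes,
+// using the predefined "fastest" compression level.
+func newFastWriter(out io.Writer) (*zstd.Encoder, error) {
+    return zstd.NewWriter(out,
+        zstd.WithEncoderConcurrency(2),
+        zstd.WithEncoderLevel(zstd.SpeedFastest))
+}
+```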
+
+#### Future Compatibility Guarantees
+
+This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change.
+
+The goal will be to keep the default efficiency at the default zstd (level 3). 
+However, the encoding should never be assumed to remain the same, 
+and you should not use hashes of compressed output for similarity checks.
+
+The Encoder can be assumed to produce the same output from the exact same code version.
+However, there may be modes in the future that break this, 
+although they will not be enabled without an explicit option.   
+
+This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
+
+Also note, that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
+[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43) 
+and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
+
+#### Blocks
+
+For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
+
+`EncodeAll` will encode all input in src and append it to dst.
+This function can be called concurrently, but each call will only run on a single goroutine.
+
+Encoded blocks can be concatenated and the result will be the combined input stream.
+Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
+
+Especially when encoding blocks you should take special care to reuse the encoder. 
+This will effectively make it run without allocations after a warmup period. 
+To make it run completely without allocations, supply a destination buffer with space for all content.   
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a writer that caches compressors.
+// For this operation type we supply a nil Reader.
+var encoder, _ = zstd.NewWriter(nil)
+
+// Compress a buffer. 
+// If you have a destination buffer, the allocation in the call can also be eliminated.
+func Compress(src []byte) []byte {
+    return encoder.EncodeAll(src, make([]byte, 0, len(src)))
+} 
+```
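+
+If a destination buffer is already available, a hedged variant of the sketch above can
+reuse it (`buf` and its sizing are assumptions of this example):
+
+```Go
+// CompressTo appends the compressed src to buf[:0]. When buf has enough
+// capacity, this avoids the allocation made in the example above.
+func CompressTo(src, buf []byte) []byte {
+    return encoder.EncodeAll(src, buf[:0])
+}
+```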
+
+You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)` 
+option when creating the writer.
+
+Using the Encoder for both a stream and individual blocks concurrently is safe. 
+
+### Performance
+
+I have collected some examples to compare speed and compression against other compressors.
+
+* `file` is the input file.
+* `out` is the compressor used. `zskp` is this package. `gzstd` is gzip standard library. `zstd` is the Datadog cgo library.
+* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default".
+* `insize`/`outsize` is the input/output size.
+* `millis` is the number of milliseconds used for compression.
+* `mb/s` is megabytes (2^20 bytes) per second.
+
+```
+The test data for the Large Text Compression Benchmark is the first
+10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
+http://mattmahoney.net/dc/textdata.html
+
+file    out     level   insize  outsize     millis  mb/s
+enwik9  zskp    1   1000000000  343833033   5840    163.30
+enwik9  zskp    2   1000000000  317822183   8449    112.87
+enwik9  gzstd   1   1000000000  382578136   13627   69.98
+enwik9  gzstd   3   1000000000  349139651   22344   42.68
+enwik9  zstd    1   1000000000  357416379   4838    197.12
+enwik9  zstd    3   1000000000  313734522   7556    126.21
+
+GOB stream of binary data. Highly compressible.
+https://files.klauspost.com/compress/gob-stream.7z
+
+file        out level   insize      outsize     millis  mb/s
+gob-stream  zskp    1   1911399616  234981983   5100    357.42
+gob-stream  zskp    2   1911399616  208674003   6698    272.15
+gob-stream  gzstd   1   1911399616  357382641   14727   123.78
+gob-stream  gzstd   3   1911399616  327835097   17005   107.19
+gob-stream  zstd    1   1911399616  250787165   4075    447.22
+gob-stream  zstd    3   1911399616  208191888   5511    330.77
+
+Highly compressible JSON file. Similar to logs in a lot of ways.
+https://files.klauspost.com/compress/adresser.001.gz
+
+file            out level   insize      outsize     millis  mb/s
+adresser.001    zskp    1   1073741824  18510122    1477    692.83
+adresser.001    zskp    2   1073741824  19831697    1705    600.59
+adresser.001    gzstd   1   1073741824  47755503    3079    332.47
+adresser.001    gzstd   3   1073741824  40052381    3051    335.63
+adresser.001    zstd    1   1073741824  16135896    994     1030.18
+adresser.001    zstd    3   1073741824  17794465    905     1131.49
+
+VM Image, Linux mint with a few installed applications:
+https://files.klauspost.com/compress/rawstudio-mint14.7z
+
+file    out level   insize  outsize millis  mb/s
+rawstudio-mint14.tar    zskp    1   8558382592  3648168838  33398   244.38
+rawstudio-mint14.tar    zskp    2   8558382592  3376721436  50962   160.16
+rawstudio-mint14.tar    gzstd   1   8558382592  3926257486  84712   96.35
+rawstudio-mint14.tar    gzstd   3   8558382592  3740711978  176344  46.28
+rawstudio-mint14.tar    zstd    1   8558382592  3607859742  27903   292.51
+rawstudio-mint14.tar    zstd    3   8558382592  3341710879  46700   174.77
+
+
+The test data is designed to test archivers in realistic backup scenarios.
+http://mattmahoney.net/dc/10gb.html
+
+file    out level   insize  outsize millis  mb/s
+10gb.tar    zskp    1   10065157632 4883149814  45715   209.97
+10gb.tar    zskp    2   10065157632 4638110010  60970   157.44
+10gb.tar    gzstd   1   10065157632 5198296126  97769   98.18
+10gb.tar    gzstd   3   10065157632 4932665487  313427  30.63
+10gb.tar    zstd    1   10065157632 4940796535  40391   237.65
+10gb.tar    zstd    3   10065157632 4638618579  52911   181.42
+
+Silesia Corpus:
+http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
+
+file    out level   insize  outsize millis  mb/s
+silesia.tar zskp    1   211947520   73025800    1108    182.26
+silesia.tar zskp    2   211947520   67674684    1599    126.41
+silesia.tar gzstd   1   211947520   80007735    2515    80.37
+silesia.tar gzstd   3   211947520   73133380    4259    47.45
+silesia.tar zstd    1   211947520   73513991    933     216.64
+silesia.tar zstd    3   211947520   66793301    1377    146.79
+```
+
+### Converters
+
+As part of the development process a *Snappy* -> *Zstandard* converter was also built.
+
+This can convert a *framed* [Snappy Stream](https://godoc.org/github.com/golang/snappy#Writer) to a zstd stream. 
+Note that a single block is not framed.
+
+Conversion is done by converting the stream directly from Snappy without intermediate full decoding.
+Therefore the compression ratio is worse than what a full decompression
+and recompression could achieve, and a faulty Snappy stream may lead to a faulty Zstandard stream without
+any errors being generated.
+No CRC value is generated, and not all CRC values of the Snappy stream are checked.
+However, it provides really fast re-compression of Snappy streams.
+
+
+```
+BenchmarkSnappy_ConvertSilesia-8           1  1156001600 ns/op   183.35 MB/s
+Snappy len 103008711 -> zstd len 82687318
+
+BenchmarkSnappy_Enwik9-8           1  6472998400 ns/op   154.49 MB/s
+Snappy len 508028601 -> zstd len 390921079
+```
+
+
+```Go
+    s := zstd.SnappyConverter{}
+    n, err := s.Convert(input, output)
+    if err != nil {
+        return err
+    }
+    fmt.Println("Re-compressed stream to", n, "bytes")
+```
+
+The converter `s` can be reused to avoid allocations, even after errors.
+
+
+## Decompressor
+
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+
+This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+kindly supplied by [fuzzit.dev](https://fuzzit.dev/). 
+The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, 
+or run it past its limits with ANY input provided.  
+ 
+### Usage
+
+The package has been designed for two main usages: big streams of data and smaller in-memory buffers.
+Both of them are accessed by creating a `Decoder`.
+
+For streaming use, a simple setup could look like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+func Decompress(in io.Reader, out io.Writer) error {
+    d, err := zstd.NewReader(input)
+    if err != nil {
+        return err
+    }
+    defer d.Close()
+    
+    // Copy content...
+    _, err = io.Copy(out, d)
+    return err
+}
+```
+
+It is important to use the "Close" function when you no longer need the Reader to stop running goroutines. 
+See "Allocation-less operation" below.
+
+For decoding buffers, it could look something like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a reader that caches decompressors.
+// For this operation type we supply a nil Reader.
+var decoder, _ = zstd.NewReader(nil)
+
+// Decompress a buffer. We don't supply a destination buffer,
+// so it will be allocated by the decoder.
+func Decompress(src []byte) ([]byte, error) {
+    return decoder.DecodeAll(src, nil)
+} 
+```
+
+Both of these cases should provide the functionality needed. 
+The decoder can be used for *concurrent* decompression of multiple buffers. 
+It will only allow a certain number of concurrent operations to run. 
+To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder.
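+
+A minimal sketch of such a shared decoder (mirroring the buffer-decoding example above):
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// A shared decoder that allows at most 4 concurrent DecodeAll operations.
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(4))
+```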
+
+### Allocation-less operation
+
+The decoder has been designed to operate without allocations after a warmup. 
+
+This means that you should *store* the decoder for best performance. 
+To re-use a stream decoder, use the `Reset(r io.Reader) error` method to switch to another stream.
+A decoder can safely be re-used even if the previous stream failed.
+
+To release the resources, you must call the `Close()` function on a decoder.
+After this it can *no longer be reused*, but all running goroutines will be stopped.
+So you *must* use this if you will no longer need the Reader.
+
+For decompressing smaller buffers a single decoder can be used.
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity.
+In this case no unneeded allocations should be made. 
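+
+A minimal sketch of this pattern, reusing the stored `decoder` from above (the
+expected size is an assumption supplied by the caller):
+
+```Go
+// DecompressInto decodes src into a buffer preallocated to the caller's
+// expected capacity, so DecodeAll should not need to grow it.
+func DecompressInto(src []byte, expectedSize int) ([]byte, error) {
+    return decoder.DecodeAll(src, make([]byte, 0, expectedSize))
+}
+```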
+
+### Concurrency
+
+The buffer decoder does everything on the same goroutine and does nothing concurrently.
+It can, however, decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
+
+The stream decoder operates as follows:
+
+* One goroutine reads input and splits the input to several block decoders.
+* A number of decoders will decode blocks.
+* A goroutine coordinates these blocks and sends history from one to the next.
+
+So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
+
+Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency.
+
+In practice this means that concurrency is often limited to utilizing about 2 cores effectively.
+ 
+ 
+### Benchmarks
+
+These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
+
+The first two are streaming decodes and the remainder are smaller inputs. 
+ 
+```
+BenchmarkDecoderSilesia-8             20       642550210 ns/op   329.85 MB/s      3101 B/op        8 allocs/op
+BenchmarkDecoderSilesiaCgo-8         100       384930000 ns/op   550.61 MB/s    451878 B/op     9713 allocs/op
+
+BenchmarkDecoderEnwik9-2              10        3146000080 ns/op         317.86 MB/s        2649 B/op          9 allocs/op
+BenchmarkDecoderEnwik9Cgo-2           20        1905900000 ns/op         524.69 MB/s     1125120 B/op      45785 allocs/op
+
+BenchmarkDecoder_DecodeAll/z000000.zst-8               200     7049994 ns/op   138.26 MB/s        40 B/op        2 allocs/op
+BenchmarkDecoder_DecodeAll/z000001.zst-8            100000       19560 ns/op    97.49 MB/s        40 B/op        2 allocs/op
+BenchmarkDecoder_DecodeAll/z000002.zst-8              5000      297599 ns/op   236.99 MB/s        40 B/op        2 allocs/op
+BenchmarkDecoder_DecodeAll/z000003.zst-8              2000      725502 ns/op   141.17 MB/s        40 B/op        2 allocs/op
+BenchmarkDecoder_DecodeAll/z000004.zst-8            200000        9314 ns/op    54.54 MB/s        40 B/op        2 allocs/op
+BenchmarkDecoder_DecodeAll/z000005.zst-8             10000      137500 ns/op   104.72 MB/s        40 B/op        2 allocs/op
+BenchmarkDecoder_DecodeAll/z000006.zst-8               500     2316009 ns/op   206.06 MB/s        40 B/op        2 allocs/op
+BenchmarkDecoder_DecodeAll/z000007.zst-8             20000       64499 ns/op   344.90 MB/s        40 B/op        2 allocs/op
+BenchmarkDecoder_DecodeAll/z000008.zst-8             50000       24900 ns/op   219.56 MB/s        40 B/op        2 allocs/op
+BenchmarkDecoder_DecodeAll/z000009.zst-8              1000     2348999 ns/op   154.01 MB/s        40 B/op        2 allocs/op
+
+BenchmarkDecoder_DecodeAllCgo/z000000.zst-8            500     4268005 ns/op   228.38 MB/s   1228849 B/op        3 allocs/op
+BenchmarkDecoder_DecodeAllCgo/z000001.zst-8         100000       15250 ns/op   125.05 MB/s      2096 B/op        3 allocs/op
+BenchmarkDecoder_DecodeAllCgo/z000002.zst-8          10000      147399 ns/op   478.49 MB/s     73776 B/op        3 allocs/op
+BenchmarkDecoder_DecodeAllCgo/z000003.zst-8           5000      320798 ns/op   319.27 MB/s    139312 B/op        3 allocs/op
+BenchmarkDecoder_DecodeAllCgo/z000004.zst-8         200000       10004 ns/op    50.77 MB/s       560 B/op        3 allocs/op
+BenchmarkDecoder_DecodeAllCgo/z000005.zst-8          20000       73599 ns/op   195.64 MB/s     19120 B/op        3 allocs/op
+BenchmarkDecoder_DecodeAllCgo/z000006.zst-8           1000     1119003 ns/op   426.48 MB/s    557104 B/op        3 allocs/op
+BenchmarkDecoder_DecodeAllCgo/z000007.zst-8          20000      103450 ns/op   215.04 MB/s     71296 B/op        9 allocs/op
+BenchmarkDecoder_DecodeAllCgo/z000008.zst-8         100000       20130 ns/op   271.58 MB/s      6192 B/op        3 allocs/op
+BenchmarkDecoder_DecodeAllCgo/z000009.zst-8           2000     1123500 ns/op   322.00 MB/s    368688 B/op        3 allocs/op
+```
+
+This reflects the performance around May 2019, but this may be out of date.
+
+# Contributions
+
+Contributions are always welcome. 
+For new features/fixes, remember to add tests, and for performance enhancements include benchmarks.
+
+To share files for reproducing errors, use a service like [goobox](https://goobox.io/#/upload) or similar.
+
+For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan).
+
+This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
new file mode 100644
index 0000000..15d79d4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -0,0 +1,121 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"io"
+	"math/bits"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+	in       []byte
+	off      uint   // next byte to read is at in[off - 1]
+	value    uint64 // Maybe use [16]byte, but shifting is awkward.
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	b.fill()
+	b.fill()
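+	// Skip the zero padding and the end-of-stream marker bit of the last byte.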
+	b.bitsRead += 8 - uint8(highBits(uint32(v)))
+	return nil
+}
+
+// getBits will return n bits. n can be 0.
+func (b *bitReader) getBits(n uint8) int {
+	if n == 0 /*|| b.bitsRead >= 64 */ {
+		return 0
+	}
+	return b.getBitsFast(n)
+}
+
+// getBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) getBitsFast(n uint8) int {
+	const regMask = 64 - 1
+	v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+	b.bitsRead += n
+	return int(v)
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+	// Do single re-slice to avoid bounds checks.
+	v := b.in[b.off-4 : b.off]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value = (b.value << 32) | uint64(low)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReader) fill() {
+	if b.bitsRead < 32 {
+		return
+	}
+	if b.off >= 4 {
+		v := b.in[b.off-4 : b.off]
+		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		b.value = (b.value << 32) | uint64(low)
+		b.bitsRead -= 32
+		b.off -= 4
+		return
+	}
+	for b.off > 0 {
+		b.value = (b.value << 8) | uint64(b.in[b.off-1])
+		b.bitsRead -= 8
+		b.off--
+	}
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReader) finished() bool {
+	return b.off == 0 && b.bitsRead >= 64
+}
+
+// overread returns true if more bits have been requested than is on the stream.
+func (b *bitReader) overread() bool {
+	return b.bitsRead > 64
+}
+
+// remain returns the number of bits remaining.
+func (b *bitReader) remain() uint {
+	return b.off*8 + 64 - uint(b.bitsRead)
+}
+
+// close closes the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReader) close() error {
+	// Release reference.
+	b.in = nil
+	if b.bitsRead > 64 {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+
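+// highBits returns the index of the highest set bit of val, i.e. floor(log2(val)).
+// val must not be zero.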
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
new file mode 100644
index 0000000..303ae90
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
@@ -0,0 +1,169 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package zstd
+
+import "fmt"
+
+// bitWriter will write bits.
+// First bit will be LSB of the first byte of output.
+type bitWriter struct {
+	bitContainer uint64
+	nBits        uint8
+	out          []byte
+}
+
+// bitMask16 contains masks for the lowest n bits. It has extra entries to avoid bounds checks.
+var bitMask16 = [32]uint16{
+	0, 1, 3, 7, 0xF, 0x1F,
+	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF} /* up to 16 bits */
+
+var bitMask32 = [32]uint32{
+	0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
+	0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
+	0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
+	0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
+} // up to 32 bits
+
+// addBits16NC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// addBits32NC will add up to 32 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits32NC(value uint32, bits uint8) {
+	b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// flush will flush all pending full bytes.
+// There will be at least 56 bits available for writing when this has been called.
+// Using flush32 is faster, but leaves less space for writing.
+func (b *bitWriter) flush() {
+	v := b.nBits >> 3
+	switch v {
+	case 0:
+	case 1:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+		)
+	case 2:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+		)
+	case 3:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+		)
+	case 4:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+		)
+	case 5:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+		)
+	case 6:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+		)
+	case 7:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+		)
+	case 8:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+			byte(b.bitContainer>>56),
+		)
+	default:
+		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
+	}
+	b.bitContainer >>= v << 3
+	b.nBits &= 7
+}
+
+// flush32 will flush out, so there are at least 32 bits available for writing.
+func (b *bitWriter) flush32() {
+	if b.nBits < 32 {
+		return
+	}
+	b.out = append(b.out,
+		byte(b.bitContainer),
+		byte(b.bitContainer>>8),
+		byte(b.bitContainer>>16),
+		byte(b.bitContainer>>24))
+	b.nBits -= 32
+	b.bitContainer >>= 32
+}
+
+// flushAlign will flush remaining full bytes and align to next byte boundary.
+func (b *bitWriter) flushAlign() {
+	nbBytes := (b.nBits + 7) >> 3
+	for i := uint8(0); i < nbBytes; i++ {
+		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
+	}
+	b.nBits = 0
+	b.bitContainer = 0
+}
+
+// close will write the alignment bit and write the final byte(s)
+// to the output.
+func (b *bitWriter) close() error {
+	// End mark
+	b.addBits16Clean(1, 1)
+	// flush until next byte.
+	b.flushAlign()
+	return nil
+}
+
+// reset and continue writing by appending to out.
+func (b *bitWriter) reset(out []byte) {
+	b.bitContainer = 0
+	b.nBits = 0
+	b.out = out
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
new file mode 100644
index 0000000..ed670bc
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -0,0 +1,716 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/klauspost/compress/huff0"
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type blockType uint8
+
+//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex
+
+const (
+	blockTypeRaw blockType = iota
+	blockTypeRLE
+	blockTypeCompressed
+	blockTypeReserved
+)
+
+type literalsBlockType uint8
+
+const (
+	literalsBlockRaw literalsBlockType = iota
+	literalsBlockRLE
+	literalsBlockCompressed
+	literalsBlockTreeless
+)
+
+const (
+	// maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
+	maxCompressedBlockSize = 128 << 10
+
+	// Maximum possible block size (all Raw+Uncompressed).
+	maxBlockSize = (1 << 21) - 1
+
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header
+	maxCompressedLiteralSize = 1 << 18
+	maxRLELiteralSize        = 1 << 20
+	maxMatchLen              = 131074
+	maxSequences             = 0x7f00 + 0xffff
+
+	// We support slightly less than the reference decoder to be able to
+	// use ints on 32 bit archs.
+	maxOffsetBits = 30
+)
+
+var (
+	huffDecoderPool = sync.Pool{New: func() interface{} {
+		return &huff0.Scratch{}
+	}}
+
+	fseDecoderPool = sync.Pool{New: func() interface{} {
+		return &fseDecoder{}
+	}}
+)
+
+type blockDec struct {
+	// Raw source data of the block.
+	data        []byte
+	dataStorage []byte
+
+	// Destination of the decoded data.
+	dst []byte
+
+	// Buffer for literals data.
+	literalBuf []byte
+
+	// Window size of the block.
+	WindowSize uint64
+	Type       blockType
+	RLESize    uint32
+
+	// Is this the last block of a frame?
+	Last bool
+
+	// Use less memory
+	lowMem      bool
+	history     chan *history
+	input       chan struct{}
+	result      chan decodeOutput
+	sequenceBuf []seq
+	tmp         [4]byte
+	err         error
+	decWG       sync.WaitGroup
+}
+
+func (b *blockDec) String() string {
+	if b == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("Stream Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize)
+}
+
+func newBlockDec(lowMem bool) *blockDec {
+	b := blockDec{
+		lowMem:  lowMem,
+		result:  make(chan decodeOutput, 1),
+		input:   make(chan struct{}, 1),
+		history: make(chan *history, 1),
+	}
+	b.decWG.Add(1)
+	go b.startDecoder()
+	return &b
+}
+
+// reset will reset the block.
+// Input must be a start of a block and will be at the end of the block when returned.
+func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
+	b.WindowSize = windowSize
+	tmp := br.readSmall(3)
+	if tmp == nil {
+		if debug {
+			println("Reading block header:", io.ErrUnexpectedEOF)
+		}
+		return io.ErrUnexpectedEOF
+	}
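+	// Block header layout (3 bytes, little endian):
+	// bit 0: last block, bits 1-2: block type, bits 3-23: block/RLE size.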
+	bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
+	b.Last = bh&1 != 0
+	b.Type = blockType((bh >> 1) & 3)
+	// find size.
+	cSize := int(bh >> 3)
+	switch b.Type {
+	case blockTypeReserved:
+		return ErrReservedBlockType
+	case blockTypeRLE:
+		b.RLESize = uint32(cSize)
+		cSize = 1
+	case blockTypeCompressed:
+		if debug {
+			println("Data size on stream:", cSize)
+		}
+		b.RLESize = 0
+		if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
+			if debug {
+				printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b)
+			}
+			return ErrCompressedSizeTooBig
+		}
+	default:
+		b.RLESize = 0
+	}
+
+	// Read block data.
+	if cap(b.dataStorage) < cSize {
+		if b.lowMem {
+			b.dataStorage = make([]byte, 0, cSize)
+		} else {
+			b.dataStorage = make([]byte, 0, maxBlockSize)
+		}
+	}
+	if cap(b.dst) <= maxBlockSize {
+		b.dst = make([]byte, 0, maxBlockSize+1)
+	}
+	var err error
+	b.data, err = br.readBig(cSize, b.dataStorage)
+	if err != nil {
+		if debug {
+			println("Reading block:", err, "(", cSize, ")", len(b.data))
+			printf("%T", br)
+		}
+		return err
+	}
+	return nil
+}
+
+// sendErr will make the decoder return err once this block is processed.
+func (b *blockDec) sendErr(err error) {
+	b.Last = true
+	b.Type = blockTypeReserved
+	b.err = err
+	b.input <- struct{}{}
+}
+
+// Close will release resources.
+// Closed blockDec cannot be reset.
+func (b *blockDec) Close() {
+	close(b.input)
+	close(b.history)
+	close(b.result)
+	b.decWG.Wait()
+}
+
+// startDecoder runs in its own goroutine and decodes blocks as input arrives.
+// Output and history are exchanged on separate channels.
+func (b *blockDec) startDecoder() {
+	defer b.decWG.Done()
+	for range b.input {
+		//println("blockDec: Got block input")
+		switch b.Type {
+		case blockTypeRLE:
+			if cap(b.dst) < int(b.RLESize) {
+				if b.lowMem {
+					b.dst = make([]byte, b.RLESize)
+				} else {
+					b.dst = make([]byte, maxBlockSize)
+				}
+			}
+			o := decodeOutput{
+				d:   b,
+				b:   b.dst[:b.RLESize],
+				err: nil,
+			}
+			v := b.data[0]
+			for i := range o.b {
+				o.b[i] = v
+			}
+			hist := <-b.history
+			hist.append(o.b)
+			b.result <- o
+		case blockTypeRaw:
+			o := decodeOutput{
+				d:   b,
+				b:   b.data,
+				err: nil,
+			}
+			hist := <-b.history
+			hist.append(o.b)
+			b.result <- o
+		case blockTypeCompressed:
+			b.dst = b.dst[:0]
+			err := b.decodeCompressed(nil)
+			o := decodeOutput{
+				d:   b,
+				b:   b.dst,
+				err: err,
+			}
+			if debug {
+				println("Decompressed to", len(b.dst), "bytes, error:", err)
+			}
+			b.result <- o
+		case blockTypeReserved:
+			// Used for returning errors.
+			<-b.history
+			b.result <- decodeOutput{
+				d:   b,
+				b:   nil,
+				err: b.err,
+			}
+		default:
+			panic("Invalid block type")
+		}
+		if debug {
+			println("blockDec: Finished block")
+		}
+	}
+}
+
+// decodeBuf decodes the block using the provided history.
+// Since history is supplied, it is not fetched from the channel.
+func (b *blockDec) decodeBuf(hist *history) error {
+	switch b.Type {
+	case blockTypeRLE:
+		if cap(b.dst) < int(b.RLESize) {
+			if b.lowMem {
+				b.dst = make([]byte, b.RLESize)
+			} else {
+				b.dst = make([]byte, maxBlockSize)
+			}
+		}
+		b.dst = b.dst[:b.RLESize]
+		v := b.data[0]
+		for i := range b.dst {
+			b.dst[i] = v
+		}
+		hist.appendKeep(b.dst)
+		return nil
+	case blockTypeRaw:
+		hist.appendKeep(b.data)
+		return nil
+	case blockTypeCompressed:
+		saved := b.dst
+		b.dst = hist.b
+		hist.b = nil
+		err := b.decodeCompressed(hist)
+		if debug {
+			println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
+		}
+		hist.b = b.dst
+		b.dst = saved
+		return err
+	case blockTypeReserved:
+		// Used for returning errors.
+		return b.err
+	default:
+		panic("Invalid block type")
+	}
+}
+
+// decodeCompressed will start decompressing a block.
+// If no history is supplied the decoder will decodeAsync as much as possible
+// before fetching from blockDec.history
+func (b *blockDec) decodeCompressed(hist *history) error {
+	in := b.data
+	delayedHistory := hist == nil
+
+	if delayedHistory {
+		// We must always grab history.
+		defer func() {
+			if hist == nil {
+				<-b.history
+			}
+		}()
+	}
+	// There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header
+	if len(in) < 2 {
+		return ErrBlockTooSmall
+	}
+	litType := literalsBlockType(in[0] & 3)
+	var litRegenSize int
+	var litCompSize int
+	sizeFormat := (in[0] >> 2) & 3
+	var fourStreams bool
+	switch litType {
+	case literalsBlockRaw, literalsBlockRLE:
+		switch sizeFormat {
+		case 0, 2:
+			// Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte.
+			litRegenSize = int(in[0] >> 3)
+			in = in[1:]
+		case 1:
+			// Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes.
+			litRegenSize = int(in[0]>>4) + (int(in[1]) << 4)
+			in = in[2:]
+		case 3:
+			//  Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes.
+			if len(in) < 3 {
+				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+				return ErrBlockTooSmall
+			}
+			litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12)
+			in = in[3:]
+		}
+	case literalsBlockCompressed, literalsBlockTreeless:
+		switch sizeFormat {
+		case 0, 1:
+			// Both Regenerated_Size and Compressed_Size use 10 bits (0-1023).
+			if len(in) < 3 {
+				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+				return ErrBlockTooSmall
+			}
+			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12)
+			litRegenSize = int(n & 1023)
+			litCompSize = int(n >> 10)
+			fourStreams = sizeFormat == 1
+			in = in[3:]
+		case 2:
+			fourStreams = true
+			if len(in) < 4 {
+				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+				return ErrBlockTooSmall
+			}
+			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20)
+			litRegenSize = int(n & 16383)
+			litCompSize = int(n >> 14)
+			in = in[4:]
+		case 3:
+			fourStreams = true
+			if len(in) < 5 {
+				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+				return ErrBlockTooSmall
+			}
+			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28)
+			litRegenSize = int(n & 262143)
+			litCompSize = int(n >> 18)
+			in = in[5:]
+		}
+	}
+	if debug {
+		println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams)
+	}
+	var literals []byte
+	var huff *huff0.Scratch
+	switch litType {
+	case literalsBlockRaw:
+		if len(in) < litRegenSize {
+			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize)
+			return ErrBlockTooSmall
+		}
+		literals = in[:litRegenSize]
+		in = in[litRegenSize:]
+		//printf("Found %d uncompressed literals\n", litRegenSize)
+	case literalsBlockRLE:
+		if len(in) < 1 {
+			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1)
+			return ErrBlockTooSmall
+		}
+		if cap(b.literalBuf) < litRegenSize {
+			if b.lowMem {
+				b.literalBuf = make([]byte, litRegenSize)
+			} else {
+				if litRegenSize > maxCompressedLiteralSize {
+					// Exceptional
+					b.literalBuf = make([]byte, litRegenSize)
+				} else {
+					b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
+
+				}
+			}
+		}
+		literals = b.literalBuf[:litRegenSize]
+		v := in[0]
+		for i := range literals {
+			literals[i] = v
+		}
+		in = in[1:]
+		if debug {
+			printf("Found %d RLE compressed literals\n", litRegenSize)
+		}
+	case literalsBlockTreeless:
+		if len(in) < litCompSize {
+			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
+			return ErrBlockTooSmall
+		}
+		// Store compressed literals, so we defer decoding until we get history.
+		literals = in[:litCompSize]
+		in = in[litCompSize:]
+		if debug {
+			printf("Found %d compressed literals\n", litCompSize)
+		}
+	case literalsBlockCompressed:
+		if len(in) < litCompSize {
+			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
+			return ErrBlockTooSmall
+		}
+		literals = in[:litCompSize]
+		in = in[litCompSize:]
+		huff = huffDecoderPool.Get().(*huff0.Scratch)
+		var err error
+		// Ensure we have space to store it.
+		if cap(b.literalBuf) < litRegenSize {
+			if b.lowMem {
+				b.literalBuf = make([]byte, 0, litRegenSize)
+			} else {
+				b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
+			}
+		}
+		if huff == nil {
+			huff = &huff0.Scratch{}
+		}
+		huff.Out = b.literalBuf[:0]
+		huff, literals, err = huff0.ReadTable(literals, huff)
+		if err != nil {
+			println("reading huffman table:", err)
+			return err
+		}
+		// Use our out buffer.
+		huff.Out = b.literalBuf[:0]
+		huff.MaxDecodedSize = litRegenSize
+		if fourStreams {
+			literals, err = huff.Decompress4X(literals, litRegenSize)
+		} else {
+			literals, err = huff.Decompress1X(literals)
+		}
+		if err != nil {
+			println("decoding compressed literals:", err)
+			return err
+		}
+		// Make sure we don't leak our literals buffer
+		huff.Out = nil
+		if len(literals) != litRegenSize {
+			return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+		}
+		if debug {
+			printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
+		}
+	}
+
+	// Decode Sequences
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
+	if len(in) < 1 {
+		return ErrBlockTooSmall
+	}
+	seqHeader := in[0]
+	nSeqs := 0
+	switch {
+	case seqHeader == 0:
+		in = in[1:]
+	case seqHeader < 128:
+		nSeqs = int(seqHeader)
+		in = in[1:]
+	case seqHeader < 255:
+		if len(in) < 2 {
+			return ErrBlockTooSmall
+		}
+		nSeqs = int(seqHeader-128)<<8 | int(in[1])
+		in = in[2:]
+	case seqHeader == 255:
+		if len(in) < 3 {
+			return ErrBlockTooSmall
+		}
+		nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
+		in = in[3:]
+	}
+	// Allocate sequences
+	if cap(b.sequenceBuf) < nSeqs {
+		if b.lowMem {
+			b.sequenceBuf = make([]seq, nSeqs)
+		} else {
+			// Allocate max
+			b.sequenceBuf = make([]seq, nSeqs, maxSequences)
+		}
+	} else {
+		// Reuse buffer
+		b.sequenceBuf = b.sequenceBuf[:nSeqs]
+	}
+	var seqs = &sequenceDecs{}
+	if nSeqs > 0 {
+		if len(in) < 1 {
+			return ErrBlockTooSmall
+		}
+		br := byteReader{b: in, off: 0}
+		compMode := br.Uint8()
+		br.advance(1)
+		if debug {
+			printf("Compression modes: 0b%b", compMode)
+		}
+		for i := uint(0); i < 3; i++ {
+			mode := seqCompMode((compMode >> (6 - i*2)) & 3)
+			if debug {
+				println("Table", tableIndex(i), "is", mode)
+			}
+			var seq *sequenceDec
+			switch tableIndex(i) {
+			case tableLiteralLengths:
+				seq = &seqs.litLengths
+			case tableOffsets:
+				seq = &seqs.offsets
+			case tableMatchLengths:
+				seq = &seqs.matchLengths
+			default:
+				panic("unknown table")
+			}
+			switch mode {
+			case compModePredefined:
+				seq.fse = &fsePredef[i]
+			case compModeRLE:
+				if br.remain() < 1 {
+					return ErrBlockTooSmall
+				}
+				v := br.Uint8()
+				br.advance(1)
+				dec := fseDecoderPool.Get().(*fseDecoder)
+				symb, err := decSymbolValue(v, symbolTableX[i])
+				if err != nil {
+					printf("RLE Transform table (%v) error: %v", tableIndex(i), err)
+					return err
+				}
+				dec.setRLE(symb)
+				seq.fse = dec
+				if debug {
+					printf("RLE set to %+v, code: %v", symb, v)
+				}
+			case compModeFSE:
+				println("Reading table for", tableIndex(i))
+				dec := fseDecoderPool.Get().(*fseDecoder)
+				err := dec.readNCount(&br, uint16(maxTableSymbol[i]))
+				if err != nil {
+					println("Read table error:", err)
+					return err
+				}
+				err = dec.transform(symbolTableX[i])
+				if err != nil {
+					println("Transform table error:", err)
+					return err
+				}
+				if debug {
+					println("Read table ok", "symbolLen:", dec.symbolLen)
+				}
+				seq.fse = dec
+			case compModeRepeat:
+				seq.repeat = true
+			}
+			if br.overread() {
+				return io.ErrUnexpectedEOF
+			}
+		}
+		in = br.unread()
+	}
+
+	// Wait for history.
+	// All time spent after this is critical since it is strictly sequential.
+	if hist == nil {
+		hist = <-b.history
+		if hist.error {
+			return ErrDecoderClosed
+		}
+	}
+
+	// Decode treeless literal block.
+	if litType == literalsBlockTreeless {
+		// TODO: We could send the history early WITHOUT the stream history.
+		//   This would allow decoding treeless literals before the byte history is available.
+		//   Silesia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless.
+		//   So not much obvious gain here.
+
+		if hist.huffTree == nil {
+			return errors.New("literal block was treeless, but no history was defined")
+		}
+		// Ensure we have space to store it.
+		if cap(b.literalBuf) < litRegenSize {
+			if b.lowMem {
+				b.literalBuf = make([]byte, 0, litRegenSize)
+			} else {
+				b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
+			}
+		}
+		var err error
+		// Use our out buffer.
+		huff = hist.huffTree
+		huff.Out = b.literalBuf[:0]
+		huff.MaxDecodedSize = litRegenSize
+		if fourStreams {
+			literals, err = huff.Decompress4X(literals, litRegenSize)
+		} else {
+			literals, err = huff.Decompress1X(literals)
+		}
+		// Make sure we don't leak our literals buffer
+		huff.Out = nil
+		if err != nil {
+			println("decompressing literals:", err)
+			return err
+		}
+		if len(literals) != litRegenSize {
+			return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+		}
+	} else {
+		if hist.huffTree != nil && huff != nil {
+			huffDecoderPool.Put(hist.huffTree)
+			hist.huffTree = nil
+		}
+	}
+	if huff != nil {
+		huff.Out = nil
+		hist.huffTree = huff
+	}
+	if debug {
+		println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.")
+	}
+
+	if nSeqs == 0 {
+		// Decompressed content is defined entirely as Literals Section content.
+		b.dst = append(b.dst, literals...)
+		if delayedHistory {
+			hist.append(literals)
+		}
+		return nil
+	}
+
+	seqs, err := seqs.mergeHistory(&hist.decoders)
+	if err != nil {
+		return err
+	}
+	if debug {
+		println("History merged ok")
+	}
+	br := &bitReader{}
+	if err := br.init(in); err != nil {
+		return err
+	}
+
+	// TODO: Investigate if sending history without decoders are faster.
+	//   This would allow the sequences to be decoded async and only have to construct stream history.
+	//   If only recent offsets were not transferred, this would be an obvious win.
+	// 	 Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded.
+
+	if err := seqs.initialize(br, hist, literals, b.dst); err != nil {
+		println("initializing sequences:", err)
+		return err
+	}
+
+	err = seqs.decode(nSeqs, br, hist.b)
+	if err != nil {
+		return err
+	}
+	if !br.finished() {
+		return fmt.Errorf("%d extra bits on block, should be 0", br.remain())
+	}
+
+	err = br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+	}
+	if len(b.data) > maxCompressedBlockSize {
+		return fmt.Errorf("compressed block size too large (%d)", len(b.data))
+	}
+	// Set output and release references.
+	b.dst = seqs.out
+	seqs.out, seqs.literals, seqs.hist = nil, nil, nil
+
+	if !delayedHistory {
+		// If we don't have delayed history, no need to update.
+		hist.recentOffsets = seqs.prevOffset
+		return nil
+	}
+	if b.Last {
+		// if last block we don't care about history.
+		println("Last block, no history returned")
+		hist.b = hist.b[:0]
+		return nil
+	}
+	hist.append(b.dst)
+	hist.recentOffsets = seqs.prevOffset
+	if debug {
+		println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
new file mode 100644
index 0000000..507757d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -0,0 +1,837 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"math/bits"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+type blockEnc struct {
+	size      int
+	literals  []byte
+	sequences []seq
+	coders    seqCoders
+	litEnc    *huff0.Scratch
+	wr        bitWriter
+
+	extraLits int
+	last      bool
+
+	output            []byte
+	recentOffsets     [3]uint32
+	prevRecentOffsets [3]uint32
+}
+
+// init should be used once the block has been created.
+// If called more than once, the effect is the same as calling reset.
+func (b *blockEnc) init() {
+	if cap(b.literals) < maxCompressedLiteralSize {
+		b.literals = make([]byte, 0, maxCompressedLiteralSize)
+	}
+	const defSeqs = 200
+	b.literals = b.literals[:0]
+	if cap(b.sequences) < defSeqs {
+		b.sequences = make([]seq, 0, defSeqs)
+	}
+	if cap(b.output) < maxCompressedBlockSize {
+		b.output = make([]byte, 0, maxCompressedBlockSize)
+	}
+	if b.coders.mlEnc == nil {
+		b.coders.mlEnc = &fseEncoder{}
+		b.coders.mlPrev = &fseEncoder{}
+		b.coders.ofEnc = &fseEncoder{}
+		b.coders.ofPrev = &fseEncoder{}
+		b.coders.llEnc = &fseEncoder{}
+		b.coders.llPrev = &fseEncoder{}
+	}
+	b.litEnc = &huff0.Scratch{WantLogLess: 4}
+	b.reset(nil)
+}
+
+// initNewEncode can be used to reset offsets and encoders to the initial state.
+func (b *blockEnc) initNewEncode() {
+	b.recentOffsets = [3]uint32{1, 4, 8}
+	b.litEnc.Reuse = huff0.ReusePolicyNone
+	b.coders.setPrev(nil, nil, nil)
+}
+
+// reset will reset the block for a new encode, but in the same stream,
+// meaning that state will be carried over, but the block content is reset.
+// If a previous block is provided, the recent offsets are carried over.
+func (b *blockEnc) reset(prev *blockEnc) {
+	b.extraLits = 0
+	b.literals = b.literals[:0]
+	b.size = 0
+	b.sequences = b.sequences[:0]
+	b.output = b.output[:0]
+	b.last = false
+	if prev != nil {
+		b.recentOffsets = prev.prevRecentOffsets
+	}
+}
+
+// swapEncoders swaps the FSE coders and the literals encoder with those of
+// the previous block, so their state can be reused.
+func (b *blockEnc) swapEncoders(prev *blockEnc) {
+	b.coders.swap(&prev.coders)
+	b.litEnc, prev.litEnc = prev.litEnc, b.litEnc
+}
+
+// blockHeader contains the information for a block header.
+type blockHeader uint32
+
+// setLast sets the 'last' indicator on a block.
+func (h *blockHeader) setLast(b bool) {
+	if b {
+		*h = *h | 1
+	} else {
+		const mask = (1 << 24) - 2
+		*h = *h & mask
+	}
+}
+
+// setSize will store the compressed size of a block.
+func (h *blockHeader) setSize(v uint32) {
+	const mask = 7
+	*h = (*h)&mask | blockHeader(v<<3)
+}
+
+// setType sets the block type.
+func (h *blockHeader) setType(t blockType) {
+	const mask = 1 | (((1 << 24) - 1) ^ 7)
+	*h = (*h & mask) | blockHeader(t<<1)
+}
+
+// appendTo will append the block header to a slice.
+func (h blockHeader) appendTo(b []byte) []byte {
+	return append(b, uint8(h), uint8(h>>8), uint8(h>>16))
+}
+
+// String returns a string representation of the block.
+func (h blockHeader) String() string {
+	return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1)
+}
+
+// literalsHeader contains literals header information.
+type literalsHeader uint64
+
+// setType can be used to set the type of literal block.
+func (h *literalsHeader) setType(t literalsBlockType) {
+	const mask = math.MaxUint64 - 3
+	*h = (*h & mask) | literalsHeader(t)
+}
+
+// setSize can be used to set a single size, for uncompressed and RLE content.
+func (h *literalsHeader) setSize(regenLen int) {
+	inBits := bits.Len32(uint32(regenLen))
+	// Only retain 2 bits
+	const mask = 3
+	lh := uint64(*h & mask)
+	switch {
+	case inBits < 5:
+		lh |= (uint64(regenLen) << 3) | (1 << 60)
+		if debug {
+			got := int(lh>>3) & 0xff
+			if got != regenLen {
+				panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)"))
+			}
+		}
+	case inBits < 12:
+		lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60)
+	case inBits < 20:
+		lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60)
+	default:
+		panic(fmt.Errorf("internal error: block too big (%d)", regenLen))
+	}
+	*h = literalsHeader(lh)
+}
+
+// setSizes will set the size of a compressed literals section and the input length.
+func (h *literalsHeader) setSizes(compLen, inLen int, single bool) {
+	compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen))
+	// Only retain 2 bits
+	const mask = 3
+	lh := uint64(*h & mask)
+	switch {
+	case compBits <= 10 && inBits <= 10:
+		if !single {
+			lh |= 1 << 2
+		}
+		lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60)
+		if debug {
+			const mmask = (1 << 24) - 1
+			n := (lh >> 4) & mmask
+			if int(n&1023) != inLen {
+				panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits))
+			}
+			if int(n>>10) != compLen {
+				panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits))
+			}
+		}
+	case compBits <= 14 && inBits <= 14:
+		lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60)
+		if single {
+			panic("single stream used with more than 10 bits length.")
+		}
+	case compBits <= 18 && inBits <= 18:
+		lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60)
+		if single {
+			panic("single stream used with more than 10 bits length.")
+		}
+	default:
+		panic("internal error: block too big")
+	}
+	*h = literalsHeader(lh)
+}
+
+// appendTo will append the literals header to a byte slice.
+func (h literalsHeader) appendTo(b []byte) []byte {
+	size := uint8(h >> 60)
+	switch size {
+	case 1:
+		b = append(b, uint8(h))
+	case 2:
+		b = append(b, uint8(h), uint8(h>>8))
+	case 3:
+		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16))
+	case 4:
+		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24))
+	case 5:
+		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32))
+	default:
+		panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size))
+	}
+	return b
+}
+
+// size returns the output size with currently set values.
+func (h literalsHeader) size() int {
+	return int(h >> 60)
+}
+
+func (h literalsHeader) String() string {
+	return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60)
+}
+
+// pushOffsets will push the recent offsets to the backup store.
+func (b *blockEnc) pushOffsets() {
+	b.prevRecentOffsets = b.recentOffsets
+}
+
+// popOffsets will restore the recent offsets from the backup store.
+func (b *blockEnc) popOffsets() {
+	b.recentOffsets = b.prevRecentOffsets
+}
+
+// matchOffset will adjust recent offsets and return the adjusted one,
+// if it matches a previous offset.
+func (b *blockEnc) matchOffset(offset, lits uint32) uint32 {
+	// Check if offset is one of the recent offsets.
+	// Adjusts the output offset accordingly.
+	// Gives a tiny bit of compression, typically around 1%.
+	if true {
+		if lits > 0 {
+			switch offset {
+			case b.recentOffsets[0]:
+				offset = 1
+			case b.recentOffsets[1]:
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 2
+			case b.recentOffsets[2]:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 3
+			default:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset += 3
+			}
+		} else {
+			switch offset {
+			case b.recentOffsets[1]:
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 1
+			case b.recentOffsets[2]:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 2
+			case b.recentOffsets[0] - 1:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 3
+			default:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset += 3
+			}
+		}
+	} else {
+		offset += 3
+	}
+	return offset
+}
+
+// encodeRaw can be used to set the output to a raw representation of supplied bytes.
+func (b *blockEnc) encodeRaw(a []byte) {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(a)))
+	bh.setType(blockTypeRaw)
+	b.output = bh.appendTo(b.output[:0])
+	b.output = append(b.output, a...)
+	if debug {
+		println("Adding RAW block, length", len(a))
+	}
+}
+
+// encodeRawTo will append a raw block of the supplied bytes to dst.
+func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(src)))
+	bh.setType(blockTypeRaw)
+	dst = bh.appendTo(dst)
+	dst = append(dst, src...)
+	if debug {
+		println("Adding RAW block, length", len(src))
+	}
+	return dst
+}
+
+// encodeLits can be used if the block is only litLen.
+func (b *blockEnc) encodeLits(raw bool) error {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(b.literals)))
+
+	// Don't compress extremely small blocks
+	if len(b.literals) < 32 || raw {
+		if debug {
+			println("Adding RAW block, length", len(b.literals))
+		}
+		bh.setType(blockTypeRaw)
+		b.output = bh.appendTo(b.output)
+		b.output = append(b.output, b.literals...)
+		return nil
+	}
+
+	var (
+		out            []byte
+		reUsed, single bool
+		err            error
+	)
+	if len(b.literals) >= 1024 {
+		// Use 4 Streams.
+		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
+	} else if len(b.literals) > 32 {
+		// Use 1 stream
+		single = true
+		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
+	} else {
+		err = huff0.ErrIncompressible
+	}
+
+	switch err {
+	case huff0.ErrIncompressible:
+		if debug {
+			println("Adding RAW block, length", len(b.literals))
+		}
+		bh.setType(blockTypeRaw)
+		b.output = bh.appendTo(b.output)
+		b.output = append(b.output, b.literals...)
+		return nil
+	case huff0.ErrUseRLE:
+		if debug {
+			println("Adding RLE block, length", len(b.literals))
+		}
+		bh.setType(blockTypeRLE)
+		b.output = bh.appendTo(b.output)
+		b.output = append(b.output, b.literals[0])
+		return nil
+	default:
+		return err
+	case nil:
+	}
+	// Compressed...
+	// Now, allow reuse
+	b.litEnc.Reuse = huff0.ReusePolicyAllow
+	bh.setType(blockTypeCompressed)
+	var lh literalsHeader
+	if reUsed {
+		if debug {
+			println("Reused tree, compressed to", len(out))
+		}
+		lh.setType(literalsBlockTreeless)
+	} else {
+		if debug {
+			println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable))
+		}
+		lh.setType(literalsBlockCompressed)
+	}
+	// Set sizes
+	lh.setSizes(len(out), len(b.literals), single)
+	bh.setSize(uint32(len(out) + lh.size() + 1))
+
+	// Write block headers.
+	b.output = bh.appendTo(b.output)
+	b.output = lh.appendTo(b.output)
+	// Add compressed data.
+	b.output = append(b.output, out...)
+	// No sequences.
+	b.output = append(b.output, 0)
+	return nil
+}
+
+// fuzzFseEncoder can be used to fuzz the FSE encoder.
+func fuzzFseEncoder(data []byte) int {
+	if len(data) > maxSequences || len(data) < 2 {
+		return 0
+	}
+	enc := fseEncoder{}
+	hist := enc.Histogram()[:256]
+	maxSym := uint8(0)
+	for i, v := range data {
+		v = v & 63
+		data[i] = v
+		hist[v]++
+		if v > maxSym {
+			maxSym = v
+		}
+	}
+	if maxSym == 0 {
+		// All 0
+		return 0
+	}
+	maxCount := func(a []uint32) int {
+		var max uint32
+		for _, v := range a {
+			if v > max {
+				max = v
+			}
+		}
+		return int(max)
+	}
+	cnt := maxCount(hist[:maxSym])
+	if cnt == len(data) {
+		// RLE
+		return 0
+	}
+	enc.HistogramFinished(maxSym, cnt)
+	err := enc.normalizeCount(len(data))
+	if err != nil {
+		return 0
+	}
+	_, err = enc.writeCount(nil)
+	if err != nil {
+		panic(err)
+	}
+	return 1
+}
+
+// encode will encode the block and append the output in b.output.
+func (b *blockEnc) encode(raw bool) error {
+	if len(b.sequences) == 0 {
+		return b.encodeLits(raw)
+	}
+	// Require the literals to be at least ~3% (size>>5) smaller than the block,
+	// otherwise treat it as incompressible.
+	if len(b.literals) > (b.size - (b.size >> 5)) {
+		return errIncompressible
+	}
+
+	var bh blockHeader
+	var lh literalsHeader
+	bh.setLast(b.last)
+	bh.setType(blockTypeCompressed)
+	// Store offset of the block header. Needed when we know the size.
+	bhOffset := len(b.output)
+	b.output = bh.appendTo(b.output)
+
+	var (
+		out            []byte
+		reUsed, single bool
+		err            error
+	)
+	if len(b.literals) >= 1024 && !raw {
+		// Use 4 Streams.
+		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
+	} else if len(b.literals) > 32 && !raw {
+		// Use 1 stream
+		single = true
+		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
+	} else {
+		err = huff0.ErrIncompressible
+	}
+
+	switch err {
+	case huff0.ErrIncompressible:
+		lh.setType(literalsBlockRaw)
+		lh.setSize(len(b.literals))
+		b.output = lh.appendTo(b.output)
+		b.output = append(b.output, b.literals...)
+		if debug {
+			println("Adding literals RAW, length", len(b.literals))
+		}
+	case huff0.ErrUseRLE:
+		lh.setType(literalsBlockRLE)
+		lh.setSize(len(b.literals))
+		b.output = lh.appendTo(b.output)
+		b.output = append(b.output, b.literals[0])
+		if debug {
+			println("Adding literals RLE")
+		}
+	default:
+		if debug {
+			println("Adding literals ERROR:", err)
+		}
+		return err
+	case nil:
+		// Compressed litLen...
+		if reUsed {
+			if debug {
+				println("reused tree")
+			}
+			lh.setType(literalsBlockTreeless)
+		} else {
+			if debug {
+				println("new tree, size:", len(b.litEnc.OutTable))
+			}
+			lh.setType(literalsBlockCompressed)
+			if debug {
+				_, _, err := huff0.ReadTable(out, nil)
+				if err != nil {
+					panic(err)
+				}
+			}
+		}
+		lh.setSizes(len(out), len(b.literals), single)
+		if debug {
+			printf("Compressed %d literals to %d bytes", len(b.literals), len(out))
+			println("Adding literal header:", lh)
+		}
+		b.output = lh.appendTo(b.output)
+		b.output = append(b.output, out...)
+		b.litEnc.Reuse = huff0.ReusePolicyAllow
+		if debug {
+			println("Adding literals compressed")
+		}
+	}
+	// Sequence compression
+
+	// Write the number of sequences
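+	// Per the spec: one byte if < 128; two bytes with the high bit set in the
+	// first if < 0x7f00; otherwise 255 followed by (count - 0x7f00) little endian.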
+	switch {
+	case len(b.sequences) < 128:
+		b.output = append(b.output, uint8(len(b.sequences)))
+	case len(b.sequences) < 0x7f00: // TODO: this could be wrong
+		n := len(b.sequences)
+		b.output = append(b.output, 128+uint8(n>>8), uint8(n))
+	default:
+		n := len(b.sequences) - 0x7f00
+		b.output = append(b.output, 255, uint8(n), uint8(n>>8))
+	}
+	if debug {
+		println("Encoding", len(b.sequences), "sequences")
+	}
+	b.genCodes()
+	llEnc := b.coders.llEnc
+	ofEnc := b.coders.ofEnc
+	mlEnc := b.coders.mlEnc
+	err = llEnc.normalizeCount(len(b.sequences))
+	if err != nil {
+		return err
+	}
+	err = ofEnc.normalizeCount(len(b.sequences))
+	if err != nil {
+		return err
+	}
+	err = mlEnc.normalizeCount(len(b.sequences))
+	if err != nil {
+		return err
+	}
+
+	// Choose the best compression mode for each type.
+	// Will evaluate the new vs predefined and previous.
+	chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) {
+		// See if predefined/previous is better
+		hist := cur.count[:cur.symbolLen]
+		nSize := cur.approxSize(hist) + cur.maxHeaderSize()
+		predefSize := preDef.approxSize(hist)
+		prevSize := prev.approxSize(hist)
+
+		// Add a small penalty for new encoders.
+		// Don't bother with extremely small (<2 byte gains).
+		nSize = nSize + (nSize+2*8*16)>>4
+		switch {
+		case predefSize <= prevSize && predefSize <= nSize || forcePreDef:
+			if debug {
+				println("Using predefined", predefSize>>3, "<=", nSize>>3)
+			}
+			return preDef, compModePredefined
+		case prevSize <= nSize:
+			if debug {
+				println("Using previous", prevSize>>3, "<=", nSize>>3)
+			}
+			return prev, compModeRepeat
+		default:
+			if debug {
+				println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes")
+				println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen])
+			}
+			return cur, compModeFSE
+		}
+	}
+
+	// Write compression mode
+	var mode uint8
+	if llEnc.useRLE {
+		mode |= uint8(compModeRLE) << 6
+		llEnc.setRLE(b.sequences[0].llCode)
+		if debug {
+			println("llEnc.useRLE")
+		}
+	} else {
+		var m seqCompMode
+		llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths])
+		mode |= uint8(m) << 6
+	}
+	if ofEnc.useRLE {
+		mode |= uint8(compModeRLE) << 4
+		ofEnc.setRLE(b.sequences[0].ofCode)
+		if debug {
+			println("ofEnc.useRLE")
+		}
+	} else {
+		var m seqCompMode
+		ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets])
+		mode |= uint8(m) << 4
+	}
+
+	if mlEnc.useRLE {
+		mode |= uint8(compModeRLE) << 2
+		mlEnc.setRLE(b.sequences[0].mlCode)
+		if debug {
+			println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen)
+		}
+	} else {
+		var m seqCompMode
+		mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths])
+		mode |= uint8(m) << 2
+	}
+	b.output = append(b.output, mode)
+	if debug {
+		printf("Compression modes: 0b%b", mode)
+	}
+	b.output, err = llEnc.writeCount(b.output)
+	if err != nil {
+		return err
+	}
+	start := len(b.output)
+	b.output, err = ofEnc.writeCount(b.output)
+	if err != nil {
+		return err
+	}
+	if false {
+		println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount)
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen)
+		for i, v := range ofEnc.norm[:ofEnc.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v)
+		}
+	}
+	b.output, err = mlEnc.writeCount(b.output)
+	if err != nil {
+		return err
+	}
+
+	// Maybe in block?
+	wr := &b.wr
+	wr.reset(b.output)
+
+	var ll, of, ml cState
+
+	// Current sequence
+	seq := len(b.sequences) - 1
+	s := b.sequences[seq]
+	llEnc.setBits(llBitsTable[:])
+	mlEnc.setBits(mlBitsTable[:])
+	ofEnc.setBits(nil)
+
+	llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256]
+
+	// We have 3 bounds checks here (and in the loop).
+	// Since we are iterating backwards it is kinda hard to avoid.
+	llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
+	ll.init(wr, &llEnc.ct, llB)
+	of.init(wr, &ofEnc.ct, ofB)
+	wr.flush32()
+	ml.init(wr, &mlEnc.ct, mlB)
+
+	// Each of these lookups also generates a bounds check.
+	wr.addBits32NC(s.litLen, llB.outBits)
+	wr.addBits32NC(s.matchLen, mlB.outBits)
+	wr.flush32()
+	wr.addBits32NC(s.offset, ofB.outBits)
+	if debugSequences {
+		println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB)
+	}
+	seq--
+	if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 {
+		// No need to flush (common)
+		for seq >= 0 {
+			s = b.sequences[seq]
+			wr.flush32()
+			llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
+			// tablelog max is 8 for all.
+			of.encode(ofB)
+			ml.encode(mlB)
+			ll.encode(llB)
+			wr.flush32()
+
+			// We checked that all can stay within 32 bits
+			wr.addBits32NC(s.litLen, llB.outBits)
+			wr.addBits32NC(s.matchLen, mlB.outBits)
+			wr.addBits32NC(s.offset, ofB.outBits)
+
+			if debugSequences {
+				println("Encoded seq", seq, s)
+			}
+
+			seq--
+		}
+	} else {
+		for seq >= 0 {
+			s = b.sequences[seq]
+			wr.flush32()
+			llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
+			// tablelog max is below 8 for each.
+			of.encode(ofB)
+			ml.encode(mlB)
+			ll.encode(llB)
+			wr.flush32()
+
+			// ml+ll = max 32 bits total
+			wr.addBits32NC(s.litLen, llB.outBits)
+			wr.addBits32NC(s.matchLen, mlB.outBits)
+			wr.flush32()
+			wr.addBits32NC(s.offset, ofB.outBits)
+
+			if debugSequences {
+				println("Encoded seq", seq, s)
+			}
+
+			seq--
+		}
+	}
+	ml.flush(mlEnc.actualTableLog)
+	of.flush(ofEnc.actualTableLog)
+	ll.flush(llEnc.actualTableLog)
+	err = wr.close()
+	if err != nil {
+		return err
+	}
+	b.output = wr.out
+
+	if len(b.output)-3-bhOffset >= b.size {
+		// Maybe even add a bigger margin.
+		b.litEnc.Reuse = huff0.ReusePolicyNone
+		return errIncompressible
+	}
+
+	// Size is output minus block header.
+	bh.setSize(uint32(len(b.output)-bhOffset) - 3)
+	if debug {
+		println("Rewriting block header", bh)
+	}
+	_ = bh.appendTo(b.output[bhOffset:bhOffset])
+	b.coders.setPrev(llEnc, mlEnc, ofEnc)
+	return nil
+}
+
+var errIncompressible = errors.New("incompressible")
+
+func (b *blockEnc) genCodes() {
+	if len(b.sequences) == 0 {
+		// nothing to do
+		return
+	}
+
+	if len(b.sequences) > math.MaxUint16 {
+		panic("can only encode up to 64K sequences")
+	}
+	// No bounds checks after here:
+	llH := b.coders.llEnc.Histogram()[:256]
+	ofH := b.coders.ofEnc.Histogram()[:256]
+	mlH := b.coders.mlEnc.Histogram()[:256]
+	for i := range llH {
+		llH[i] = 0
+	}
+	for i := range ofH {
+		ofH[i] = 0
+	}
+	for i := range mlH {
+		mlH[i] = 0
+	}
+
+	var llMax, ofMax, mlMax uint8
+	for i, seq := range b.sequences {
+		v := llCode(seq.litLen)
+		seq.llCode = v
+		llH[v]++
+		if v > llMax {
+			llMax = v
+		}
+
+		v = ofCode(seq.offset)
+		seq.ofCode = v
+		ofH[v]++
+		if v > ofMax {
+			ofMax = v
+		}
+
+		v = mlCode(seq.matchLen)
+		seq.mlCode = v
+		mlH[v]++
+		if v > mlMax {
+			mlMax = v
+			if debug && mlMax > maxMatchLengthSymbol {
+				panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
+			}
+		}
+		b.sequences[i] = seq
+	}
+	maxCount := func(a []uint32) int {
+		var max uint32
+		for _, v := range a {
+			if v > max {
+				max = v
+			}
+		}
+		return int(max)
+	}
+	if mlMax > maxMatchLengthSymbol {
+		panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
+	}
+	if ofMax > maxOffsetBits {
+		panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
+	}
+	if llMax > maxLiteralLengthSymbol {
+		panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
+	}
+
+	b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
+	b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
+	b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
new file mode 100644
index 0000000..01a01e4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
@@ -0,0 +1,85 @@
+// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT.
+
+package zstd
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[blockTypeRaw-0]
+	_ = x[blockTypeRLE-1]
+	_ = x[blockTypeCompressed-2]
+	_ = x[blockTypeReserved-3]
+}
+
+const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved"
+
+var _blockType_index = [...]uint8{0, 12, 24, 43, 60}
+
+func (i blockType) String() string {
+	if i >= blockType(len(_blockType_index)-1) {
+		return "blockType(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _blockType_name[_blockType_index[i]:_blockType_index[i+1]]
+}
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[literalsBlockRaw-0]
+	_ = x[literalsBlockRLE-1]
+	_ = x[literalsBlockCompressed-2]
+	_ = x[literalsBlockTreeless-3]
+}
+
+const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless"
+
+var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76}
+
+func (i literalsBlockType) String() string {
+	if i >= literalsBlockType(len(_literalsBlockType_index)-1) {
+		return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]]
+}
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[compModePredefined-0]
+	_ = x[compModeRLE-1]
+	_ = x[compModeFSE-2]
+	_ = x[compModeRepeat-3]
+}
+
+const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat"
+
+var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54}
+
+func (i seqCompMode) String() string {
+	if i >= seqCompMode(len(_seqCompMode_index)-1) {
+		return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]]
+}
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[tableLiteralLengths-0]
+	_ = x[tableOffsets-1]
+	_ = x[tableMatchLengths-2]
+}
+
+const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths"
+
+var _tableIndex_index = [...]uint8{0, 19, 31, 48}
+
+func (i tableIndex) String() string {
+	if i >= tableIndex(len(_tableIndex_index)-1) {
+		return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]]
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
new file mode 100644
index 0000000..07321ac
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -0,0 +1,127 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+)
+
+type byteBuffer interface {
+	// Read n bytes, where n <= 8.
+	// Returns nil if fewer than n bytes are available.
+	readSmall(n int) []byte
+
+	// Read >8 bytes.
+	// MAY use the destination slice.
+	readBig(n int, dst []byte) ([]byte, error)
+
+	// Read a single byte.
+	readByte() (byte, error)
+
+	// Skip n bytes.
+	skipN(n int) error
+}
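+
+// A minimal, hypothetical sketch (not part of the upstream API) showing how
+// the in-memory byteBuf satisfies byteBuffer: every read consumes bytes from
+// the front of the slice, so the buffer itself tracks the read position.
+func exampleByteBuf() {
+	b := byteBuf([]byte{1, 2, 3, 4, 5})
+	_ = b.readSmall(2)   // returns [1 2]; b now holds [3 4 5]
+	_ = b.skipN(1)       // b now holds [4 5]
+	v, _ := b.readByte() // v == 4; b now holds [5]
+	_ = v
+}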
+
+// in-memory buffer
+type byteBuf []byte
+
+func (b *byteBuf) readSmall(n int) []byte {
+	if debug && n > 8 {
+		panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
+	}
+	bb := *b
+	if len(bb) < n {
+		return nil
+	}
+	r := bb[:n]
+	*b = bb[n:]
+	return r
+}
+
+func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
+	bb := *b
+	if len(bb) < n {
+		return nil, io.ErrUnexpectedEOF
+	}
+	r := bb[:n]
+	*b = bb[n:]
+	return r, nil
+}
+
+func (b *byteBuf) remain() []byte {
+	return *b
+}
+
+func (b *byteBuf) readByte() (byte, error) {
+	bb := *b
+	if len(bb) < 1 {
+		return 0, io.ErrUnexpectedEOF
+	}
+	r := bb[0]
+	*b = bb[1:]
+	return r, nil
+}
+
+func (b *byteBuf) skipN(n int) error {
+	bb := *b
+	if len(bb) < n {
+		return io.ErrUnexpectedEOF
+	}
+	*b = bb[n:]
+	return nil
+}
+
+// wrapper around a reader.
+type readerWrapper struct {
+	r   io.Reader
+	tmp [8]byte
+}
+
+func (r *readerWrapper) readSmall(n int) []byte {
+	if debug && n > 8 {
+		panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
+	}
+	n2, err := io.ReadFull(r.r, r.tmp[:n])
+	// We only really care about the actual bytes read.
+	if n2 != n {
+		if debug {
+			println("readSmall: got", n2, "want", n, "err", err)
+		}
+		return nil
+	}
+	return r.tmp[:n]
+}
+
+func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
+	if cap(dst) < n {
+		dst = make([]byte, n)
+	}
+	n2, err := io.ReadFull(r.r, dst[:n])
+	if err == io.EOF && n > 0 {
+		err = io.ErrUnexpectedEOF
+	}
+	return dst[:n2], err
+}
+
+func (r *readerWrapper) readByte() (byte, error) {
+	n2, err := r.r.Read(r.tmp[:1])
+	if err != nil {
+		return 0, err
+	}
+	if n2 != 1 {
+		return 0, io.ErrUnexpectedEOF
+	}
+	return r.tmp[0], nil
+}
+
+func (r *readerWrapper) skipN(n int) error {
+	n2, err := io.CopyN(ioutil.Discard, r.r, int64(n))
+	if n2 != int64(n) {
+		err = io.ErrUnexpectedEOF
+	}
+	return err
+}
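+
+// A hedged sketch (hypothetical helper, not upstream API): readerWrapper
+// adapts any io.Reader to the byteBuffer interface, so short reads surface
+// as errors instead of silently truncating.
+func exampleReaderWrapper(r io.Reader) ([]byte, error) {
+	br := readerWrapper{r: r}
+	if err := br.skipN(4); err != nil { // skip a 4-byte header
+		return nil, err
+	}
+	return br.readBig(16, nil) // read the next 16 bytes, allocating as needed
+}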
diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go
new file mode 100644
index 0000000..dc4378b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go
@@ -0,0 +1,74 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+	b   []byte
+	off int
+}
+
+// init will initialize the reader and set the input.
+func (b *byteReader) init(in []byte) {
+	b.b = in
+	b.off = 0
+}
+
+// advance the stream by n bytes.
+func (b *byteReader) advance(n uint) {
+	b.off += int(n)
+}
+
+// overread returns whether we have advanced too far.
+func (b *byteReader) overread() bool {
+	return b.off > len(b.b)
+}
+
+// Int32 returns a little endian int32 starting at current offset.
+func (b byteReader) Int32() int32 {
+	b2 := b.b[b.off : b.off+4 : b.off+4]
+	v3 := int32(b2[3])
+	v2 := int32(b2[2])
+	v1 := int32(b2[1])
+	v0 := int32(b2[0])
+	return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// Uint8 returns the byte at the current offset without advancing.
+func (b *byteReader) Uint8() uint8 {
+	v := b.b[b.off]
+	return v
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 {
+	if r := b.remain(); r < 4 {
+		// Very rare
+		v := uint32(0)
+		for i := 1; i <= r; i++ {
+			v = (v << 8) | uint32(b.b[len(b.b)-i])
+		}
+		return v
+	}
+	b2 := b.b[b.off : b.off+4 : b.off+4]
+	v3 := uint32(b2[3])
+	v2 := uint32(b2[2])
+	v1 := uint32(b2[1])
+	v0 := uint32(b2[0])
+	return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+	return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+	return len(b.b) - b.off
+}
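+
+// A small illustrative sketch (not part of the library): byteReader decodes
+// little-endian values, so the first input byte becomes the lowest byte of
+// the result.
+func exampleByteReader() uint32 {
+	var br byteReader
+	br.init([]byte{0x01, 0x02, 0x03, 0x04})
+	return br.Uint32() // 0x04030201
+}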
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
new file mode 100644
index 0000000..35a3cda
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -0,0 +1,513 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"sync"
+)
+
+// Decoder provides decoding of zstandard streams.
+// The decoder has been designed to operate without allocations after a warmup.
+// This means that you should store the decoder for best performance.
+// To re-use a stream decoder, use Reset(r io.Reader) to switch to another stream.
+// A decoder can safely be re-used even if the previous stream failed.
+// To release the resources, you must call the Close() function on a decoder.
+type Decoder struct {
+	o decoderOptions
+
+	// Unreferenced decoders, ready for use.
+	decoders chan *blockDec
+
+	// Unreferenced frame decoders, ready for use.
+	frames chan *frameDec
+
+	// Streams ready to be decoded.
+	stream chan decodeStream
+
+	// Current read position used for Reader functionality.
+	current decoderState
+
+	// Custom dictionaries
+	dicts map[uint32]struct{}
+
+	// streamWg is the waitgroup for all streams
+	streamWg sync.WaitGroup
+}
+
+// decoderState is used for maintaining state when the decoder
+// is used for streaming.
+type decoderState struct {
+	// current block being written to stream.
+	decodeOutput
+
+	// output in order to be written to stream.
+	output chan decodeOutput
+
+	// cancel remaining output.
+	cancel chan struct{}
+
+	flushed bool
+}
+
+var (
+	// Check the interfaces we want to support.
+	_ = io.WriterTo(&Decoder{})
+	_ = io.Reader(&Decoder{})
+)
+
+// NewReader creates a new decoder.
+// A nil Reader can be provided in which case Reset can be used to start a decode.
+//
+// A Decoder can be used in two modes:
+//
+// 1) As a stream, or
+// 2) For stateless decoding using DecodeAll or DecodeBuffer.
+//
+// Only a single stream can be decoded concurrently, but the same decoder
+// can run multiple concurrent stateless decodes. It is even possible to
+// use stateless decodes while a stream is being decoded.
+//
+// The Reset function can be used to initiate a new stream, which will considerably
+// reduce the allocations normally caused by NewReader.
+func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
+	initPredefined()
+	var d Decoder
+	d.o.setDefault()
+	for _, o := range opts {
+		err := o(&d.o)
+		if err != nil {
+			return nil, err
+		}
+	}
+	d.current.output = make(chan decodeOutput, d.o.concurrent)
+	d.current.flushed = true
+
+	// Create decoders
+	d.decoders = make(chan *blockDec, d.o.concurrent)
+	d.frames = make(chan *frameDec, d.o.concurrent)
+	for i := 0; i < d.o.concurrent; i++ {
+		d.frames <- newFrameDec(d.o)
+		d.decoders <- newBlockDec(d.o.lowMem)
+	}
+
+	if r == nil {
+		return &d, nil
+	}
+	return &d, d.Reset(r)
+}
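+
+// A minimal usage sketch (hypothetical, not part of the upstream API):
+// streaming decompression to a writer. The function name and parameters are
+// illustrative only.
+func exampleStreamDecode(r io.Reader, w io.Writer) error {
+	dec, err := NewReader(r)
+	if err != nil {
+		return err
+	}
+	defer dec.Close()
+	// WriteTo streams all decoded output to w without buffering it in memory.
+	_, err = dec.WriteTo(w)
+	return err
+}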
+
+// Read bytes from the decompressed stream into p.
+// Returns the number of bytes written and any error that occurred.
+// When the stream is done, io.EOF will be returned.
+func (d *Decoder) Read(p []byte) (int, error) {
+	if d.stream == nil {
+		return 0, errors.New("no input has been initialized")
+	}
+	var n int
+	for {
+		if len(d.current.b) > 0 {
+			filled := copy(p, d.current.b)
+			p = p[filled:]
+			d.current.b = d.current.b[filled:]
+			n += filled
+		}
+		if len(p) == 0 {
+			break
+		}
+		if len(d.current.b) == 0 {
+			// We have an error and no more data
+			if d.current.err != nil {
+				break
+			}
+			if !d.nextBlock(n == 0) {
+				return n, nil
+			}
+		}
+	}
+	if len(d.current.b) > 0 {
+		if debug {
+			println("returning", n, "still bytes left:", len(d.current.b))
+		}
+		// Only return error at end of block
+		return n, nil
+	}
+	if d.current.err != nil {
+		d.drainOutput()
+	}
+	if debug {
+		println("returning", n, d.current.err, len(d.decoders))
+	}
+	return n, d.current.err
+}
+
+// Reset will reset the decoder to the supplied stream once the current one has finished processing.
+// Note that this functionality cannot be used after Close has been called.
+func (d *Decoder) Reset(r io.Reader) error {
+	if d.current.err == ErrDecoderClosed {
+		return d.current.err
+	}
+	if r == nil {
+		return errors.New("nil Reader sent as input")
+	}
+
+	if d.stream == nil {
+		d.stream = make(chan decodeStream, 1)
+		d.streamWg.Add(1)
+		go d.startStreamDecoder(d.stream)
+	}
+
+	d.drainOutput()
+
+	// If the input is a bytes.Buffer and < 1MB, do sync decoding anyway.
+	if bb, ok := r.(*bytes.Buffer); ok && bb.Len() < 1<<20 {
+		if debug {
+			println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
+		}
+		b := bb.Bytes()
+		dst, err := d.DecodeAll(b, nil)
+		if err == nil {
+			err = io.EOF
+		}
+		d.current.b = dst
+		d.current.err = err
+		d.current.flushed = true
+		if debug {
+			println("sync decode to ", len(dst), "bytes, err:", err)
+		}
+		return nil
+	}
+
+	// Remove current block.
+	d.current.decodeOutput = decodeOutput{}
+	d.current.err = nil
+	d.current.cancel = make(chan struct{})
+	d.current.flushed = false
+	d.current.d = nil
+
+	d.stream <- decodeStream{
+		r:      r,
+		output: d.current.output,
+		cancel: d.current.cancel,
+	}
+	return nil
+}
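+
+// A hedged sketch (illustrative only): re-using one Decoder across several
+// streams via Reset, which avoids the allocations of repeated NewReader calls.
+func exampleResetReuse(dec *Decoder, streams []io.Reader, w io.Writer) error {
+	for _, r := range streams {
+		if err := dec.Reset(r); err != nil {
+			return err
+		}
+		if _, err := dec.WriteTo(w); err != nil {
+			return err
+		}
+	}
+	return nil
+}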
+
+// drainOutput will drain the output until errEndOfStream is sent.
+func (d *Decoder) drainOutput() {
+	if d.current.cancel != nil {
+		println("cancelling current")
+		close(d.current.cancel)
+		d.current.cancel = nil
+	}
+	if d.current.d != nil {
+		if debug {
+			printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders))
+		}
+		d.decoders <- d.current.d
+		d.current.d = nil
+		d.current.b = nil
+	}
+	if d.current.output == nil || d.current.flushed {
+		println("current already flushed")
+		return
+	}
+	for {
+		// A select is unnecessary here; a plain receive blocks until output arrives.
+		v := <-d.current.output
+		if v.d != nil {
+			if debug {
+				printf("re-adding decoder %p", v.d)
+			}
+			d.decoders <- v.d
+		}
+		if v.err == errEndOfStream {
+			println("current flushed")
+			d.current.flushed = true
+			return
+		}
+	}
+}
+
+// WriteTo writes data to w until there's no more data to write or when an error occurs.
+// The return value n is the number of bytes written.
+// Any error encountered during the write is also returned.
+func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
+	if d.stream == nil {
+		return 0, errors.New("no input has been initialized")
+	}
+	var n int64
+	for {
+		if len(d.current.b) > 0 {
+			n2, err2 := w.Write(d.current.b)
+			n += int64(n2)
+			if err2 != nil && d.current.err == nil {
+				d.current.err = err2
+				break
+			}
+		}
+		if d.current.err != nil {
+			break
+		}
+		d.nextBlock(true)
+	}
+	err := d.current.err
+	if err != nil {
+		d.drainOutput()
+	}
+	if err == io.EOF {
+		err = nil
+	}
+	return n, err
+}
+
+// DecodeAll allows stateless decoding of a blob of bytes.
+// Output will be appended to dst, so if the destination size is known
+// you can pre-allocate the destination slice to avoid allocations.
+// DecodeAll can be used concurrently.
+// The Decoder concurrency limits will be respected.
+func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
+	if d.current.err == ErrDecoderClosed {
+		return dst, ErrDecoderClosed
+	}
+
+	// Grab a block decoder and frame decoder.
+	block, frame := <-d.decoders, <-d.frames
+	defer func() {
+		if debug {
+			printf("re-adding decoder: %p", block)
+		}
+		d.decoders <- block
+		frame.rawInput = nil
+		frame.bBuf = nil
+		d.frames <- frame
+	}()
+	frame.bBuf = input
+
+	for {
+		err := frame.reset(&frame.bBuf)
+		if err == io.EOF {
+			return dst, nil
+		}
+		if err != nil {
+			return dst, err
+		}
+		if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+			return dst, ErrDecoderSizeExceeded
+		}
+		if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
+			// Never preallocate more than 1 GB up front.
+			if uint64(cap(dst)) < frame.FrameContentSize {
+				dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
+				copy(dst2, dst)
+				dst = dst2
+			}
+		}
+		if cap(dst) == 0 {
+			// Allocate window size * 2 by default if nothing is provided and we didn't get frame content size.
+			size := frame.WindowSize * 2
+			// Cap to 1 MB.
+			if size > 1<<20 {
+				size = 1 << 20
+			}
+			dst = make([]byte, 0, size)
+		}
+
+		dst, err = frame.runDecoder(dst, block)
+		if err != nil {
+			return dst, err
+		}
+		if len(frame.bBuf) == 0 {
+			break
+		}
+	}
+	return dst, nil
+}
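+
+// A minimal sketch (hypothetical, not upstream API): stateless decoding with
+// DecodeAll. Passing a pre-allocated dst avoids allocations when the
+// decompressed size is known in advance.
+func exampleDecodeAll(dec *Decoder, compressed []byte, expectedSize int) ([]byte, error) {
+	dst := make([]byte, 0, expectedSize)
+	// DecodeAll is safe for concurrent use, bounded by the decoder concurrency.
+	return dec.DecodeAll(compressed, dst)
+}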
+
+// nextBlock returns the next block.
+// If an error occurs d.err will be set.
+// Optionally the function can block for new output.
+// If non-blocking mode is used the returned boolean will be false
+// if no data was available without blocking.
+func (d *Decoder) nextBlock(blocking bool) (ok bool) {
+	if d.current.d != nil {
+		if debug {
+			printf("re-adding current decoder %p", d.current.d)
+		}
+		d.decoders <- d.current.d
+		d.current.d = nil
+	}
+	if d.current.err != nil {
+		// Keep error state.
+		return blocking
+	}
+
+	if blocking {
+		d.current.decodeOutput = <-d.current.output
+	} else {
+		select {
+		case d.current.decodeOutput = <-d.current.output:
+		default:
+			return false
+		}
+	}
+	if debug {
+		println("got", len(d.current.b), "bytes, error:", d.current.err)
+	}
+	return true
+}
+
+// Close will release all resources.
+// It is NOT possible to reuse the decoder after this.
+func (d *Decoder) Close() {
+	if d.current.err == ErrDecoderClosed {
+		return
+	}
+	d.drainOutput()
+	if d.stream != nil {
+		close(d.stream)
+		d.streamWg.Wait()
+		d.stream = nil
+	}
+	if d.decoders != nil {
+		close(d.decoders)
+		for dec := range d.decoders {
+			dec.Close()
+		}
+		d.decoders = nil
+	}
+	if d.current.d != nil {
+		d.current.d.Close()
+		d.current.d = nil
+	}
+	d.current.err = ErrDecoderClosed
+}
+
+// IOReadCloser returns the decoder as an io.ReadCloser for convenience.
+// Any changes to the decoder will be reflected, so the returned ReadCloser
+// can be reused along with the decoder.
+// io.WriterTo is also supported by the returned ReadCloser.
+func (d *Decoder) IOReadCloser() io.ReadCloser {
+	return closeWrapper{d: d}
+}
+
+// closeWrapper wraps a function call as a closer.
+type closeWrapper struct {
+	d *Decoder
+}
+
+// WriteTo forwards WriteTo calls to the decoder.
+func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) {
+	return c.d.WriteTo(w)
+}
+
+// Read forwards read calls to the decoder.
+func (c closeWrapper) Read(p []byte) (n int, err error) {
+	return c.d.Read(p)
+}
+
+// Close closes the decoder.
+func (c closeWrapper) Close() error {
+	c.d.Close()
+	return nil
+}
+
+type decodeOutput struct {
+	d   *blockDec
+	b   []byte
+	err error
+}
+
+type decodeStream struct {
+	r io.Reader
+
+	// Blocks ready to be written to output.
+	output chan decodeOutput
+
+	// cancel reading from the input
+	cancel chan struct{}
+}
+
+// errEndOfStream indicates that everything from the stream was read.
+var errEndOfStream = errors.New("end-of-stream")
+
+// Create Decoder:
+// Spawn n block decoders. These accept tasks to decode a block.
+// Create a goroutine that handles stream processing; it will send history to decoders as they become available.
+// Decoders update the history as they decode.
+// When a block is returned:
+// 		a) history is sent to the next decoder,
+// 		b) content is written to the CRC,
+// 		c) data is returned to the writer,
+// 		d) wait for the next block to return data.
+// Once written, the block decoder is returned to the pool for re-use by the frame decoder.
+func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
+	defer d.streamWg.Done()
+	frame := newFrameDec(d.o)
+	for stream := range inStream {
+		if debug {
+			println("got new stream")
+		}
+		br := readerWrapper{r: stream.r}
+	decodeStream:
+		for {
+			err := frame.reset(&br)
+			if debug && err != nil {
+				println("Frame decoder returned", err)
+			}
+			if err != nil {
+				stream.output <- decodeOutput{
+					err: err,
+				}
+				break
+			}
+			if debug {
+				println("starting frame decoder")
+			}
+
+			// This goroutine will forward history between frames.
+			frame.frameDone.Add(1)
+			frame.initAsync()
+
+			go frame.startDecoder(stream.output)
+		decodeFrame:
+			// Go through all blocks of the frame.
+			for {
+				dec := <-d.decoders
+				select {
+				case <-stream.cancel:
+					if !frame.sendErr(dec, io.EOF) {
+						// To not let the decoder dangle, send it back.
+						stream.output <- decodeOutput{d: dec}
+					}
+					break decodeStream
+				default:
+				}
+				err := frame.next(dec)
+				switch err {
+				case io.EOF:
+					// End of current frame, no error
+					println("EOF on next block")
+					break decodeFrame
+				case nil:
+					continue
+				default:
+					println("block decoder returned", err)
+					break decodeStream
+				}
+			}
+			// All blocks have started decoding, check if there are more frames.
+			println("waiting for done")
+			frame.frameDone.Wait()
+			println("done waiting...")
+		}
+		frame.frameDone.Wait()
+		println("Sending EOS")
+		stream.output <- decodeOutput{err: errEndOfStream}
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
new file mode 100644
index 0000000..2ac9cd2
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -0,0 +1,68 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"runtime"
+)
+
+// DOption is an option for creating a decoder.
+type DOption func(*decoderOptions) error
+
+// options retains accumulated state of multiple options.
+type decoderOptions struct {
+	lowMem         bool
+	concurrent     int
+	maxDecodedSize uint64
+}
+
+func (o *decoderOptions) setDefault() {
+	*o = decoderOptions{
+		// use less ram: true for now, but may change.
+		lowMem:     true,
+		concurrent: runtime.GOMAXPROCS(0),
+	}
+	o.maxDecodedSize = 1 << 63
+}
+
+// WithDecoderLowmem will set whether to use a lower amount of memory,
+// but possibly have to allocate more while running.
+func WithDecoderLowmem(b bool) DOption {
+	return func(o *decoderOptions) error { o.lowMem = b; return nil }
+}
+
+// WithDecoderConcurrency will set the concurrency,
+// meaning the maximum number of decoders to run concurrently.
+// The value supplied must be at least 1.
+// By default this will be set to GOMAXPROCS.
+func WithDecoderConcurrency(n int) DOption {
+	return func(o *decoderOptions) error {
+		if n <= 0 {
+			return fmt.Errorf("Concurrency must be at least 1")
+		}
+		o.concurrent = n
+		return nil
+	}
+}
+
+// WithDecoderMaxMemory allows setting a maximum decoded size for in-memory
+// non-streaming operations or maximum window size for streaming operations.
+// This can be used to control memory usage of potentially hostile content.
+// For streaming operations, the maximum window size is capped at 1<<30 bytes.
+// Maximum and default is 1 << 63 bytes.
+func WithDecoderMaxMemory(n uint64) DOption {
+	return func(o *decoderOptions) error {
+		if n == 0 {
+			return errors.New("WithDecoderMaxMemory must be at least 1")
+		}
+		if n > 1<<63 {
+			return fmt.Errorf("WithDecoderMaxmemory must be less than 1 << 63")
+		}
+		o.maxDecodedSize = n
+		return nil
+	}
+}
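+
+// A hedged sketch (values are illustrative): composing decoder options to
+// bound memory use when decoding untrusted input.
+func exampleOptions() (*Decoder, error) {
+	return NewReader(nil, // nil Reader: call Reset before streaming
+		WithDecoderConcurrency(2),
+		WithDecoderMaxMemory(64<<20), // refuse to decode more than 64 MB
+		WithDecoderLowmem(true),
+	)
+}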
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
new file mode 100644
index 0000000..ee3b09b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -0,0 +1,726 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+const (
+	dFastLongTableBits = 17                      // Bits used in the long match table
+	dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
+	dFastLongTableMask = dFastLongTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
+
+	dFastShortTableBits = tableBits                // Bits used in the short match table
+	dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
+	dFastShortTableMask = dFastShortTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
+)
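+
+// A brief sketch (not part of the encoder): because the table sizes are
+// powers of two, the redundant masks let indexing avoid bounds checks,
+// e.g. table[hash&dFastLongTableMask].
+func exampleTableIndex(cv uint64) uint32 {
+	h := hash8(cv, dFastLongTableBits) // hash8 is the long-table hash used below
+	return h & dFastLongTableMask      // always < dFastLongTableSize
+}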
+
+type doubleFastEncoder struct {
+	fastEncoder
+	longTable [dFastLongTableSize]tableEntry
+}
+
+// Encode mimics functionality in zstd_dfast.c
+func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (2)
+		inputMargin            = 8 + 2
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur > (1<<30)+e.maxMatchOff {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			for i := range e.longTable[:] {
+				e.longTable[i] = tableEntry{}
+			}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.longTable[:] {
+			v := e.longTable[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.longTable[i].offset = v
+		}
+		e.cur = e.maxMatchOff
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 1.
+	stepSize := int32(e.o.targetLength)
+	if stepSize == 0 {
+		stepSize++
+	}
+
+	const kSearchStrength = 8
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debug {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		var t int32
+		// We allow the encoder to optionally turn off repeat offsets across blocks
+		canRepeat := len(blk.sequences) > 2
+
+		for {
+			if debug && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHashS := hash5(cv, dFastShortTableBits)
+			nextHashL := hash8(cv, dFastLongTableBits)
+			candidateL := e.longTable[nextHashL]
+			candidateS := e.table[nextHashS]
+
+			const repOff = 1
+			repIndex := s - offset1 + repOff
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.table[nextHashS] = entry
+
+			if canRepeat {
+				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+					// Consider history as well.
+					var seq seq
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 0
+					seq.offset = 1
+					if debugSequences {
+						println("repeat sequence", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+					s += length + repOff
+					nextEmit = s
+					if s >= sLimit {
+						if debug {
+							println("repeat ended", s, lenght)
+
+						}
+						break encodeLoop
+					}
+					cv = load6432(src, s)
+					continue
+				}
+				const repOff2 = 1
+				// We deviate from the reference encoder and also check offset 2.
+				// Slower and not consistently better, so disabled.
+				// repIndex = s - offset2 + repOff2
+				if false && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff2*8)) {
+					// Consider history as well.
+					var seq seq
+					length := 4 + e.matchlen(s+4+repOff2, repIndex+4, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff2
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 2
+					seq.offset = 2
+					if debugSequences {
+						println("repeat sequence 2", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+					s += length + repOff2
+					nextEmit = s
+					if s >= sLimit {
+						if debug {
+							println("repeat ended", s, lenght)
+
+						}
+						break encodeLoop
+					}
+					cv = load6432(src, s)
+					// Swap offsets
+					offset1, offset2 = offset2, offset1
+					continue
+				}
+			}
+			// Find the offsets of our two matches.
+			coffsetL := s - (candidateL.offset - e.cur)
+			coffsetS := s - (candidateS.offset - e.cur)
+
+			// Check if we have a long match.
+			if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+				// Found a long match, likely at least 8 bytes.
+				// Reference encoder checks all 8 bytes, we only check 4,
+				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+				t = candidateL.offset - e.cur
+				if debug && s <= t {
+					panic("s <= t")
+				}
+				if debug && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+				break
+			}
+
+			// Check if we have a short match.
+			if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+				// found a regular match
+				// See if we can find a long match at s+1
+				const checkAt = 1
+				cv := load6432(src, s+checkAt)
+				nextHashL = hash8(cv, dFastLongTableBits)
+				candidateL = e.longTable[nextHashL]
+				coffsetL = s - (candidateL.offset - e.cur) + checkAt
+
+				// We can store it, since we have at least a 4 byte match.
+				e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
+				if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+					// Found a long match, likely at least 8 bytes.
+					// Reference encoder checks all 8 bytes, we only check 4,
+					// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+					t = candidateL.offset - e.cur
+					s += checkAt
+					if debugMatches {
+						println("long match (after short)")
+					}
+					break
+				}
+
+				t = candidateS.offset - e.cur
+				if debug && s <= t {
+					panic("s <= t")
+				}
+				if debug && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debug && t < 0 {
+					panic("t<0")
+				}
+				if debugMatches {
+					println("short match")
+				}
+				break
+			}
+
+			// No match found, move forward in input.
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+
+		// A 4-byte match has been found. Update recent offsets.
+		// We'll later see if more than 4 bytes.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debug && s <= t {
+			panic("s <= t")
+		}
+
+		if debug && canRepeat && int(offset1) > len(src) {
+			panic("invalid offset")
+		}
+
+		// Extend the 4-byte match as long as possible.
+		l := e.matchlen(s+4, t+4, src) + 4
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+
+		// Index match start+1 (long) and start+2 (short)
+		index0 := s - l + 1
+		// Index match end-2 (long) and end-1 (short)
+		index1 := s - 2
+
+		cv0 := load6432(src, index0)
+		cv1 := load6432(src, index1)
+		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+		e.longTable[hash8(cv0, dFastLongTableBits)] = te0
+		e.longTable[hash8(cv1, dFastLongTableBits)] = te1
+		cv0 >>= 8
+		cv1 >>= 8
+		te0.offset++
+		te1.offset++
+		te0.val = uint32(cv0)
+		te1.val = uint32(cv1)
+		e.table[hash5(cv0, dFastShortTableBits)] = te0
+		e.table[hash5(cv1, dFastShortTableBits)] = te1
+
+		cv = load6432(src, s)
+
+		if !canRepeat {
+			continue
+		}
+
+		// Check offset 2
+		for {
+			o2 := s - offset2
+			if load3232(src, o2) != uint32(cv) {
+				// Do regular search
+				break
+			}
+
+			// Store this, since we have it.
+			nextHashS := hash5(cv1>>8, dFastShortTableBits)
+			nextHashL := hash8(cv, dFastLongTableBits)
+
+			// We have at least 4 byte match.
+			// No need to check backwards. We come straight from a match
+			l := 4 + e.matchlen(s+4, o2+4, src)
+
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.table[nextHashS] = entry
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				// Finished
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	if debug {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// The most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (2)
+		inputMargin            = 8 + 2
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	if e.cur > (1<<30)+e.maxMatchOff {
+		for i := range e.table[:] {
+			e.table[i] = tableEntry{}
+		}
+		for i := range e.longTable[:] {
+			e.longTable[i] = tableEntry{}
+		}
+		e.cur = e.maxMatchOff
+	}
+
+	s := int32(0)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 1.
+	stepSize := int32(e.o.targetLength)
+	if stepSize == 0 {
+		stepSize++
+	}
+
+	const kSearchStrength = 8
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debug {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		var t int32
+		for {
+
+			nextHashS := hash5(cv, dFastShortTableBits)
+			nextHashL := hash8(cv, dFastLongTableBits)
+			candidateL := e.longTable[nextHashL]
+			candidateS := e.table[nextHashS]
+
+			const repOff = 1
+			repIndex := s - offset1 + repOff
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.table[nextHashS] = entry
+
+			if len(blk.sequences) > 2 {
+				if load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+					// Consider history as well.
+					var seq seq
+					//length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+					length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:]))
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 0
+					seq.offset = 1
+					if debugSequences {
+						println("repeat sequence", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+					s += length + repOff
+					nextEmit = s
+					if s >= sLimit {
+						if debug {
+							println("repeat ended", s, length)
+
+						}
+						break encodeLoop
+					}
+					cv = load6432(src, s)
+					continue
+				}
+			}
+			// Find the offsets of our two matches.
+			coffsetL := s - (candidateL.offset - e.cur)
+			coffsetS := s - (candidateS.offset - e.cur)
+
+			// Check if we have a long match.
+			if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+				// Found a long match, likely at least 8 bytes.
+				// Reference encoder checks all 8 bytes, we only check 4,
+				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+				t = candidateL.offset - e.cur
+				if debug && s <= t {
+					panic("s <= t")
+				}
+				if debug && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+				break
+			}
+
+			// Check if we have a short match.
+			if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+				// found a regular match
+				// See if we can find a long match at s+1
+				const checkAt = 1
+				cv := load6432(src, s+checkAt)
+				nextHashL = hash8(cv, dFastLongTableBits)
+				candidateL = e.longTable[nextHashL]
+				coffsetL = s - (candidateL.offset - e.cur) + checkAt
+
+				// We can store it, since we have at least a 4 byte match.
+				e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
+				if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+					// Found a long match, likely at least 8 bytes.
+					// Reference encoder checks all 8 bytes, we only check 4,
+					// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+					t = candidateL.offset - e.cur
+					s += checkAt
+					if debugMatches {
+						println("long match (after short)")
+					}
+					break
+				}
+
+				t = candidateS.offset - e.cur
+				if debug && s <= t {
+					panic("s <= t")
+				}
+				if debug && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debug && t < 0 {
+					panic("t<0")
+				}
+				if debugMatches {
+					println("short match")
+				}
+				break
+			}
+
+			// No match found, move forward in input.
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+
+		// A 4-byte match has been found. Update recent offsets.
+		// We'll later see if more than 4 bytes.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debug && s <= t {
+			panic("s <= t")
+		}
+
+		// Extend the 4-byte match as long as possible.
+		//l := e.matchlen(s+4, t+4, src) + 4
+		l := int32(matchLen(src[s+4:], src[t+4:])) + 4
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+
+		// Index match start+1 (long) and start+2 (short)
+		index0 := s - l + 1
+		// Index match end-2 (long) and end-1 (short)
+		index1 := s - 2
+
+		cv0 := load6432(src, index0)
+		cv1 := load6432(src, index1)
+		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+		e.longTable[hash8(cv0, dFastLongTableBits)] = te0
+		e.longTable[hash8(cv1, dFastLongTableBits)] = te1
+		cv0 >>= 8
+		cv1 >>= 8
+		te0.offset++
+		te1.offset++
+		te0.val = uint32(cv0)
+		te1.val = uint32(cv1)
+		e.table[hash5(cv0, dFastShortTableBits)] = te0
+		e.table[hash5(cv1, dFastShortTableBits)] = te1
+
+		cv = load6432(src, s)
+
+		if len(blk.sequences) <= 2 {
+			continue
+		}
+
+		// Check offset 2
+		for {
+			o2 := s - offset2
+			if load3232(src, o2) != uint32(cv) {
+				// Do regular search
+				break
+			}
+
+			// Store this, since we have it.
+			nextHashS := hash5(cv1>>8, dFastShortTableBits)
+			nextHashL := hash8(cv, dFastLongTableBits)
+
+			// We have at least 4 byte match.
+			// No need to check backwards. We come straight from a match
+			//l := 4 + e.matchlen(s+4, o2+4, src)
+			l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
+
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.table[nextHashS] = entry
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				// Finished
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	if debug {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
new file mode 100644
index 0000000..0bdddac
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -0,0 +1,656 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"math/bits"
+
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+const (
+	tableBits      = 15             // Bits used in the table
+	tableSize      = 1 << tableBits // Size of the table
+	tableMask      = tableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
+	maxMatchLength = 131074
+)
+
+type tableEntry struct {
+	val    uint32
+	offset int32
+}
+
+type fastEncoder struct {
+	o encParams
+	// cur is the offset at the start of hist
+	cur int32
+	// maximum offset. Should be at least 2x block size.
+	maxMatchOff int32
+	hist        []byte
+	crc         *xxhash.Digest
+	table       [tableSize]tableEntry
+	tmp         [8]byte
+	blk         *blockEnc
+}
+
+// CRC returns the underlying CRC writer.
+func (e *fastEncoder) CRC() *xxhash.Digest {
+	return e.crc
+}
+
+// AppendCRC will append the CRC to the destination slice and return it.
+func (e *fastEncoder) AppendCRC(dst []byte) []byte {
+	crc := e.crc.Sum(e.tmp[:0])
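+	// Sum emits the 64-bit xxhash big-endian; the zstd frame checksum is the
+	// low 4 bytes stored little-endian, hence the reversed byte order below.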
+	dst = append(dst, crc[7], crc[6], crc[5], crc[4])
+	return dst
+}
+
+// WindowSize returns the window size of the encoder, or, for a known input
+// size > 0, a window just large enough to contain it.
+func (e *fastEncoder) WindowSize(size int) int32 {
+	if size > 0 && size < int(e.maxMatchOff) {
+		b := int32(1) << uint(bits.Len(uint(size)))
+		// Keep minimum window.
+		if b < 1024 {
+			b = 1024
+		}
+		return b
+	}
+	return e.maxMatchOff
+}
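+
+// A small sketch (illustrative): WindowSize rounds a known input size up to
+// the next power of two, with a 1 KB floor, so small inputs do not claim the
+// full match window.
+func exampleWindowSize() int32 {
+	e := fastEncoder{maxMatchOff: 8 << 20}
+	return e.WindowSize(4000) // -> 4096, the next power of two
+}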
+
+// Block returns the current block.
+func (e *fastEncoder) Block() *blockEnc {
+	return e.blk
+}
+
+// Encode mimics functionality in zstd_fast.c
+func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
+	const (
+		inputMargin            = 8
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur > (1<<30)+e.maxMatchOff {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.table[i].offset = v
+		}
+		e.cur = e.maxMatchOff
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 2.
+	stepSize := int32(e.o.targetLength)
+	if stepSize == 0 {
+		stepSize++
+	}
+	stepSize++
+
+	// TEMPLATE
+	const hashLog = tableBits
+	// seems global, but would be nice to tweak.
+	const kSearchStrength = 8
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debug {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		// t will contain the match offset when we find one.
+		// When exiting the search loop, we have already checked 4 bytes.
+		var t int32
+
+		// We do not use repeat offsets across blocks;
+		// this is ensured by not using them for the first 3 matches.
+		canRepeat := len(blk.sequences) > 2
+
+		for {
+			if debug && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHash := hash6(cv, hashLog)
+			nextHash2 := hash6(cv>>8, hashLog)
+			candidate := e.table[nextHash]
+			candidate2 := e.table[nextHash2]
+			repIndex := s - offset1 + 2
+
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+			if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
+				// Consider history as well.
+				var seq seq
+				length := 4 + e.matchlen(s+6, repIndex+4, src)
+
+				seq.matchLen = uint32(length - zstdMinMatch)
+
+				// We might be able to match backwards.
+				// Extend as long as we can.
+				start := s + 2
+				// We end the search early, so we don't risk 0 literals
+				// and have to do special offset treatment.
+				startLimit := nextEmit + 1
+
+				sMin := s - e.maxMatchOff
+				if sMin < 0 {
+					sMin = 0
+				}
+				for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
+					repIndex--
+					start--
+					seq.matchLen++
+				}
+				addLiterals(&seq, start)
+
+				// rep 0
+				seq.offset = 1
+				if debugSequences {
+					println("repeat sequence", seq, "next s:", s)
+				}
+				blk.sequences = append(blk.sequences, seq)
+				s += length + 2
+				nextEmit = s
+				if s >= sLimit {
+					if debug {
+						println("repeat ended", s, lenght)
+
+					}
+					break encodeLoop
+				}
+				cv = load6432(src, s)
+				continue
+			}
+			coffset0 := s - (candidate.offset - e.cur)
+			coffset1 := s - (candidate2.offset - e.cur) + 1
+			if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
+				// found a regular match
+				t = candidate.offset - e.cur
+				if debug && s <= t {
+					panic("s <= t")
+				}
+				if debug && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				break
+			}
+
+			if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
+				// found a regular match
+				t = candidate2.offset - e.cur
+				s++
+				if debug && s <= t {
+					panic("s <= t")
+				}
+				if debug && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debug && t < 0 {
+					panic("t<0")
+				}
+				break
+			}
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+		// A 4-byte match has been found. We'll later see if more than 4 bytes.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debug && s <= t {
+			panic("s <= t")
+		}
+
+		if debug && canRepeat && int(offset1) > len(src) {
+			panic("invalid offset")
+		}
+
+		// Extend the 4-byte match as long as possible.
+		l := e.matchlen(s+4, t+4, src) + 4
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence.
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		// Don't use repeat offsets
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+		cv = load6432(src, s)
+
+		// Check offset 2
+		if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
+			// We have at least 4 byte match.
+			// No need to check backwards. We come straight from a match
+			l := 4 + e.matchlen(s+4, o2+4, src)
+
+			// Store this, since we have it.
+			nextHash := hash6(cv, hashLog)
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				break encodeLoop
+			}
+			// Prepare next loop.
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	if debug {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// The most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+	const (
+		inputMargin            = 8
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+	)
+	if debug {
+		if len(src) > maxBlockSize {
+			panic("src too big")
+		}
+	}
+	// Protect against e.cur wraparound.
+	if e.cur > (1<<30)+e.maxMatchOff {
+		for i := range e.table[:] {
+			e.table[i] = tableEntry{}
+		}
+		e.cur = e.maxMatchOff
+	}
+
+	s := int32(0)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 2.
+	const stepSize = 2
+
+	// TEMPLATE
+	const hashLog = tableBits
+	// seems global, but would be nice to tweak.
+	const kSearchStrength = 8
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debug {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		// t will contain the match offset when we find one.
+		// When exiting the search loop, we have already checked 4 bytes.
+		var t int32
+
+		// We do not use repeat offsets across blocks;
+		// this is ensured by not using them for the first 3 matches.
+
+		for {
+			nextHash := hash6(cv, hashLog)
+			nextHash2 := hash6(cv>>8, hashLog)
+			candidate := e.table[nextHash]
+			candidate2 := e.table[nextHash2]
+			repIndex := s - offset1 + 2
+
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+			if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
+				// Consider history as well.
+				var seq seq
+				// length := 4 + e.matchlen(s+6, repIndex+4, src)
+				length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:]))
+
+				seq.matchLen = uint32(length - zstdMinMatch)
+
+				// We might be able to match backwards.
+				// Extend as long as we can.
+				start := s + 2
+				// We end the search early, so we don't risk 0 literals
+				// and have to do special offset treatment.
+				startLimit := nextEmit + 1
+
+				sMin := s - e.maxMatchOff
+				if sMin < 0 {
+					sMin = 0
+				}
+				for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] {
+					repIndex--
+					start--
+					seq.matchLen++
+				}
+				addLiterals(&seq, start)
+
+				// rep 0
+				seq.offset = 1
+				if debugSequences {
+					println("repeat sequence", seq, "next s:", s)
+				}
+				blk.sequences = append(blk.sequences, seq)
+				s += length + 2
+				nextEmit = s
+				if s >= sLimit {
+					if debug {
+						println("repeat ended", s, lenght)
+
+					}
+					break encodeLoop
+				}
+				cv = load6432(src, s)
+				continue
+			}
+			coffset0 := s - (candidate.offset - e.cur)
+			coffset1 := s - (candidate2.offset - e.cur) + 1
+			if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
+				// found a regular match
+				t = candidate.offset - e.cur
+				if debug && s <= t {
+					panic("s <= t")
+				}
+				if debug && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				break
+			}
+
+			if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
+				// found a regular match
+				t = candidate2.offset - e.cur
+				s++
+				if debug && s <= t {
+					panic("s <= t")
+				}
+				if debug && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debug && t < 0 {
+					panic("t<0")
+				}
+				break
+			}
+			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+			if s >= sLimit {
+				break encodeLoop
+			}
+			cv = load6432(src, s)
+		}
+		// A 4-byte match has been found. We'll later see if more than 4 bytes.
+		offset2 = offset1
+		offset1 = s - t
+
+		if debug && s <= t {
+			panic("s <= t")
+		}
+
+		// Extend the 4-byte match as long as possible.
+		//l := e.matchlenNoHist(s+4, t+4, src) + 4
+		l := int32(matchLen(src[s+4:], src[t+4:])) + 4
+
+		// Extend backwards
+		tMin := s - e.maxMatchOff
+		if tMin < 0 {
+			tMin = 0
+		}
+		for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
+			s--
+			t--
+			l++
+		}
+
+		// Write our sequence.
+		var seq seq
+		seq.litLen = uint32(s - nextEmit)
+		seq.matchLen = uint32(l - zstdMinMatch)
+		if seq.litLen > 0 {
+			blk.literals = append(blk.literals, src[nextEmit:s]...)
+		}
+		// Don't use repeat offsets
+		seq.offset = uint32(s-t) + 3
+		s += l
+		if debugSequences {
+			println("sequence", seq, "next s:", s)
+		}
+		blk.sequences = append(blk.sequences, seq)
+		nextEmit = s
+		if s >= sLimit {
+			break encodeLoop
+		}
+		cv = load6432(src, s)
+
+		// Check offset 2
+		if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) {
+			// We have at least 4 byte match.
+			// No need to check backwards. We come straight from a match
+			//l := 4 + e.matchlenNoHist(s+4, o2+4, src)
+			l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
+
+			// Store this, since we have it.
+			nextHash := hash6(cv, hashLog)
+			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+			seq.matchLen = uint32(l) - zstdMinMatch
+			seq.litLen = 0
+			// Since litlen is always 0, this is offset 1.
+			seq.offset = 1
+			s += l
+			nextEmit = s
+			if debugSequences {
+				println("sequence", seq, "next s:", s)
+			}
+			blk.sequences = append(blk.sequences, seq)
+
+			// Swap offset 1 and 2.
+			offset1, offset2 = offset2, offset1
+			if s >= sLimit {
+				break encodeLoop
+			}
+			// Prepare next loop.
+			cv = load6432(src, s)
+		}
+	}
+
+	if int(nextEmit) < len(src) {
+		blk.literals = append(blk.literals, src[nextEmit:]...)
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	if debug {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
+
+func (e *fastEncoder) addBlock(src []byte) int32 {
+	// check if we have space already
+	if len(e.hist)+len(src) > cap(e.hist) {
+		if cap(e.hist) == 0 {
+			l := e.maxMatchOff * 2
+			// Make it at least 1MB.
+			if l < 1<<20 {
+				l = 1 << 20
+			}
+			e.hist = make([]byte, 0, l)
+		} else {
+			if cap(e.hist) < int(e.maxMatchOff*2) {
+				panic("unexpected buffer size")
+			}
+			// Move down
+			offset := int32(len(e.hist)) - e.maxMatchOff
+			copy(e.hist[0:e.maxMatchOff], e.hist[offset:])
+			e.cur += offset
+			e.hist = e.hist[:e.maxMatchOff]
+		}
+	}
+	s := int32(len(e.hist))
+	e.hist = append(e.hist, src...)
+	return s
+}
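A worked sketch of the slide above, with illustrative numbers only (assuming maxMatchOff is the default 4 MB window, so the buffer capacity is at least 8 MB):

	// len(e.hist) == 8<<20 and the incoming src no longer fits, so:
	//   offset = int32(len(e.hist)) - maxMatchOff = 4<<20
	// The last 4 MB are copied to the front and e.cur grows by 4<<20,
	// so stale table entries now produce offsets larger than maxMatchOff
	// and are rejected by the coffset checks in Encode.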
+
+// UseBlock will replace the block with the provided one,
+// but transfer recent offsets from the previous.
+func (e *fastEncoder) UseBlock(enc *blockEnc) {
+	enc.reset(e.blk)
+	e.blk = enc
+}
+
+func (e *fastEncoder) matchlenNoHist(s, t int32, src []byte) int32 {
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:], src[t:]))
+}
+
+func (e *fastEncoder) matchlen(s, t int32, src []byte) int32 {
+	if debug {
+		if s < 0 {
+			panic("s<0")
+		}
+		if t < 0 {
+			panic("t<0")
+		}
+		if s-t > e.maxMatchOff {
+			panic(s - t)
+		}
+	}
+	s1 := int(s) + maxMatchLength - 4
+	if s1 > len(src) {
+		s1 = len(src)
+	}
+
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:s1], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastEncoder) Reset() {
+	if e.blk == nil {
+		e.blk = &blockEnc{}
+		e.blk.init()
+	} else {
+		e.blk.reset(nil)
+	}
+	e.blk.initNewEncode()
+	if e.crc == nil {
+		e.crc = xxhash.New()
+	} else {
+		e.crc.Reset()
+	}
+	if cap(e.hist) < int(e.maxMatchOff*2) {
+		l := e.maxMatchOff * 2
+		// Make it at least 1MB.
+		if l < 1<<20 {
+			l = 1 << 20
+		}
+		e.hist = make([]byte, 0, l)
+	}
+	// We offset current position so everything will be out of reach
+	e.cur += e.maxMatchOff + int32(len(e.hist))
+	e.hist = e.hist[:0]
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_params.go b/vendor/github.com/klauspost/compress/zstd/enc_params.go
new file mode 100644
index 0000000..b6779ec
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_params.go
@@ -0,0 +1,154 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+type encParams struct {
+	// largest match distance : larger == more compression, more memory needed during decompression
+	windowLog uint8
+
+	// fully searched segment : larger == more compression, slower, more memory (useless for fast)
+	chainLog uint8
+
+	// dispatch table : larger == faster, more memory
+	hashLog uint8
+
+	// number of searches : larger == more compression, slower
+	searchLog uint8
+
+	// match length searched : larger == faster decompression, sometimes less compression
+	minMatch uint8
+
+	// acceptable match size for optimal parser (only) : larger == more compression, slower
+	targetLength uint32
+
+	// see the strategy definition below
+	strategy strategy
+}
+
+// strategy defines the algorithm to use when generating sequences.
+type strategy uint8
+
+const (
+	// Compression strategies, listed from fastest to strongest
+	strategyFast strategy = iota + 1
+	strategyDfast
+	strategyGreedy
+	strategyLazy
+	strategyLazy2
+	strategyBtlazy2
+	strategyBtopt
+	strategyBtultra
+	strategyBtultra2
+	// note : new strategies _might_ be added in the future.
+	//   Only the order (from fast to strong) is guaranteed.
+)
+
+var defEncParams = [4][]encParams{
+	{ // "default" - for any srcSize > 256 KB
+		// W,  C,  H,  S,  L, TL, strat
+		{19, 12, 13, 1, 6, 1, strategyFast},       // base for negative levels
+		{19, 13, 14, 1, 7, 0, strategyFast},       // level  1
+		{20, 15, 16, 1, 6, 0, strategyFast},       // level  2
+		{21, 16, 17, 1, 5, 1, strategyDfast},      // level  3
+		{21, 18, 18, 1, 5, 1, strategyDfast},      // level  4
+		{21, 18, 19, 2, 5, 2, strategyGreedy},     // level  5
+		{21, 19, 19, 3, 5, 4, strategyGreedy},     // level  6
+		{21, 19, 19, 3, 5, 8, strategyLazy},       // level  7
+		{21, 19, 19, 3, 5, 16, strategyLazy2},     // level  8
+		{21, 19, 20, 4, 5, 16, strategyLazy2},     // level  9
+		{22, 20, 21, 4, 5, 16, strategyLazy2},     // level 10
+		{22, 21, 22, 4, 5, 16, strategyLazy2},     // level 11
+		{22, 21, 22, 5, 5, 16, strategyLazy2},     // level 12
+		{22, 21, 22, 5, 5, 32, strategyBtlazy2},   // level 13
+		{22, 22, 23, 5, 5, 32, strategyBtlazy2},   // level 14
+		{22, 23, 23, 6, 5, 32, strategyBtlazy2},   // level 15
+		{22, 22, 22, 5, 5, 48, strategyBtopt},     // level 16
+		{23, 23, 22, 5, 4, 64, strategyBtopt},     // level 17
+		{23, 23, 22, 6, 3, 64, strategyBtultra},   // level 18
+		{23, 24, 22, 7, 3, 256, strategyBtultra2}, // level 19
+		{25, 25, 23, 7, 3, 256, strategyBtultra2}, // level 20
+		{26, 26, 24, 7, 3, 512, strategyBtultra2}, // level 21
+		{27, 27, 25, 9, 3, 999, strategyBtultra2}, // level 22
+	},
+	{ // for srcSize <= 256 KB
+		// W,  C,  H,  S,  L,  T, strat
+		{18, 12, 13, 1, 5, 1, strategyFast},        // base for negative levels
+		{18, 13, 14, 1, 6, 0, strategyFast},        // level  1
+		{18, 14, 14, 1, 5, 1, strategyDfast},       // level  2
+		{18, 16, 16, 1, 4, 1, strategyDfast},       // level  3
+		{18, 16, 17, 2, 5, 2, strategyGreedy},      // level  4.
+		{18, 18, 18, 3, 5, 2, strategyGreedy},      // level  5.
+		{18, 18, 19, 3, 5, 4, strategyLazy},        // level  6.
+		{18, 18, 19, 4, 4, 4, strategyLazy},        // level  7
+		{18, 18, 19, 4, 4, 8, strategyLazy2},       // level  8
+		{18, 18, 19, 5, 4, 8, strategyLazy2},       // level  9
+		{18, 18, 19, 6, 4, 8, strategyLazy2},       // level 10
+		{18, 18, 19, 5, 4, 12, strategyBtlazy2},    // level 11.
+		{18, 19, 19, 7, 4, 12, strategyBtlazy2},    // level 12.
+		{18, 18, 19, 4, 4, 16, strategyBtopt},      // level 13
+		{18, 18, 19, 4, 3, 32, strategyBtopt},      // level 14.
+		{18, 18, 19, 6, 3, 128, strategyBtopt},     // level 15.
+		{18, 19, 19, 6, 3, 128, strategyBtultra},   // level 16.
+		{18, 19, 19, 8, 3, 256, strategyBtultra},   // level 17.
+		{18, 19, 19, 6, 3, 128, strategyBtultra2},  // level 18.
+		{18, 19, 19, 8, 3, 256, strategyBtultra2},  // level 19.
+		{18, 19, 19, 10, 3, 512, strategyBtultra2}, // level 20.
+		{18, 19, 19, 12, 3, 512, strategyBtultra2}, // level 21.
+		{18, 19, 19, 13, 3, 999, strategyBtultra2}, // level 22.
+	},
+	{ // for srcSize <= 128 KB
+		// W,  C,  H,  S,  L,  T, strat
+		{17, 12, 12, 1, 5, 1, strategyFast},        // base for negative levels
+		{17, 12, 13, 1, 6, 0, strategyFast},        // level  1
+		{17, 13, 15, 1, 5, 0, strategyFast},        // level  2
+		{17, 15, 16, 2, 5, 1, strategyDfast},       // level  3
+		{17, 17, 17, 2, 4, 1, strategyDfast},       // level  4
+		{17, 16, 17, 3, 4, 2, strategyGreedy},      // level  5
+		{17, 17, 17, 3, 4, 4, strategyLazy},        // level  6
+		{17, 17, 17, 3, 4, 8, strategyLazy2},       // level  7
+		{17, 17, 17, 4, 4, 8, strategyLazy2},       // level  8
+		{17, 17, 17, 5, 4, 8, strategyLazy2},       // level  9
+		{17, 17, 17, 6, 4, 8, strategyLazy2},       // level 10
+		{17, 17, 17, 5, 4, 8, strategyBtlazy2},     // level 11
+		{17, 18, 17, 7, 4, 12, strategyBtlazy2},    // level 12
+		{17, 18, 17, 3, 4, 12, strategyBtopt},      // level 13.
+		{17, 18, 17, 4, 3, 32, strategyBtopt},      // level 14.
+		{17, 18, 17, 6, 3, 256, strategyBtopt},     // level 15.
+		{17, 18, 17, 6, 3, 128, strategyBtultra},   // level 16.
+		{17, 18, 17, 8, 3, 256, strategyBtultra},   // level 17.
+		{17, 18, 17, 10, 3, 512, strategyBtultra},  // level 18.
+		{17, 18, 17, 5, 3, 256, strategyBtultra2},  // level 19.
+		{17, 18, 17, 7, 3, 512, strategyBtultra2},  // level 20.
+		{17, 18, 17, 9, 3, 512, strategyBtultra2},  // level 21.
+		{17, 18, 17, 11, 3, 999, strategyBtultra2}, // level 22.
+	},
+	{ // for srcSize <= 16 KB
+		// W,  C,  H,  S,  L,  T, strat
+		{14, 12, 13, 1, 5, 1, strategyFast},        // base for negative levels
+		{14, 14, 15, 1, 5, 0, strategyFast},        // level  1
+		{14, 14, 15, 1, 4, 0, strategyFast},        // level  2
+		{14, 14, 15, 2, 4, 1, strategyDfast},       // level  3
+		{14, 14, 14, 4, 4, 2, strategyGreedy},      // level  4
+		{14, 14, 14, 3, 4, 4, strategyLazy},        // level  5.
+		{14, 14, 14, 4, 4, 8, strategyLazy2},       // level  6
+		{14, 14, 14, 6, 4, 8, strategyLazy2},       // level  7
+		{14, 14, 14, 8, 4, 8, strategyLazy2},       // level  8.
+		{14, 15, 14, 5, 4, 8, strategyBtlazy2},     // level  9.
+		{14, 15, 14, 9, 4, 8, strategyBtlazy2},     // level 10.
+		{14, 15, 14, 3, 4, 12, strategyBtopt},      // level 11.
+		{14, 15, 14, 4, 3, 24, strategyBtopt},      // level 12.
+		{14, 15, 14, 5, 3, 32, strategyBtultra},    // level 13.
+		{14, 15, 15, 6, 3, 64, strategyBtultra},    // level 14.
+		{14, 15, 15, 7, 3, 256, strategyBtultra},   // level 15.
+		{14, 15, 15, 5, 3, 48, strategyBtultra2},   // level 16.
+		{14, 15, 15, 6, 3, 128, strategyBtultra2},  // level 17.
+		{14, 15, 15, 7, 3, 256, strategyBtultra2},  // level 18.
+		{14, 15, 15, 8, 3, 256, strategyBtultra2},  // level 19.
+		{14, 15, 15, 8, 3, 512, strategyBtultra2},  // level 20.
+		{14, 15, 15, 9, 3, 512, strategyBtultra2},  // level 21.
+		{14, 15, 15, 10, 3, 999, strategyBtultra2}, // level 22.
+	},
+}
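The table above is indexed first by a source-size class and then by level. The lookup itself is not part of this file; a hypothetical selector, for illustration only, might look like the following (paramsFor is an assumed name, not part of the package):

func paramsFor(srcSize int64, level int) encParams {
	row := 0 // "default": size unknown or > 256 KB
	switch {
	case srcSize > 0 && srcSize <= 16<<10:
		row = 3
	case srcSize > 0 && srcSize <= 128<<10:
		row = 2
	case srcSize > 0 && srcSize <= 256<<10:
		row = 1
	}
	if level < 1 {
		level = 1 // index 0 is the base for negative levels
	} else if level > 22 {
		level = 22
	}
	return defEncParams[row][level]
}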
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
new file mode 100644
index 0000000..366dd66
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -0,0 +1,539 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"crypto/rand"
+	"fmt"
+	"io"
+	rdebug "runtime/debug"
+	"sync"
+
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+// Encoder provides encoding to Zstandard.
+// An Encoder can be used either to compress a stream via the
+// io.WriteCloser interface it supports, or to compress multiple
+// independent buffers via the EncodeAll function.
+// For smaller inputs, EncodeAll is encouraged.
+// Use NewWriter to create a new instance.
+type Encoder struct {
+	o        encoderOptions
+	encoders chan encoder
+	state    encoderState
+	init     sync.Once
+}
+
+type encoder interface {
+	Encode(blk *blockEnc, src []byte)
+	EncodeNoHist(blk *blockEnc, src []byte)
+	Block() *blockEnc
+	CRC() *xxhash.Digest
+	AppendCRC([]byte) []byte
+	WindowSize(size int) int32
+	UseBlock(*blockEnc)
+	Reset()
+}
+
+type encoderState struct {
+	w             io.Writer
+	filling       []byte
+	current       []byte
+	previous      []byte
+	encoder       encoder
+	writing       *blockEnc
+	err           error
+	writeErr      error
+	nWritten      int64
+	headerWritten bool
+	eofWritten    bool
+
+	// This waitgroup indicates an encode is running.
+	wg sync.WaitGroup
+	// This waitgroup indicates we have a block encoding/writing.
+	wWg sync.WaitGroup
+}
+
+// NewWriter will create a new Zstandard encoder.
+// If the encoder will only be used for encoding blocks, a nil writer can be used.
+func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) {
+	initPredefined()
+	var e Encoder
+	e.o.setDefault()
+	for _, o := range opts {
+		err := o(&e.o)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if w != nil {
+		e.Reset(w)
+	} else {
+		e.init.Do(func() {
+			e.initialize()
+		})
+	}
+	return &e, nil
+}
+
+func (e *Encoder) initialize() {
+	e.encoders = make(chan encoder, e.o.concurrent)
+	for i := 0; i < e.o.concurrent; i++ {
+		e.encoders <- e.o.encoder()
+	}
+}
+
+// Reset will re-initialize the writer and new writes will encode to the supplied writer
+// as a new, independent stream.
+func (e *Encoder) Reset(w io.Writer) {
+	e.init.Do(func() {
+		e.initialize()
+	})
+	s := &e.state
+	s.wg.Wait()
+	s.wWg.Wait()
+	if cap(s.filling) == 0 {
+		s.filling = make([]byte, 0, e.o.blockSize)
+	}
+	if cap(s.current) == 0 {
+		s.current = make([]byte, 0, e.o.blockSize)
+	}
+	if cap(s.previous) == 0 {
+		s.previous = make([]byte, 0, e.o.blockSize)
+	}
+	if s.encoder == nil {
+		s.encoder = e.o.encoder()
+	}
+	if s.writing == nil {
+		s.writing = &blockEnc{}
+		s.writing.init()
+	}
+	s.writing.initNewEncode()
+	s.filling = s.filling[:0]
+	s.current = s.current[:0]
+	s.previous = s.previous[:0]
+	s.encoder.Reset()
+	s.headerWritten = false
+	s.eofWritten = false
+	s.w = w
+	s.err = nil
+	s.nWritten = 0
+	s.writeErr = nil
+}
+
+// Write data to the encoder.
+// Input data will be buffered and as the buffer fills up
+// content will be compressed and written to the output.
+// When done writing, use Close to flush the remaining output
+// and write CRC if requested.
+func (e *Encoder) Write(p []byte) (n int, err error) {
+	s := &e.state
+	for len(p) > 0 {
+		if len(p)+len(s.filling) < e.o.blockSize {
+			if e.o.crc {
+				_, _ = s.encoder.CRC().Write(p)
+			}
+			s.filling = append(s.filling, p...)
+			return n + len(p), nil
+		}
+		add := p
+		if len(p)+len(s.filling) > e.o.blockSize {
+			add = add[:e.o.blockSize-len(s.filling)]
+		}
+		if e.o.crc {
+			_, _ = s.encoder.CRC().Write(add)
+		}
+		s.filling = append(s.filling, add...)
+		p = p[len(add):]
+		n += len(add)
+		if len(s.filling) < e.o.blockSize {
+			return n, nil
+		}
+		err := e.nextBlock(false)
+		if err != nil {
+			return n, err
+		}
+		if debug && len(s.filling) > 0 {
+			panic(len(s.filling))
+		}
+	}
+	return n, nil
+}
+
+// nextBlock will synchronize and start compressing input in e.state.filling.
+// If an error has occurred during encoding it will be returned.
+func (e *Encoder) nextBlock(final bool) error {
+	s := &e.state
+	// Wait for current block.
+	s.wg.Wait()
+	if s.err != nil {
+		return s.err
+	}
+	if len(s.filling) > e.o.blockSize {
+		return fmt.Errorf("block > maxStoreBlockSize")
+	}
+	if !s.headerWritten {
+		var tmp [maxHeaderSize]byte
+		fh := frameHeader{
+			ContentSize:   0,
+			WindowSize:    uint32(s.encoder.WindowSize(0)),
+			SingleSegment: false,
+			Checksum:      e.o.crc,
+			DictID:        0,
+		}
+		dst, err := fh.appendTo(tmp[:0])
+		if err != nil {
+			return err
+		}
+		s.headerWritten = true
+		s.wWg.Wait()
+		var n2 int
+		n2, s.err = s.w.Write(dst)
+		if s.err != nil {
+			return s.err
+		}
+		s.nWritten += int64(n2)
+	}
+	if s.eofWritten {
+		// Ensure we only write it once.
+		final = false
+	}
+
+	if len(s.filling) == 0 {
+		// Final block, but no data.
+		if final {
+			enc := s.encoder
+			blk := enc.Block()
+			blk.reset(nil)
+			blk.last = true
+			blk.encodeRaw(nil)
+			s.wWg.Wait()
+			_, s.err = s.w.Write(blk.output)
+			s.nWritten += int64(len(blk.output))
+			s.eofWritten = true
+		}
+		return s.err
+	}
+
+	// Move blocks forward.
+	s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
+	s.wg.Add(1)
+	go func(src []byte) {
+		if debug {
+			println("Adding block,", len(src), "bytes, final:", final)
+		}
+		defer func() {
+			if r := recover(); r != nil {
+				s.err = fmt.Errorf("panic while encoding: %v", r)
+				rdebug.PrintStack()
+			}
+			s.wg.Done()
+		}()
+		enc := s.encoder
+		blk := enc.Block()
+		enc.Encode(blk, src)
+		blk.last = final
+		if final {
+			s.eofWritten = true
+		}
+		// Wait for pending writes.
+		s.wWg.Wait()
+		if s.writeErr != nil {
+			s.err = s.writeErr
+			return
+		}
+		// Transfer encoders from previous write block.
+		blk.swapEncoders(s.writing)
+		// Transfer recent offsets to next.
+		enc.UseBlock(s.writing)
+		s.writing = blk
+		s.wWg.Add(1)
+		go func() {
+			defer func() {
+				if r := recover(); r != nil {
+					s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r)
+					rdebug.PrintStack()
+				}
+				s.wWg.Done()
+			}()
+			err := errIncompressible
+			// If we got the exact same number of literals as input,
+			// assume the literals cannot be compressed.
+			if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
+				err = blk.encode(e.o.noEntropy)
+			}
+			switch err {
+			case errIncompressible:
+				if debug {
+					println("Storing incompressible block as raw")
+				}
+				blk.encodeRaw(src)
+				// In fast mode, we do not transfer offsets, so we don't have to deal with changing the offset history.
+			case nil:
+			default:
+				s.writeErr = err
+				return
+			}
+			_, s.writeErr = s.w.Write(blk.output)
+			s.nWritten += int64(len(blk.output))
+		}()
+	}(s.current)
+	return nil
+}
+
+// ReadFrom reads data from r until EOF or error.
+// The return value n is the number of bytes read.
+// Any error except io.EOF encountered during the read is also returned.
+//
+// The Copy function uses ReaderFrom if available.
+func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) {
+	if debug {
+		println("Using ReadFrom")
+	}
+	// Maybe handle stuff queued?
+	e.state.filling = e.state.filling[:e.o.blockSize]
+	src := e.state.filling
+	for {
+		n2, err := r.Read(src)
+		_, _ = e.state.encoder.CRC().Write(src[:n2])
+		// src is now the unfilled part...
+		src = src[n2:]
+		n += int64(n2)
+		switch err {
+		case io.EOF:
+			e.state.filling = e.state.filling[:len(e.state.filling)-len(src)]
+			if debug {
+				println("ReadFrom: got EOF final block:", len(e.state.filling))
+			}
+			return n, e.nextBlock(true)
+		default:
+			if debug {
+				println("ReadFrom: got error:", err)
+			}
+			e.state.err = err
+			return n, err
+		case nil:
+		}
+		if len(src) > 0 {
+			if debug {
+				println("ReadFrom: got space left in source:", len(src))
+			}
+			continue
+		}
+		err = e.nextBlock(false)
+		if err != nil {
+			return n, err
+		}
+		e.state.filling = e.state.filling[:e.o.blockSize]
+		src = e.state.filling
+	}
+}
+
+// Flush will send the currently written data to output
+// and block until everything has been written.
+// This should only be used on rare occasions where pushing the currently queued data is critical.
+func (e *Encoder) Flush() error {
+	s := &e.state
+	if len(s.filling) > 0 {
+		err := e.nextBlock(false)
+		if err != nil {
+			return err
+		}
+	}
+	s.wg.Wait()
+	s.wWg.Wait()
+	if s.err != nil {
+		return s.err
+	}
+	return s.writeErr
+}
+
+// Close will flush the final output and close the stream.
+// The function will block until everything has been written.
+// The Encoder can still be re-used after calling this.
+func (e *Encoder) Close() error {
+	s := &e.state
+	if s.encoder == nil {
+		return nil
+	}
+	err := e.nextBlock(true)
+	if err != nil {
+		return err
+	}
+	s.wg.Wait()
+	s.wWg.Wait()
+
+	if s.err != nil {
+		return s.err
+	}
+	if s.writeErr != nil {
+		return s.writeErr
+	}
+
+	// Write CRC
+	if e.o.crc && s.err == nil {
+		// heap alloc.
+		var tmp [4]byte
+		_, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0]))
+		s.nWritten += 4
+	}
+
+	// Add padding with content from crypto/rand.Reader
+	if s.err == nil && e.o.pad > 0 {
+		add := calcSkippableFrame(s.nWritten, int64(e.o.pad))
+		frame, err := skippableFrame(s.filling[:0], add, rand.Reader)
+		if err != nil {
+			return err
+		}
+		_, s.err = s.w.Write(frame)
+	}
+	return s.err
+}
+
+// EncodeAll will encode all input in src and append it to dst.
+// This function can be called concurrently, but each call will only run on a single goroutine.
+// If empty input is given, nothing is returned, unless WithZeroFrames is specified.
+// Encoded blocks can be concatenated and the result will be the combined input stream.
+// Data compressed with EncodeAll can be decoded with the Decoder,
+// using either a stream or DecodeAll.
+func (e *Encoder) EncodeAll(src, dst []byte) []byte {
+	if len(src) == 0 {
+		if e.o.fullZero {
+			// Add frame header.
+			fh := frameHeader{
+				ContentSize:   0,
+				WindowSize:    MinWindowSize,
+				SingleSegment: true,
+				// Adding a checksum would be a waste of space.
+				Checksum: false,
+				DictID:   0,
+			}
+			dst, _ = fh.appendTo(dst)
+
+			// Write raw block as last one only.
+			var blk blockHeader
+			blk.setSize(0)
+			blk.setType(blockTypeRaw)
+			blk.setLast(true)
+			dst = blk.appendTo(dst)
+		}
+		return dst
+	}
+	e.init.Do(func() {
+		e.o.setDefault()
+		e.initialize()
+	})
+	enc := <-e.encoders
+	defer func() {
+		// Release encoder reference to last block.
+		enc.Reset()
+		e.encoders <- enc
+	}()
+	enc.Reset()
+	blk := enc.Block()
+	// Use single segments when above minimum window and below 1MB.
+	single := len(src) < 1<<20 && len(src) > MinWindowSize
+	if e.o.single != nil {
+		single = *e.o.single
+	}
+	fh := frameHeader{
+		ContentSize:   uint64(len(src)),
+		WindowSize:    uint32(enc.WindowSize(len(src))),
+		SingleSegment: single,
+		Checksum:      e.o.crc,
+		DictID:        0,
+	}
+
+	// If less than 1MB, allocate a buffer up front.
+	if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 {
+		dst = make([]byte, 0, len(src))
+	}
+	dst, err := fh.appendTo(dst)
+	if err != nil {
+		panic(err)
+	}
+
+	if len(src) <= e.o.blockSize && len(src) <= maxBlockSize {
+		// Slightly faster with no history and everything in one block.
+		if e.o.crc {
+			_, _ = enc.CRC().Write(src)
+		}
+		blk.reset(nil)
+		blk.last = true
+		enc.EncodeNoHist(blk, src)
+
+		// If we got the exact same number of literals as input,
+		// assume the literals cannot be compressed.
+		err := errIncompressible
+		oldout := blk.output
+		if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
+			// Output directly to dst
+			blk.output = dst
+			err = blk.encode(e.o.noEntropy)
+		}
+
+		switch err {
+		case errIncompressible:
+			if debug {
+				println("Storing incompressible block as raw")
+			}
+			dst = blk.encodeRawTo(dst, src)
+		case nil:
+			dst = blk.output
+		default:
+			panic(err)
+		}
+		blk.output = oldout
+	} else {
+		for len(src) > 0 {
+			todo := src
+			if len(todo) > e.o.blockSize {
+				todo = todo[:e.o.blockSize]
+			}
+			src = src[len(todo):]
+			if e.o.crc {
+				_, _ = enc.CRC().Write(todo)
+			}
+			blk.reset(nil)
+			blk.pushOffsets()
+			enc.Encode(blk, todo)
+			if len(src) == 0 {
+				blk.last = true
+			}
+			err := errIncompressible
+			// If we got the exact same number of literals as input,
+			// assume the literals cannot be compressed.
+			if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
+				err = blk.encode(e.o.noEntropy)
+			}
+
+			switch err {
+			case errIncompressible:
+				if debug {
+					println("Storing incompressible block as raw")
+				}
+				dst = blk.encodeRawTo(dst, todo)
+				blk.popOffsets()
+			case nil:
+				dst = append(dst, blk.output...)
+			default:
+				panic(err)
+			}
+		}
+	}
+	if e.o.crc {
+		dst = enc.AppendCRC(dst)
+	}
+	// Add padding with content from crypto/rand.Reader
+	if e.o.pad > 0 {
+		add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad))
+		dst, err = skippableFrame(dst, add, rand.Reader)
+		if err != nil {
+			panic(err)
+		}
+	}
+	return dst
+}
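A minimal usage sketch of the two modes described above, assuming the package is imported as github.com/klauspost/compress/zstd:

package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Streaming: Write buffers input into blocks; Close flushes the
	// final block and appends the CRC when enabled.
	var out bytes.Buffer
	w, err := zstd.NewWriter(&out)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("some data")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// Buffer-to-buffer: EncodeAll appends one complete frame to dst.
	// A nil writer is allowed when only EncodeAll is used.
	enc, _ := zstd.NewWriter(nil)
	frame := enc.EncodeAll([]byte("some data"), nil)
	_ = frame
}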
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
new file mode 100644
index 0000000..40eb457
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -0,0 +1,231 @@
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"runtime"
+	"strings"
+)
+
+// EOption is an option for creating an encoder.
+type EOption func(*encoderOptions) error
+
+// encoderOptions retains the accumulated state of multiple options.
+type encoderOptions struct {
+	concurrent int
+	crc        bool
+	single     *bool
+	pad        int
+	blockSize  int
+	windowSize int
+	level      EncoderLevel
+	fullZero   bool
+	noEntropy  bool
+}
+
+func (o *encoderOptions) setDefault() {
+	*o = encoderOptions{
+		// use less ram: true for now, but may change.
+		concurrent: runtime.GOMAXPROCS(0),
+		crc:        true,
+		single:     nil,
+		blockSize:  1 << 16,
+		windowSize: 1 << 22,
+		level:      SpeedDefault,
+	}
+}
+
+// encoder returns an encoder with the selected options.
+func (o encoderOptions) encoder() encoder {
+	switch o.level {
+	case SpeedDefault:
+		return &doubleFastEncoder{fastEncoder: fastEncoder{maxMatchOff: int32(o.windowSize)}}
+	case SpeedFastest:
+		return &fastEncoder{maxMatchOff: int32(o.windowSize)}
+	}
+	panic("unknown compression level")
+}
+
+// WithEncoderCRC will add CRC value to output.
+// Output will be 4 bytes larger.
+func WithEncoderCRC(b bool) EOption {
+	return func(o *encoderOptions) error { o.crc = b; return nil }
+}
+
+// WithEncoderConcurrency will set the concurrency,
+// meaning the maximum number of encoders to run concurrently.
+// The value supplied must be at least 1.
+// By default this will be set to GOMAXPROCS.
+func WithEncoderConcurrency(n int) EOption {
+	return func(o *encoderOptions) error {
+		if n <= 0 {
+			return fmt.Errorf("concurrency must be at least 1")
+		}
+		o.concurrent = n
+		return nil
+	}
+}
+
+// WithWindowSize will set the maximum allowed back-reference distance.
+// The value must be a power of two between MinWindowSize and MaxWindowSize.
+// A larger value will enable better compression but allocate more memory and,
+// for above-default values, take considerably longer.
+// The default value is determined by the compression level.
+func WithWindowSize(n int) EOption {
+	return func(o *encoderOptions) error {
+		switch {
+		case n < MinWindowSize:
+			return fmt.Errorf("window size must be at least %d", MinWindowSize)
+		case n > MaxWindowSize:
+			return fmt.Errorf("window size must be at most %d", MaxWindowSize)
+		case (n & (n - 1)) != 0:
+			return errors.New("window size must be a power of 2")
+		}
+
+		o.windowSize = n
+		if o.blockSize > o.windowSize {
+			o.blockSize = o.windowSize
+		}
+		return nil
+	}
+}
+
+// WithEncoderPadding will add padding to all output so the size will be a multiple of n.
+// This can be used to obfuscate the exact output size or make blocks of a certain size.
+// The contents will be a skippable frame, so it will be invisible to the decoder.
+// n must be > 0 and <= 1GB, 1<<30 bytes.
+// The padded area will be filled with data from crypto/rand.Reader.
+// If `EncodeAll` is used with data already in the destination, the total size will be a multiple of this.
+func WithEncoderPadding(n int) EOption {
+	return func(o *encoderOptions) error {
+		if n <= 0 {
+			return fmt.Errorf("padding must be at least 1")
+		}
+		// No need to waste our time.
+		if n == 1 {
+			n = 0
+		}
+		if n > 1<<30 {
+			return fmt.Errorf("padding must be less than 1GB (1<<30 bytes)")
+		}
+		o.pad = n
+		return nil
+	}
+}
+
+// EncoderLevel predefines encoder compression levels.
+// Only use the constants made available, since the actual mapping
+// of these values are very likely to change and your compression could change
+// unpredictably when upgrading the library.
+type EncoderLevel int
+
+const (
+	speedNotSet EncoderLevel = iota
+
+	// SpeedFastest will choose the fastest reasonable compression.
+	// This is roughly equivalent to the fastest Zstandard mode.
+	SpeedFastest
+
+	// SpeedDefault is the default "pretty fast" compression option.
+	// This is roughly equivalent to the default Zstandard mode (level 3).
+	SpeedDefault
+
+	// speedLast should be kept as the last actual compression option.
+	// It is not for external usage, but is used to keep track of the valid options.
+	speedLast
+
+	// SpeedBetterCompression will (in the future) yield better compression than the default,
+	// but at approximately 4x the CPU usage of the default.
+	// For now this is not implemented.
+	SpeedBetterCompression = SpeedDefault
+
+	// SpeedBestCompression will choose the best available compression option.
+	// For now this is not implemented.
+	SpeedBestCompression = SpeedDefault
+)
+
+// EncoderLevelFromString will convert a string representation of an encoding level back
+// to a compression level. The comparison is case insensitive.
+// If the string wasn't recognized, (false, SpeedDefault) will be returned.
+func EncoderLevelFromString(s string) (bool, EncoderLevel) {
+	for l := EncoderLevel(speedNotSet + 1); l < speedLast; l++ {
+		if strings.EqualFold(s, l.String()) {
+			return true, l
+		}
+	}
+	return false, SpeedDefault
+}
+
+// EncoderLevelFromZstd will return the encoder level that most closely matches the compression
+// ratio of a specific zstd compression level.
+// Many input values will provide the same compression level.
+func EncoderLevelFromZstd(level int) EncoderLevel {
+	switch {
+	case level < 3:
+		return SpeedFastest
+	case level >= 3:
+		return SpeedDefault
+	}
+	return SpeedDefault
+}
+
+// String provides a string representation of the compression level.
+func (e EncoderLevel) String() string {
+	switch e {
+	case SpeedFastest:
+		return "fastest"
+	case SpeedDefault:
+		return "default"
+	default:
+		return "invalid"
+	}
+}
+
+// WithEncoderLevel specifies a predefined compression level.
+func WithEncoderLevel(l EncoderLevel) EOption {
+	return func(o *encoderOptions) error {
+		switch {
+		case l <= speedNotSet || l >= speedLast:
+			return fmt.Errorf("unknown encoder level")
+		}
+		o.level = l
+		return nil
+	}
+}
+
+// WithZeroFrames will encode 0 length input as full frames.
+// This can be needed for compatibility with zstandard usage,
+// but is not needed for this package.
+func WithZeroFrames(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.fullZero = b
+		return nil
+	}
+}
+
+// WithNoEntropyCompression will always skip entropy compression of literals.
+// This can be useful if content has matches, but unlikely to benefit from entropy
+// compression. Usually the slight speed improvement is not worth enabling this.
+func WithNoEntropyCompression(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.noEntropy = b
+		return nil
+	}
+}
+
+// WithSingleSegment will set the "single segment" flag when EncodeAll is used.
+// If this flag is set, data must be regenerated within a single continuous memory segment.
+// In this case, the Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present.
+// As a consequence, the decoder must allocate a memory segment equal to or larger than the size of your content.
+// To protect decoders from unreasonable memory requirements,
+// a decoder is allowed to reject a compressed frame which requests a memory size beyond the decoder's authorized range.
+// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB.
+// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations.
+// If this is not specified, block encodes will automatically choose this based on the input size.
+// This setting has no effect on streamed encodes.
+func WithSingleSegment(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.single = &b
+		return nil
+	}
+}
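A sketch of composing these options when creating a writer; the values are illustrative only:

package main

import (
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(os.Stdout,
		zstd.WithEncoderLevel(zstd.SpeedFastest), // single-table fast encoder
		zstd.WithEncoderConcurrency(2),           // at most 2 concurrent encodes
		zstd.WithWindowSize(1<<20),               // power of two within [MinWindowSize, MaxWindowSize]
		zstd.WithEncoderCRC(false),               // omit the 4-byte checksum
		zstd.WithEncoderPadding(4096),            // pad frames to a multiple of 4 KB
	)
	if err != nil {
		log.Fatal(err) // e.g. a non-power-of-two window size
	}
	defer enc.Close()

	// Levels can also be resolved from strings or zstd levels.
	if ok, lvl := zstd.EncoderLevelFromString("fastest"); ok {
		_ = lvl // SpeedFastest
	}
	_ = zstd.EncoderLevelFromZstd(5) // levels >= 3 map to SpeedDefault
}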
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
new file mode 100644
index 0000000..4079074
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -0,0 +1,489 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"hash"
+	"io"
+	"sync"
+
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type frameDec struct {
+	o         decoderOptions
+	crc       hash.Hash64
+	frameDone sync.WaitGroup
+	offset    int64
+
+	WindowSize       uint64
+	DictionaryID     uint32
+	FrameContentSize uint64
+	HasCheckSum      bool
+	SingleSegment    bool
+
+	// maxWindowSize is the maximum window size to support.
+	// Should never be bigger than max-int.
+	maxWindowSize uint64
+
+	// In order queue of blocks being decoded.
+	decoding chan *blockDec
+
+	// Frame history passed between blocks
+	history history
+
+	rawInput byteBuffer
+
+	// Byte buffer that can be reused for small input blocks.
+	bBuf byteBuf
+
+	// asyncRunning indicates whether the async routine processes input on 'decoding'.
+	asyncRunning   bool
+	asyncRunningMu sync.Mutex
+}
+
+const (
+	// MinWindowSize is the minimum Window_Size, 1 KB.
+	MinWindowSize = 1 << 10
+	// MaxWindowSize is the maximum supported window size, 1 GB.
+	MaxWindowSize = 1 << 30
+)
+
+var (
+	frameMagic          = []byte{0x28, 0xb5, 0x2f, 0xfd}
+	skippableFrameMagic = []byte{0x2a, 0x4d, 0x18}
+)
+
+func newFrameDec(o decoderOptions) *frameDec {
+	d := frameDec{
+		o:             o,
+		maxWindowSize: MaxWindowSize,
+	}
+	if d.maxWindowSize > o.maxDecodedSize {
+		d.maxWindowSize = o.maxDecodedSize
+	}
+	return &d
+}
+
+// reset will read the frame header and prepare for block decoding.
+// If nothing can be read from the input, io.EOF will be returned.
+// Any other error indicates that the stream contained data, but
+// there was a problem.
+func (d *frameDec) reset(br byteBuffer) error {
+	d.HasCheckSum = false
+	d.WindowSize = 0
+	var b []byte
+	for {
+		b = br.readSmall(4)
+		if b == nil {
+			return io.EOF
+		}
+		if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
+			if debug {
+				println("Not skippable", hex.EncodeToString(b), hex.EncodeToString(skippableFrameMagic))
+			}
+			// Break if not skippable frame.
+			break
+		}
+		// Read size to skip
+		b = br.readSmall(4)
+		if b == nil {
+			println("Reading Frame Size EOF")
+			return io.ErrUnexpectedEOF
+		}
+		n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+		println("Skipping frame with", n, "bytes.")
+		err := br.skipN(int(n))
+		if err != nil {
+			if debug {
+				println("Reading discarded frame", err)
+			}
+			return err
+		}
+	}
+	if !bytes.Equal(b, frameMagic) {
+		println("Got magic numbers: ", b, "want:", frameMagic)
+		return ErrMagicMismatch
+	}
+
+	// Read Frame_Header_Descriptor
+	fhd, err := br.readByte()
+	if err != nil {
+		println("Reading Frame_Header_Descriptor", err)
+		return err
+	}
+	d.SingleSegment = fhd&(1<<5) != 0
+
+	if fhd&(1<<3) != 0 {
+		return errors.New("Reserved bit set on frame header")
+	}
+
+	// Read Window_Descriptor
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
+	d.WindowSize = 0
+	if !d.SingleSegment {
+		wd, err := br.readByte()
+		if err != nil {
+			println("Reading Window_Descriptor", err)
+			return err
+		}
+		printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+		windowLog := 10 + (wd >> 3)
+		windowBase := uint64(1) << windowLog
+		windowAdd := (windowBase / 8) * uint64(wd&0x7)
+		d.WindowSize = windowBase + windowAdd
+	}
+
+	// Read Dictionary_ID
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
+	d.DictionaryID = 0
+	if size := fhd & 3; size != 0 {
+		if size == 3 {
+			size = 4
+		}
+		b = br.readSmall(int(size))
+		if b == nil {
+			if debug {
+				println("Reading Dictionary_ID", io.ErrUnexpectedEOF)
+			}
+			return io.ErrUnexpectedEOF
+		}
+		switch size {
+		case 1:
+			d.DictionaryID = uint32(b[0])
+		case 2:
+			d.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8)
+		case 4:
+			d.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+		}
+		if debug {
+			println("Dict size", size, "ID:", d.DictionaryID)
+		}
+		if d.DictionaryID != 0 {
+			return ErrUnknownDictionary
+		}
+	}
+
+	// Read Frame_Content_Size
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size
+	var fcsSize int
+	v := fhd >> 6
+	switch v {
+	case 0:
+		if d.SingleSegment {
+			fcsSize = 1
+		}
+	default:
+		fcsSize = 1 << v
+	}
+	d.FrameContentSize = 0
+	if fcsSize > 0 {
+		b := br.readSmall(fcsSize)
+		if b == nil {
+			println("Reading Frame content", io.ErrUnexpectedEOF)
+			return io.ErrUnexpectedEOF
+		}
+		switch fcsSize {
+		case 1:
+			d.FrameContentSize = uint64(b[0])
+		case 2:
+			// When FCS_Field_Size is 2, the offset of 256 is added.
+			d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256
+		case 4:
+			d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24)
+		case 8:
+			d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+			d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24)
+			d.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
+		}
+		if debug {
+			println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize)
+		}
+	}
+	// Move this to shared.
+	d.HasCheckSum = fhd&(1<<2) != 0
+	if d.HasCheckSum {
+		if d.crc == nil {
+			d.crc = xxhash.New()
+		}
+		d.crc.Reset()
+	}
+
+	if d.WindowSize == 0 && d.SingleSegment {
+		// We may not need window in this case.
+		d.WindowSize = d.FrameContentSize
+		if d.WindowSize < MinWindowSize {
+			d.WindowSize = MinWindowSize
+		}
+	}
+
+	if d.WindowSize > d.maxWindowSize {
+		printf("window size %d > max %d\n", d.WindowSize, d.maxWindowSize)
+		return ErrWindowSizeExceeded
+	}
+	// The minimum Window_Size is 1 KB.
+	if d.WindowSize < MinWindowSize {
+		println("got window size: ", d.WindowSize)
+		return ErrWindowSizeTooSmall
+	}
+	d.history.windowSize = int(d.WindowSize)
+	d.history.maxSize = d.history.windowSize + maxBlockSize
+	// history contains input - maybe we do something
+	d.rawInput = br
+	return nil
+}
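The Window_Descriptor byte read above packs a 5-bit exponent and a 3-bit mantissa; restated on its own for clarity (windowSizeFromDescriptor is an illustrative name, not part of the package):

func windowSizeFromDescriptor(wd byte) uint64 {
	windowLog := 10 + uint64(wd>>3)              // exponent, biased by 10
	windowBase := uint64(1) << windowLog
	windowAdd := (windowBase / 8) * uint64(wd&7) // mantissa adds eighths of the base
	return windowBase + windowAdd
}

// windowSizeFromDescriptor(0x00) == 1024 (MinWindowSize)
// windowSizeFromDescriptor(0x0b) == 2048 + 3*256 == 2816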
+
+// next will start decoding the next block from stream.
+func (d *frameDec) next(block *blockDec) error {
+	if debug {
+		printf("decoding new block %p:%p", block, block.data)
+	}
+	err := block.reset(d.rawInput, d.WindowSize)
+	if err != nil {
+		println("block error:", err)
+		// Signal the frame decoder we have a problem.
+		d.sendErr(block, err)
+		return err
+	}
+	block.input <- struct{}{}
+	if debug {
+		println("next block:", block)
+	}
+	d.asyncRunningMu.Lock()
+	defer d.asyncRunningMu.Unlock()
+	if !d.asyncRunning {
+		return nil
+	}
+	if block.Last {
+		// We indicate the frame is done by sending io.EOF
+		d.decoding <- block
+		return io.EOF
+	}
+	d.decoding <- block
+	return nil
+}
+
+// sendErr will queue an error block on the frame.
+// This will cause the frame decoder to return when it encounters the block.
+// Returns true if the decoder was added.
+func (d *frameDec) sendErr(block *blockDec, err error) bool {
+	d.asyncRunningMu.Lock()
+	defer d.asyncRunningMu.Unlock()
+	if !d.asyncRunning {
+		return false
+	}
+
+	println("sending error", err.Error())
+	block.sendErr(err)
+	d.decoding <- block
+	return true
+}
+
+// checkCRC will check the checksum if the frame has one.
+// Will return ErrCRCMismatch if the CRC check fails, otherwise nil.
+func (d *frameDec) checkCRC() error {
+	if !d.HasCheckSum {
+		return nil
+	}
+	var tmp [4]byte
+	got := d.crc.Sum64()
+	// Flip to match file order.
+	tmp[0] = byte(got >> 0)
+	tmp[1] = byte(got >> 8)
+	tmp[2] = byte(got >> 16)
+	tmp[3] = byte(got >> 24)
+
+	// We can overwrite upper tmp now
+	want := d.rawInput.readSmall(4)
+	if want == nil {
+		println("CRC missing?")
+		return io.ErrUnexpectedEOF
+	}
+
+	if !bytes.Equal(tmp[:], want) {
+		if debug {
+			println("CRC Check Failed:", tmp[:], "!=", want)
+		}
+		return ErrCRCMismatch
+	}
+	if debug {
+		println("CRC ok", tmp[:])
+	}
+	return nil
+}
+
+func (d *frameDec) initAsync() {
+	if !d.o.lowMem && !d.SingleSegment {
+		// set max extra size history to 20MB.
+		d.history.maxSize = d.history.windowSize + maxBlockSize*10
+	}
+	// re-alloc if more than one extra block size.
+	if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize {
+		d.history.b = make([]byte, 0, d.history.maxSize)
+	}
+	if cap(d.history.b) < d.history.maxSize {
+		d.history.b = make([]byte, 0, d.history.maxSize)
+	}
+	if cap(d.decoding) < d.o.concurrent {
+		d.decoding = make(chan *blockDec, d.o.concurrent)
+	}
+	if debug {
+		h := d.history
+		printf("history init. len: %d, cap: %d", len(h.b), cap(h.b))
+	}
+	d.asyncRunningMu.Lock()
+	d.asyncRunning = true
+	d.asyncRunningMu.Unlock()
+}
+
+// startDecoder will start decoding blocks and write them to the writer.
+// The decoder will stop as soon as an error occurs or at end of frame.
+// When the frame has finished decoding, the frameDone WaitGroup is signalled.
+func (d *frameDec) startDecoder(output chan decodeOutput) {
+	// TODO: Init to dictionary
+	d.history.reset()
+	written := int64(0)
+
+	defer func() {
+		d.asyncRunningMu.Lock()
+		d.asyncRunning = false
+		d.asyncRunningMu.Unlock()
+
+		// Drain the currently decoding.
+		d.history.error = true
+	flushdone:
+		for {
+			select {
+			case b := <-d.decoding:
+				b.history <- &d.history
+				output <- <-b.result
+			default:
+				break flushdone
+			}
+		}
+		println("frame decoder done, signalling done")
+		d.frameDone.Done()
+	}()
+	// Get decoder for first block.
+	block := <-d.decoding
+	block.history <- &d.history
+	for {
+		var next *blockDec
+		// Get result
+		r := <-block.result
+		if r.err != nil {
+			println("Result contained error", r.err)
+			output <- r
+			return
+		}
+		if debug {
+			println("got result, from ", d.offset, "to", d.offset+int64(len(r.b)))
+			d.offset += int64(len(r.b))
+		}
+		if !block.Last {
+			// Send history to next block
+			select {
+			case next = <-d.decoding:
+				if debug {
+					println("Sending ", len(d.history.b), "bytes as history")
+				}
+				next.history <- &d.history
+			default:
+				// Wait until we have sent the block, so
+				// other decoders can potentially get the decoder.
+				next = nil
+			}
+		}
+
+		// Add checksum, async to decoding.
+		if d.HasCheckSum {
+			n, err := d.crc.Write(r.b)
+			if err != nil {
+				r.err = err
+				if n != len(r.b) {
+					r.err = io.ErrShortWrite
+				}
+				output <- r
+				return
+			}
+		}
+		written += int64(len(r.b))
+		if d.SingleSegment && uint64(written) > d.FrameContentSize {
+			println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize)
+			r.err = ErrFrameSizeExceeded
+			output <- r
+			return
+		}
+		if block.Last {
+			r.err = d.checkCRC()
+			output <- r
+			return
+		}
+		output <- r
+		if next == nil {
+			// There was no decoder available; wait for one now that we have sent the output.
+			if debug {
+				println("Sending ", len(d.history.b), " bytes as history")
+			}
+			next = <-d.decoding
+			next.history <- &d.history
+		}
+		block = next
+	}
+}
+
+// runDecoder will create a sync decoder that will decode a block of data.
+func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
+	// TODO: Init to dictionary
+	d.history.reset()
+	saved := d.history.b
+
+	// We use the history for output to avoid copying it.
+	d.history.b = dst
+	// Store input length, so we only check new data.
+	crcStart := len(dst)
+	var err error
+	for {
+		err = dec.reset(d.rawInput, d.WindowSize)
+		if err != nil {
+			break
+		}
+		if debug {
+			println("next block:", dec)
+		}
+		err = dec.decodeBuf(&d.history)
+		if err != nil || dec.Last {
+			break
+		}
+		if uint64(len(d.history.b)) > d.o.maxDecodedSize {
+			err = ErrDecoderSizeExceeded
+			break
+		}
+		if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
+			println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
+			err = ErrFrameSizeExceeded
+			break
+		}
+	}
+	dst = d.history.b
+	if err == nil {
+		if d.HasCheckSum {
+			var n int
+			n, err = d.crc.Write(dst[crcStart:])
+			if err == nil {
+				if n != len(dst)-crcStart {
+					err = io.ErrShortWrite
+				} else {
+					err = d.checkCRC()
+				}
+			}
+		}
+	}
+	d.history.b = saved
+	return dst, err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go
new file mode 100644
index 0000000..4479cfe
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go
@@ -0,0 +1,115 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"math/bits"
+)
+
+type frameHeader struct {
+	ContentSize   uint64
+	WindowSize    uint32
+	SingleSegment bool
+	Checksum      bool
+	DictID        uint32 // Not stored.
+}
+
+const maxHeaderSize = 14
+
+func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
+	dst = append(dst, frameMagic...)
+	var fhd uint8
+	if f.Checksum {
+		fhd |= 1 << 2
+	}
+	if f.SingleSegment {
+		fhd |= 1 << 5
+	}
+	var fcs uint8
+	if f.ContentSize >= 256 {
+		fcs++
+	}
+	if f.ContentSize >= 65536+256 {
+		fcs++
+	}
+	if f.ContentSize >= 0xffffffff {
+		fcs++
+	}
+	fhd |= fcs << 6
+
+	dst = append(dst, fhd)
+	if !f.SingleSegment {
+		const winLogMin = 10
+		windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
+		dst = append(dst, uint8(windowLog))
+	}
+
+	switch fcs {
+	case 0:
+		if f.SingleSegment {
+			dst = append(dst, uint8(f.ContentSize))
+		}
+		// Unless SingleSegment is set, frame sizes < 256 are not stored.
+	case 1:
+		f.ContentSize -= 256
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
+	case 2:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24))
+	case 3:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24),
+			uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56))
+	default:
+		panic("invalid fcs")
+	}
+	return dst, nil
+}
+
+const skippableFrameHeader = 4 + 4
+
+// calcSkippableFrame will return the total size to be added so that written
+// is divisible by wantMultiple.
+// The returned value is either 0 or at least skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+	if wantMultiple <= 0 {
+		panic("wantMultiple <= 0")
+	}
+	if written < 0 {
+		panic("written < 0")
+	}
+	leftOver := written % wantMultiple
+	if leftOver == 0 {
+		return 0
+	}
+	toAdd := wantMultiple - leftOver
+	for toAdd < skippableFrameHeader {
+		toAdd += wantMultiple
+	}
+	return int(toAdd)
+}
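Worked examples of the arithmetic above:

// calcSkippableFrame(128, 64) == 0   already aligned, nothing to add
// calcSkippableFrame(100, 64) == 28  100+28 == 128, and 28 >= the 8-byte header
// calcSkippableFrame(62, 64)  == 66  2 is below the 8-byte header, so another 64 is added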
+
+// skippableFrame will add a skippable frame with a total size of total bytes.
+// total should be >= skippableFrameHeader and < math.MaxUint32.
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+	if total == 0 {
+		return dst, nil
+	}
+	if total < skippableFrameHeader {
+		return dst, fmt.Errorf("requested skippable frame (%d) < 8", total)
+	}
+	if int64(total) > math.MaxUint32 {
+		return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total)
+	}
+	dst = append(dst, 0x50, 0x2a, 0x4d, 0x18)
+	f := uint32(total - skippableFrameHeader)
+	dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24))
+	start := len(dst)
+	dst = append(dst, make([]byte, f)...)
+	_, err := io.ReadFull(r, dst[start:])
+	return dst, err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
new file mode 100644
index 0000000..9efe34f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -0,0 +1,384 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+)
+
+const (
+	tablelogAbsoluteMax = 9
+)
+
+const (
+	/*!MEMORY_USAGE :
+	 *  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+	 *  Increasing memory usage improves compression ratio
+	 *  Reduced memory usage can improve speed, due to cache effect
+	 *  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+	maxMemoryUsage = 11
+
+	maxTableLog    = maxMemoryUsage - 2
+	maxTablesize   = 1 << maxTableLog
+	maxTableMask   = (1 << maxTableLog) - 1
+	minTablelog    = 5
+	maxSymbolValue = 255
+)
+
+// fseDecoder provides temporary storage for compression and decompression.
+type fseDecoder struct {
+	dt             [maxTablesize]decSymbol // Decompression table.
+	symbolLen      uint16                  // Length of active part of the symbol table.
+	actualTableLog uint8                   // Selected tablelog.
+	maxBits        uint8                   // Maximum number of additional bits
+
+	// used for table creation to avoid allocations.
+	stateTable [256]uint16
+	norm       [maxSymbolValue + 1]int16
+	preDefined bool
+}
+
+// tableStep returns the next table index.
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
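For the power-of-two table sizes used here the step is always odd and therefore co-prime with the size, so repeated stepping visits every cell exactly once:

// tableStep(64) == 32 + 8 + 3 == 43; gcd(43, 64) == 1, so stepping by 43
// modulo 64 cycles through all 64 table positions before repeating.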
+
+// readNCount will read the symbol distribution so decoding tables can be constructed.
+func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
+	var (
+		charnum   uint16
+		previous0 bool
+	)
+	if b.remain() < 4 {
+		return errors.New("input too small")
+	}
+	bitStream := b.Uint32()
+	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+	if nbBits > tablelogAbsoluteMax {
+		println("Invalid tablelog:", nbBits)
+		return errors.New("tableLog too large")
+	}
+	bitStream >>= 4
+	bitCount := uint(4)
+
+	s.actualTableLog = uint8(nbBits)
+	remaining := int32((1 << nbBits) + 1)
+	threshold := int32(1 << nbBits)
+	gotTotal := int32(0)
+	nbBits++
+
+	for remaining > 1 && charnum <= maxSymbol {
+		if previous0 {
+			//println("prev0")
+			n0 := charnum
+			for (bitStream & 0xFFFF) == 0xFFFF {
+				//println("24 x 0")
+				n0 += 24
+				if r := b.remain(); r > 5 {
+					b.advance(2)
+					bitStream = b.Uint32() >> bitCount
+				} else {
+					// end of bit stream
+					bitStream >>= 16
+					bitCount += 16
+				}
+			}
+			//printf("bitstream: %d, 0b%b", bitStream&3, bitStream)
+			for (bitStream & 3) == 3 {
+				n0 += 3
+				bitStream >>= 2
+				bitCount += 2
+			}
+			n0 += uint16(bitStream & 3)
+			bitCount += 2
+
+			if n0 > maxSymbolValue {
+				return errors.New("maxSymbolValue too small")
+			}
+			//println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0)
+			for charnum < n0 {
+				s.norm[uint8(charnum)] = 0
+				charnum++
+			}
+
+			if r := b.remain(); r >= 7 || r+int(bitCount>>3) >= 4 {
+				b.advance(bitCount >> 3)
+				bitCount &= 7
+				bitStream = b.Uint32() >> bitCount
+			} else {
+				bitStream >>= 2
+			}
+		}
+
+		max := (2*threshold - 1) - remaining
+		var count int32
+
+		if int32(bitStream)&(threshold-1) < max {
+			count = int32(bitStream) & (threshold - 1)
+			if debug && nbBits < 1 {
+				panic("nbBits underflow")
+			}
+			bitCount += nbBits - 1
+		} else {
+			count = int32(bitStream) & (2*threshold - 1)
+			if count >= threshold {
+				count -= max
+			}
+			bitCount += nbBits
+		}
+
+		// extra accuracy
+		count--
+		if count < 0 {
+			// -1 means +1
+			remaining += count
+			gotTotal -= count
+		} else {
+			remaining -= count
+			gotTotal += count
+		}
+		s.norm[charnum&0xff] = int16(count)
+		charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		//println("b.off:", b.off, "len:", len(b.b), "bc:", bitCount, "remain:", b.remain())
+		if r := b.remain(); r >= 7 || r+int(bitCount>>3) >= 4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+			//println("b.off:", b.off, "len:", len(b.b), "bc:", bitCount, "iend", iend)
+		}
+		bitStream = b.Uint32() >> (bitCount & 31)
+		//printf("bitstream is now: 0b%b", bitStream)
+	}
+	s.symbolLen = charnum
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	// println(s.norm[:s.symbolLen], s.symbolLen)
+	return s.buildDtable()
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+// Using a composite uint64 is faster than a struct with separate members.
+type decSymbol uint64
+
+func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
+	return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d decSymbol) nbBits() uint8 {
+	return uint8(d)
+}
+
+func (d decSymbol) addBits() uint8 {
+	return uint8(d >> 8)
+}
+
+func (d decSymbol) newState() uint16 {
+	return uint16(d >> 16)
+}
+
+func (d decSymbol) baseline() uint32 {
+	return uint32(d >> 32)
+}
+
+func (d decSymbol) baselineInt() int {
+	return int(d >> 32)
+}
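Spelling out the packing used by newDecSymbol and the accessors above:

// Layout of the composite uint64, low bits to high:
//   bits  0..7   nbBits      bits  8..15  addBits
//   bits 16..31  newState    bits 32..63  baseline
// Round trip, for illustration:
//   d := newDecSymbol(5, 2, 100, 1<<16)
//   d.nbBits() == 5, d.addBits() == 2, d.newState() == 100, d.baseline() == 65536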
+
+func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) {
+	*d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d *decSymbol) setNBits(nBits uint8) {
+	const mask = 0xffffffffffffff00
+	*d = (*d & mask) | decSymbol(nBits)
+}
+
+func (d *decSymbol) setAddBits(addBits uint8) {
+	const mask = 0xffffffffffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8)
+}
+
+func (d *decSymbol) setNewState(state uint16) {
+	const mask = 0xffffffff0000ffff
+	*d = (*d & mask) | decSymbol(state)<<16
+}
+
+func (d *decSymbol) setBaseline(baseline uint32) {
+	const mask = 0xffffffff
+	*d = (*d & mask) | decSymbol(baseline)<<32
+}
+
+func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
+	const mask = 0xffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
+}
+
+// decSymbolValue returns the transformed decSymbol for the given symbol.
+func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
+	if int(symb) >= len(t) {
+		return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
+	}
+	lu := t[symb]
+	return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil
+}
+
+// setRLE will set the decoder to RLE mode.
+func (s *fseDecoder) setRLE(symbol decSymbol) {
+	s.actualTableLog = 0
+	s.maxBits = symbol.addBits()
+	s.dt[0] = symbol
+}
+
+// buildDtable will build the decoding table.
+func (s *fseDecoder) buildDtable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	symbolNext := s.stateTable[:256]
+
+	// Init, lay down lowprob symbols
+	{
+		for i, v := range s.norm[:s.symbolLen] {
+			if v == -1 {
+				s.dt[highThreshold].setAddBits(uint8(i))
+				highThreshold--
+				symbolNext[i] = 1
+			} else {
+				symbolNext[i] = uint16(v)
+			}
+		}
+	}
+	// Spread symbols
+	{
+		tableMask := tableSize - 1
+		step := tableStep(tableSize)
+		position := uint32(0)
+		for ss, v := range s.norm[:s.symbolLen] {
+			for i := 0; i < int(v); i++ {
+				s.dt[position].setAddBits(uint8(ss))
+				position = (position + step) & tableMask
+				for position > highThreshold {
+					// lowprob area
+					position = (position + step) & tableMask
+				}
+			}
+		}
+		if position != 0 {
+			// position must reach all cells once, otherwise normalizedCounter is incorrect
+			return errors.New("corrupted input (position != 0)")
+		}
+	}
+
+	// Build Decoding table
+	{
+		tableSize := uint16(1 << s.actualTableLog)
+		for u, v := range s.dt[:tableSize] {
+			symbol := v.addBits()
+			nextState := symbolNext[symbol]
+			symbolNext[symbol] = nextState + 1
+			nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
+			s.dt[u&maxTableMask].setNBits(nBits)
+			newState := (nextState << nBits) - tableSize
+			if newState > tableSize {
+				return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
+			}
+			if newState == uint16(u) && nBits == 0 {
+				// Seems weird that this is possible with nbits > 0.
+				return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
+			}
+			s.dt[u&maxTableMask].setNewState(newState)
+		}
+	}
+	return nil
+}
+
+// transform will transform the decoder table into a table usable for
+// decoding without having to apply the transformation while decoding.
+// The state will contain the base value and the number of bits to read.
+func (s *fseDecoder) transform(t []baseOffset) error {
+	tableSize := uint16(1 << s.actualTableLog)
+	s.maxBits = 0
+	for i, v := range s.dt[:tableSize] {
+		add := v.addBits()
+		if int(add) >= len(t) {
+			return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t))
+		}
+		lu := t[add]
+		if lu.addBits > s.maxBits {
+			s.maxBits = lu.addBits
+		}
+		v.setExt(lu.addBits, lu.baseLine)
+		s.dt[i] = v
+	}
+	return nil
+}
+
+type fseState struct {
+	dt    []decSymbol
+	state decSymbol
+}
+
+// init initializes the state and reads the first state from the bit reader.
+func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
+	s.dt = dt
+	br.fill()
+	s.state = dt[br.getBits(tableLog)]
+}
+
+// next returns the current symbol and sets the next state.
+// At least tablelog bits must be available in the bit reader.
+func (s *fseState) next(br *bitReader) {
+	lowBits := uint16(br.getBits(s.state.nbBits()))
+	s.state = s.dt[s.state.newState()+lowBits]
+}
+
+// finished returns true if all bits have been read from the bitstream
+// and the next state would require reading bits from the input.
+func (s *fseState) finished(br *bitReader) bool {
+	return br.finished() && s.state.nbBits() > 0
+}
+
+// final returns the current state symbol without decoding the next.
+func (s *fseState) final() (int, uint8) {
+	return s.state.baselineInt(), s.state.addBits()
+}
+
+// final returns the current state symbol without decoding the next.
+func (s decSymbol) final() (int, uint8) {
+	return s.baselineInt(), s.addBits()
+}
+
+// nextFast returns the next symbol and sets the next state.
+// This can only be used if no symbol requires 0 bits.
+// At least tablelog bits must be available in the bit reader.
+func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
+	lowBits := uint16(br.getBitsFast(s.state.nbBits()))
+	s.state = s.dt[s.state.newState()+lowBits]
+	return s.state.baseline(), s.state.addBits()
+}
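+
+// Decode-loop sketch (not part of the upstream source; br is a hypothetical
+// *bitReader and dec a populated *fseDecoder):
+//
+//	var st fseState
+//	st.init(br, dec.actualTableLog, dec.dt[:1<<dec.actualTableLog])
+//	for !st.finished(br) {
+//		br.fill()
+//		st.next(br)
+//		baseline, bits := st.final()
+//		_ = baseline + int(br.getBits(bits)) // decoded value
+//	}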
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
new file mode 100644
index 0000000..619836f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -0,0 +1,726 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"math"
+)
+
+const (
+	// For encoding we only support tablelogs up to 8 bits.
+	maxEncTableLog    = 8
+	maxEncTablesize   = 1 << maxTableLog
+	maxEncTableMask   = (1 << maxTableLog) - 1
+	minEncTablelog    = 5
+	maxEncSymbolValue = maxMatchLengthSymbol
+)
+
+// Scratch provides temporary storage for compression and decompression.
+type fseEncoder struct {
+	symbolLen      uint16 // Length of active part of the symbol table.
+	actualTableLog uint8  // Selected tablelog.
+	ct             cTable // Compression tables.
+	maxCount       int    // count of the most probable symbol
+	zeroBits       bool   // set if a symbol has prob > 50%; 0-bit output is then possible.
+	clearCount     bool   // clear count
+	useRLE         bool   // This encoder is for RLE
+	preDefined     bool   // This encoder is predefined.
+	reUsed         bool   // Set to know when the encoder has been reused.
+	rleVal         uint8  // RLE Symbol
+	maxBits        uint8  // Maximum output bits after transform.
+
+	// TODO: Technically zstd should be fine with 64 bytes.
+	count [256]uint32
+	norm  [256]int16
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+	tableSymbol []byte
+	stateTable  []uint16
+	symbolTT    []symbolTransform
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+	deltaNbBits    uint32
+	deltaFindState int16
+	outBits        uint8
+}
+
+// String prints values as a human readable string.
+func (s symbolTransform) String() string {
+	return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits)
+}
+
+// Histogram returns the histogram slice so the caller can populate it
+// and skip that step in the compression.
+// It also allows inspecting the histogram when compression is done.
+// To indicate that you have populated the histogram, call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+// The returned slice will always be length 256.
+func (s *fseEncoder) Histogram() []uint32 {
+	return s.count[:]
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
+func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
+	s.maxCount = maxCount
+	s.symbolLen = uint16(maxSymbol) + 1
+	s.clearCount = maxCount != 0
+}
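+
+// Sketch of the intended call pattern when the caller already has symbol
+// counts (an illustration, not part of the upstream source; enc is a
+// *fseEncoder and counts, maxSym, maxCnt are hypothetical values):
+//
+//	h := enc.Histogram()
+//	copy(h, counts[:])
+//	enc.HistogramFinished(maxSym, maxCnt)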
+
+// prepare will prepare and allocate scratch tables used for both compression and decompression.
+func (s *fseEncoder) prepare() (*fseEncoder, error) {
+	if s == nil {
+		s = &fseEncoder{}
+	}
+	s.useRLE = false
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	return s, nil
+}
+
+// allocCtable will allocate tables needed for compression.
+// If the existing tables are big enough, they are simply re-used.
+func (s *fseEncoder) allocCtable() {
+	tableSize := 1 << s.actualTableLog
+	// get tableSymbol that is big enough.
+	if cap(s.ct.tableSymbol) < int(tableSize) {
+		s.ct.tableSymbol = make([]byte, tableSize)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+	ctSize := tableSize
+	if cap(s.ct.stateTable) < ctSize {
+		s.ct.stateTable = make([]uint16, ctSize)
+	}
+	s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+	if cap(s.ct.symbolTT) < 256 {
+		s.ct.symbolTT = make([]symbolTransform, 256)
+	}
+	s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *fseEncoder) buildCTable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	var cumul [256]int16
+
+	s.allocCtable()
+	tableSymbol := s.ct.tableSymbol[:tableSize]
+	// symbol start positions
+	{
+		cumul[0] = 0
+		for ui, v := range s.norm[:s.symbolLen-1] {
+			u := byte(ui) // one less than reference
+			if v == -1 {
+				// Low proba symbol
+				cumul[u+1] = cumul[u] + 1
+				tableSymbol[highThreshold] = u
+				highThreshold--
+			} else {
+				cumul[u+1] = cumul[u] + v
+			}
+		}
+		// Encode last symbol separately to avoid overflowing u
+		u := int(s.symbolLen - 1)
+		v := s.norm[s.symbolLen-1]
+		if v == -1 {
+			// Low proba symbol
+			cumul[u+1] = cumul[u] + 1
+			tableSymbol[highThreshold] = byte(u)
+			highThreshold--
+		} else {
+			cumul[u+1] = cumul[u] + v
+		}
+		if uint32(cumul[s.symbolLen]) != tableSize {
+			return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
+		}
+		cumul[s.symbolLen] = int16(tableSize) + 1
+	}
+	// Spread symbols
+	s.zeroBits = false
+	{
+		step := tableStep(tableSize)
+		tableMask := tableSize - 1
+		var position uint32
+		// if any symbol > largeLimit, we may have 0 bits output.
+		largeLimit := int16(1 << (s.actualTableLog - 1))
+		for ui, v := range s.norm[:s.symbolLen] {
+			symbol := byte(ui)
+			if v > largeLimit {
+				s.zeroBits = true
+			}
+			for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
+				tableSymbol[position] = symbol
+				position = (position + step) & tableMask
+				for position > highThreshold {
+					position = (position + step) & tableMask
+				} /* Low proba area */
+			}
+		}
+
+		// Check if we have gone through all positions
+		if position != 0 {
+			return errors.New("position!=0")
+		}
+	}
+
+	// Build table
+	table := s.ct.stateTable
+	{
+		tsi := int(tableSize)
+		for u, v := range tableSymbol {
+			// TableU16 : sorted by symbol order; gives next state value
+			table[cumul[v]] = uint16(tsi + u)
+			cumul[v]++
+		}
+	}
+
+	// Build Symbol Transformation Table
+	{
+		total := int16(0)
+		symbolTT := s.ct.symbolTT[:s.symbolLen]
+		tableLog := s.actualTableLog
+		tl := (uint32(tableLog) << 16) - (1 << tableLog)
+		for i, v := range s.norm[:s.symbolLen] {
+			switch v {
+			case 0:
+			case -1, 1:
+				symbolTT[i].deltaNbBits = tl
+				symbolTT[i].deltaFindState = int16(total - 1)
+				total++
+			default:
+				maxBitsOut := uint32(tableLog) - highBit(uint32(v-1))
+				minStatePlus := uint32(v) << maxBitsOut
+				symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
+				symbolTT[i].deltaFindState = int16(total - v)
+				total += v
+			}
+		}
+		if total != int16(tableSize) {
+			return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize)
+		}
+	}
+	return nil
+}
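+
+// Worked example of the deltaNbBits encoding above (an illustration, not
+// part of the upstream source), assuming tableLog=6 (64 states numbered
+// 64..127) and a symbol with normalized count v=3:
+// maxBitsOut = 6 - highBit(2) = 5, minStatePlus = 3<<5 = 96, so
+// deltaNbBits = (5<<16) - 96. During encoding, states 64..95 then emit
+// 4 bits and states 96..127 emit 5, approximating log2(64/3) ≈ 4.42 bits.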
+
+var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}
+
+func (s *fseEncoder) setRLE(val byte) {
+	s.allocCtable()
+	s.actualTableLog = 0
+	s.ct.stateTable = s.ct.stateTable[:1]
+	s.ct.symbolTT[val] = symbolTransform{
+		deltaFindState: 0,
+		deltaNbBits:    0,
+	}
+	if debug {
+		println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val])
+	}
+	s.rleVal = val
+	s.useRLE = true
+}
+
+// setBits will set output bits for the transform.
+// If nil is provided, the number of bits is equal to the index.
+func (s *fseEncoder) setBits(transform []byte) {
+	if s.reUsed || s.preDefined {
+		return
+	}
+	if s.useRLE {
+		if transform == nil {
+			s.ct.symbolTT[s.rleVal].outBits = s.rleVal
+			s.maxBits = s.rleVal
+			return
+		}
+		s.maxBits = transform[s.rleVal]
+		s.ct.symbolTT[s.rleVal].outBits = s.maxBits
+		return
+	}
+	if transform == nil {
+		for i := range s.ct.symbolTT[:s.symbolLen] {
+			s.ct.symbolTT[i].outBits = uint8(i)
+		}
+		s.maxBits = uint8(s.symbolLen - 1)
+		return
+	}
+	s.maxBits = 0
+	for i, v := range transform[:s.symbolLen] {
+		s.ct.symbolTT[i].outBits = v
+		if v > s.maxBits {
+			// We could assume the bit counts are always increasing, but we play it safe.
+			s.maxBits = v
+		}
+	}
+}
+
+// normalizeCount will normalize the count of the symbols so
+// the total is equal to the table size.
+// If successful, compression tables will also be made ready.
+func (s *fseEncoder) normalizeCount(length int) error {
+	if s.reUsed {
+		return nil
+	}
+	s.optimalTableLog(length)
+	var (
+		tableLog          = s.actualTableLog
+		scale             = 62 - uint64(tableLog)
+		step              = (1 << 62) / uint64(length)
+		vStep             = uint64(1) << (scale - 20)
+		stillToDistribute = int16(1 << tableLog)
+		largest           int
+		largestP          int16
+		lowThreshold      = (uint32)(length >> tableLog)
+	)
+	if s.maxCount == length {
+		s.useRLE = true
+		return nil
+	}
+	s.useRLE = false
+	for i, cnt := range s.count[:s.symbolLen] {
+		// already handled
+		// if (count[s] == s.length) return 0;   /* rle special case */
+
+		if cnt == 0 {
+			s.norm[i] = 0
+			continue
+		}
+		if cnt <= lowThreshold {
+			s.norm[i] = -1
+			stillToDistribute--
+		} else {
+			proba := (int16)((uint64(cnt) * step) >> scale)
+			if proba < 8 {
+				restToBeat := vStep * uint64(rtbTable[proba])
+				v := uint64(cnt)*step - (uint64(proba) << scale)
+				if v > restToBeat {
+					proba++
+				}
+			}
+			if proba > largestP {
+				largestP = proba
+				largest = i
+			}
+			s.norm[i] = proba
+			stillToDistribute -= proba
+		}
+	}
+
+	if -stillToDistribute >= (s.norm[largest] >> 1) {
+		// corner case, need another normalization method
+		err := s.normalizeCount2(length)
+		if err != nil {
+			return err
+		}
+		if debug {
+			err = s.validateNorm()
+			if err != nil {
+				return err
+			}
+		}
+		return s.buildCTable()
+	}
+	s.norm[largest] += stillToDistribute
+	if debug {
+		err := s.validateNorm()
+		if err != nil {
+			return err
+		}
+	}
+	return s.buildCTable()
+}
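+
+// Illustrative numbers for the normalization above (an approximation, not
+// part of the upstream source): with counts {60, 30, 10} over length 100
+// and tableLog=5 (tableSize=32), the scaled probabilities come out near
+// {19, 9, 3}; the leftover point(s) are added to the most probable symbol
+// so the norms sum to exactly 32.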
+
+// Secondary normalization method.
+// To be used when primary method fails.
+func (s *fseEncoder) normalizeCount2(length int) error {
+	const notYetAssigned = -2
+	var (
+		distributed  uint32
+		total        = uint32(length)
+		tableLog     = s.actualTableLog
+		lowThreshold = uint32(total >> tableLog)
+		lowOne       = uint32((total * 3) >> (tableLog + 1))
+	)
+	for i, cnt := range s.count[:s.symbolLen] {
+		if cnt == 0 {
+			s.norm[i] = 0
+			continue
+		}
+		if cnt <= lowThreshold {
+			s.norm[i] = -1
+			distributed++
+			total -= cnt
+			continue
+		}
+		if cnt <= lowOne {
+			s.norm[i] = 1
+			distributed++
+			total -= cnt
+			continue
+		}
+		s.norm[i] = notYetAssigned
+	}
+	toDistribute := (1 << tableLog) - distributed
+
+	if (total / toDistribute) > lowOne {
+		// risk of rounding to zero
+		lowOne = uint32((total * 3) / (toDistribute * 2))
+		for i, cnt := range s.count[:s.symbolLen] {
+			if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
+				s.norm[i] = 1
+				distributed++
+				total -= cnt
+				continue
+			}
+		}
+		toDistribute = (1 << tableLog) - distributed
+	}
+	if distributed == uint32(s.symbolLen)+1 {
+		// all values are pretty poor;
+		//   probably incompressible data (should have already been detected);
+		//   find max, then give all remaining points to max
+		var maxV int
+		var maxC uint32
+		for i, cnt := range s.count[:s.symbolLen] {
+			if cnt > maxC {
+				maxV = i
+				maxC = cnt
+			}
+		}
+		s.norm[maxV] += int16(toDistribute)
+		return nil
+	}
+
+	if total == 0 {
+		// all of the symbols were low enough for the lowOne or lowThreshold
+		for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) {
+			if s.norm[i] > 0 {
+				toDistribute--
+				s.norm[i]++
+			}
+		}
+		return nil
+	}
+
+	var (
+		vStepLog = 62 - uint64(tableLog)
+		mid      = uint64((1 << (vStepLog - 1)) - 1)
+		rStep    = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining
+		tmpTotal = mid
+	)
+	for i, cnt := range s.count[:s.symbolLen] {
+		if s.norm[i] == notYetAssigned {
+			var (
+				end    = tmpTotal + uint64(cnt)*rStep
+				sStart = uint32(tmpTotal >> vStepLog)
+				sEnd   = uint32(end >> vStepLog)
+				weight = sEnd - sStart
+			)
+			if weight < 1 {
+				return errors.New("weight < 1")
+			}
+			s.norm[i] = int16(weight)
+			tmpTotal = end
+		}
+	}
+	return nil
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *fseEncoder) optimalTableLog(length int) {
+	tableLog := uint8(maxEncTableLog)
+	minBitsSrc := highBit(uint32(length)) + 1
+	minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2
+	minBits := uint8(minBitsSymbols)
+	if minBitsSrc < minBitsSymbols {
+		minBits = uint8(minBitsSrc)
+	}
+
+	maxBitsSrc := uint8(highBit(uint32(length-1))) - 2
+	if maxBitsSrc < tableLog {
+		// Accuracy can be reduced
+		tableLog = maxBitsSrc
+	}
+	if minBits > tableLog {
+		tableLog = minBits
+	}
+	// Need a minimum to safely represent all symbol values
+	if tableLog < minEncTablelog {
+		tableLog = minEncTablelog
+	}
+	if tableLog > maxEncTableLog {
+		tableLog = maxEncTableLog
+	}
+	s.actualTableLog = tableLog
+}
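+
+// Worked example (an illustration, not part of the upstream source),
+// assuming length=1000 and symbolLen=36:
+// minBitsSrc = highBit(1000)+1 = 10, minBitsSymbols = highBit(35)+2 = 7,
+// so minBits = 7; maxBitsSrc = highBit(999)-2 = 7 caps the default of 8,
+// and the remaining clamps leave s.actualTableLog = 7.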
+
+// validateNorm validates the normalized histogram table.
+func (s *fseEncoder) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	for i, v := range s.count[s.symbolLen:] {
+		if v != 0 {
+			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
+		}
+	}
+	return nil
+}
+
+// writeCount will write the normalized histogram count to header.
+// This is read back by readNCount.
+func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
+	if s.useRLE {
+		return append(out, s.rleVal), nil
+	}
+	if s.preDefined || s.reUsed {
+		// Never write predefined.
+		return out, nil
+	}
+
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		// maximum header size plus 2 extra bytes for final output if bitCount == 0.
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minEncTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+		outP      = len(out)
+	)
+	if cap(out) < outP+maxHeaderSize {
+		out = append(out, make([]byte, maxHeaderSize*3)...)
+		out = out[:len(out)-maxHeaderSize*3]
+	}
+	out = out[:outP+maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
+		}
+		bitStream += uint32(count) << bitCount
+		bitCount += nbBits
+		if count < max {
+			bitCount--
+		}
+
+		previous0 = count == 1
+		if remaining < 1 {
+			return nil, errors.New("internal error: remaining < 1")
+		}
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if bitCount > 16 {
+			out[outP] = byte(bitStream)
+			out[outP+1] = byte(bitStream >> 8)
+			outP += 2
+			bitStream >>= 16
+			bitCount -= 16
+		}
+	}
+
+	if outP+2 > len(out) {
+		return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen])
+	}
+	out[outP] = byte(bitStream)
+	out[outP+1] = byte(bitStream >> 8)
+	outP += int((bitCount + 7) / 8)
+
+	if charnum > s.symbolLen {
+		return nil, errors.New("internal error: charnum > s.symbolLen")
+	}
+	return out[:outP], nil
+}
+
+// bitCost returns an approximate symbol cost, as a fractional value, using
+// a fixed-point format with accuracyLog fractional bits.
+// note 1: assumes symbolValue is valid (<= maxSymbolValue).
+// note 2: if freq[symbolValue]==0, returns a fake cost of tableLog+1 bits.
+func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 {
+	minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16
+	threshold := (minNbBits + 1) << 16
+	if debug {
+		if !(s.actualTableLog < 16) {
+			panic("!s.actualTableLog < 16")
+		}
+		// ensure enough room for renormalization double shift
+		if !(uint8(accuracyLog) < 31-s.actualTableLog) {
+			panic("!uint8(accuracyLog) < 31-s.actualTableLog")
+		}
+	}
+	tableSize := uint32(1) << s.actualTableLog
+	deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize)
+	// linear interpolation (very approximate)
+	normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog
+	bitMultiplier := uint32(1) << accuracyLog
+	if debug {
+		if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold {
+			panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold")
+		}
+		if normalizedDeltaFromThreshold > bitMultiplier {
+			panic("normalizedDeltaFromThreshold > bitMultiplier")
+		}
+	}
+	return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold
+}
+
+// approxSize returns the cost in bits of encoding the distribution in hist
+// using the current ctable.
+// The histogram should only extend up to the last non-zero symbol.
+// Returns math.MaxUint32 if the ctable cannot represent all the symbols in hist.
+func (s *fseEncoder) approxSize(hist []uint32) uint32 {
+	if int(s.symbolLen) < len(hist) {
+		// More symbols than we have.
+		return math.MaxUint32
+	}
+	if s.useRLE {
+		// We will never reuse RLE encoders.
+		return math.MaxUint32
+	}
+	const kAccuracyLog = 8
+	badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog
+	var cost uint32
+	for i, v := range hist {
+		if v == 0 {
+			continue
+		}
+		if s.norm[i] == 0 {
+			return math.MaxUint32
+		}
+		bitCost := s.bitCost(uint8(i), kAccuracyLog)
+		if bitCost > badCost {
+			return math.MaxUint32
+		}
+		cost += v * bitCost
+	}
+	return cost >> kAccuracyLog
+}
+
+// maxHeaderSize returns the maximum header size in bits.
+// This is not the exact size, but we want a penalty for new tables anyway.
+func (s *fseEncoder) maxHeaderSize() uint32 {
+	if s.preDefined {
+		return 0
+	}
+	if s.useRLE {
+		return 8
+	}
+	return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8
+}
+
+// cState contains the compression state of a stream.
+type cState struct {
+	bw         *bitWriter
+	stateTable []uint16
+	state      uint16
+}
+
+// init will initialize the compression state to the first symbol of the stream.
+func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) {
+	c.bw = bw
+	c.stateTable = ct.stateTable
+	if len(c.stateTable) == 1 {
+		// RLE
+		c.stateTable[0] = uint16(0)
+		c.state = 0
+		return
+	}
+	nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16
+	im := int32((nbBitsOut << 16) - first.deltaNbBits)
+	lu := (im >> nbBitsOut) + int32(first.deltaFindState)
+	c.state = c.stateTable[lu]
+}
+
+// encode the output symbol provided and write it to the bitstream.
+func (c *cState) encode(symbolTT symbolTransform) {
+	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
+	dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState)
+	c.bw.addBits16NC(c.state, uint8(nbBitsOut))
+	c.state = c.stateTable[dstState]
+}
+
+// flush will write the tablelog to the output and flush the remaining full bytes.
+func (c *cState) flush(tableLog uint8) {
+	c.bw.flush32()
+	c.bw.addBits16NC(c.state, tableLog)
+}
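+
+// Encoding sketch (not part of the upstream source; seq, bw and enc are
+// hypothetical): FSE encodes in reverse so the decoder can read the symbols
+// back in forward order:
+//
+//	var c cState
+//	c.init(bw, &enc.ct, enc.ct.symbolTT[seq[len(seq)-1]])
+//	for i := len(seq) - 2; i >= 0; i-- {
+//		c.encode(enc.ct.symbolTT[seq[i]])
+//	}
+//	c.flush(enc.actualTableLog)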
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
new file mode 100644
index 0000000..6c17dc1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
@@ -0,0 +1,158 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"fmt"
+	"math"
+	"sync"
+)
+
+var (
+	// fsePredef are the predefined fse tables as defined here:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+	// These values are already transformed.
+	fsePredef [3]fseDecoder
+
+	// fsePredefEnc are the predefined encoder based on fse tables as defined here:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+	// These values are already transformed.
+	fsePredefEnc [3]fseEncoder
+
+	// symbolTableX contains the transformations needed for each type as defined in
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets
+	symbolTableX [3][]baseOffset
+
+	// maxTableSymbol is the biggest supported symbol for each table type
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets
+	maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol}
+
+	// bitTables is the bits table for each table.
+	bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]}
+)
+
+type tableIndex uint8
+
+const (
+	// indexes for fsePredef and symbolTableX
+	tableLiteralLengths tableIndex = 0
+	tableOffsets        tableIndex = 1
+	tableMatchLengths   tableIndex = 2
+
+	maxLiteralLengthSymbol = 35
+	maxOffsetLengthSymbol  = 30
+	maxMatchLengthSymbol   = 52
+)
+
+// baseOffset is used for calculating transformations.
+type baseOffset struct {
+	baseLine uint32
+	addBits  uint8
+}
+
+// fillBase will precalculate base offsets with the given bit distributions.
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) {
+	if len(bits) != len(dst) {
+		panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits)))
+	}
+	for i, bit := range bits {
+		if base > math.MaxInt32 {
+			panic("invalid decoding table, base overflows int32")
+		}
+
+		dst[i] = baseOffset{
+			baseLine: base,
+			addBits:  bit,
+		}
+		base += 1 << bit
+	}
+}
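+
+// Worked example (an illustration, not part of the upstream source):
+// fillBase(dst[:3], 16, 1, 1, 2) yields {baseLine: 16, addBits: 1},
+// {18, 1} and {20, 2}, since each entry advances the base by 1<<addBits.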
+
+var predef sync.Once
+
+func initPredefined() {
+	predef.Do(func() {
+		// Literals length codes
+		tmp := make([]baseOffset, 36)
+		for i := range tmp[:16] {
+			tmp[i] = baseOffset{
+				baseLine: uint32(i),
+				addBits:  0,
+			}
+		}
+		fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
+		symbolTableX[tableLiteralLengths] = tmp
+
+		// Match length codes
+		tmp = make([]baseOffset, 53)
+		for i := range tmp[:32] {
+			tmp[i] = baseOffset{
+				// The transformation adds the minimum match length of 3.
+				baseLine: uint32(i) + 3,
+				addBits:  0,
+			}
+		}
+		fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
+		symbolTableX[tableMatchLengths] = tmp
+
+		// Offset codes
+		tmp = make([]baseOffset, maxOffsetBits+1)
+		tmp[1] = baseOffset{
+			baseLine: 1,
+			addBits:  1,
+		}
+		fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)
+		symbolTableX[tableOffsets] = tmp
+
+		// Fill predefined tables and transform them.
+		// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+		for i := range fsePredef[:] {
+			f := &fsePredef[i]
+			switch tableIndex(i) {
+			case tableLiteralLengths:
+				// https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243
+				f.actualTableLog = 6
+				copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+					2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
+					-1, -1, -1, -1})
+				f.symbolLen = 36
+			case tableOffsets:
+				// https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281
+				f.actualTableLog = 5
+				copy(f.norm[:], []int16{
+					1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+					1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1})
+				f.symbolLen = 29
+			case tableMatchLengths:
+				// https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304
+				f.actualTableLog = 6
+				copy(f.norm[:], []int16{
+					1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+					1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+					1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1,
+					-1, -1, -1, -1, -1})
+				f.symbolLen = 53
+			}
+			if err := f.buildDtable(); err != nil {
+				panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+			}
+			if err := f.transform(symbolTableX[i]); err != nil {
+				panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+			}
+			f.preDefined = true
+
+			// Create encoder as well
+			enc := &fsePredefEnc[i]
+			copy(enc.norm[:], f.norm[:])
+			enc.symbolLen = f.symbolLen
+			enc.actualTableLog = f.actualTableLog
+			if err := enc.buildCTable(); err != nil {
+				panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err))
+			}
+			enc.setBits(bitTables[i])
+			enc.preDefined = true
+		}
+	})
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go
new file mode 100644
index 0000000..4a75206
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/hash.go
@@ -0,0 +1,77 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+const (
+	prime3bytes = 506832829
+	prime4bytes = 2654435761
+	prime5bytes = 889523592379
+	prime6bytes = 227718039650203
+	prime7bytes = 58295818150454627
+	prime8bytes = 0xcf1bbcdcb7a56463
+)
+
+// hashLen returns a hash of the lowest mls bytes of u to fit in a hash table
+// with hashLog bits.
+// mls must be >=4 and <=8. Any other value will return the hash for 4 bytes.
+// hashLog should always be <32.
+// Preferably hashLog and mls should be constants.
+// FIXME: the switch does NOT get resolved even if 'mls' is constant,
+// so this cannot be used.
+func hashLen(u uint64, hashLog, mls uint8) uint32 {
+	switch mls {
+	case 5:
+		return hash5(u, hashLog)
+	case 6:
+		return hash6(u, hashLog)
+	case 7:
+		return hash7(u, hashLog)
+	case 8:
+		return hash8(u, hashLog)
+	default:
+		return hash4x64(u, hashLog)
+	}
+}
+
+// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash3(u uint32, h uint8) uint32 {
+	return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
+}
+
+// hash4 returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4(u uint32, h uint8) uint32 {
+	return (u * prime4bytes) >> ((32 - h) & 31)
+}
+
+// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4x64(u uint64, h uint8) uint32 {
+	return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
+}
+
+// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash5(u uint64, h uint8) uint32 {
+	return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63))
+}
+
+// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash6(u uint64, h uint8) uint32 {
+	return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
+}
+
+// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash7(u uint64, h uint8) uint32 {
+	return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
+}
+
+// hash8 returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash8(u uint64, h uint8) uint32 {
+	return uint32((u * prime8bytes) >> ((64 - h) & 63))
+}
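+
+// Note on the shifts above (an illustration, not part of the upstream
+// source): hash6, for example, computes (u<<16)*prime6bytes, so the top two
+// bytes of u never affect the result; for any h,
+// hash6(0x0000AABBCCDDEEFF, h) == hash6(0x1111AABBCCDDEEFF, h).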
diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go
new file mode 100644
index 0000000..e8c419b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/history.go
@@ -0,0 +1,73 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"github.com/klauspost/compress/huff0"
+)
+
+// history contains the information transferred between blocks.
+type history struct {
+	b             []byte
+	huffTree      *huff0.Scratch
+	recentOffsets [3]int
+	decoders      sequenceDecs
+	windowSize    int
+	maxSize       int
+	error         bool
+}
+
+// reset will reset the history to initial state of a frame.
+// The history must already have been initialized to the desired size.
+func (h *history) reset() {
+	h.b = h.b[:0]
+	h.error = false
+	h.recentOffsets = [3]int{1, 4, 8}
+	if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+	}
+	if f := h.decoders.offsets.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+	}
+	if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+	}
+	h.decoders = sequenceDecs{}
+	if h.huffTree != nil {
+		huffDecoderPool.Put(h.huffTree)
+	}
+	h.huffTree = nil
+	//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
+}
+
+// append bytes to the history.
+// This function will make sure there is space for the data, provided
+// the buffer was allocated with enough extra space up front.
+func (h *history) append(b []byte) {
+	if len(b) >= h.windowSize {
+		// Discard all history by simply overwriting
+		h.b = h.b[:h.windowSize]
+		copy(h.b, b[len(b)-h.windowSize:])
+		return
+	}
+
+	// If there is space, append it.
+	if len(b) < cap(h.b)-len(h.b) {
+		h.b = append(h.b, b...)
+		return
+	}
+
+	// Move data down so we only have window size left.
+	// We know we have less than window size in b at this point.
+	discard := len(b) + len(h.b) - h.windowSize
+	copy(h.b, h.b[discard:])
+	h.b = h.b[:h.windowSize]
+	copy(h.b[h.windowSize-len(b):], b)
+}
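+
+// Illustration of the move-down path above (not part of the upstream
+// source), assuming windowSize=8, cap(h.b)=8, h.b currently "ABCDEF" and
+// b="1234": discard = 4+6-8 = 2, h.b is shifted down to "CDEF" and the new
+// bytes are copied in, leaving the window "CDEF1234".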
+
+// append bytes to history without ever discarding anything.
+func (h *history) appendKeep(b []byte) {
+	h.b = append(h.b, b...)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt
new file mode 100644
index 0000000..24b5306
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2016 Caleb Spare
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
new file mode 100644
index 0000000..69aa3bb
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
@@ -0,0 +1,58 @@
+# xxhash
+
+VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package.
+
+
+[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
+[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
+
+xxhash is a Go implementation of the 64-bit
+[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+high-quality hashing algorithm that is much faster than anything in the Go
+standard library.
+
+This package provides a straightforward API:
+
+```
+func Sum64(b []byte) uint64
+func Sum64String(s string) uint64
+type Digest struct{ ... }
+    func New() *Digest
+```
+
+The `Digest` type implements hash.Hash64. Its key methods are:
+
+```
+func (*Digest) Write([]byte) (int, error)
+func (*Digest) WriteString(string) (int, error)
+func (*Digest) Sum64() uint64
+```
+
+This package provides a fast pure-Go implementation and an even faster
+assembly implementation for amd64.
+
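+A minimal usage sketch:
+
+```
+d := xxhash.New()
+d.WriteString("hello ")
+d.Write([]byte("world"))
+fmt.Printf("%016x\n", d.Sum64())
+```
+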
+## Benchmarks
+
+Here are some quick benchmarks comparing the pure-Go and assembly
+implementations of Sum64.
+
+| input size | purego | asm |
+| --- | --- | --- |
+| 5 B   |  979.66 MB/s |  1291.17 MB/s  |
+| 100 B | 7475.26 MB/s | 7973.40 MB/s  |
+| 4 KB  | 17573.46 MB/s | 17602.65 MB/s |
+| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+
+These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
+the following commands under Go 1.11.2:
+
+```
+$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
+$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+```
+
+## Projects using this package
+
+- [InfluxDB](https://github.com/influxdata/influxdb)
+- [Prometheus](https://github.com/prometheus/prometheus)
+- [FreeCache](https://github.com/coocood/freecache)
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
new file mode 100644
index 0000000..426b9ca
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
@@ -0,0 +1,238 @@
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+// at http://cyan4973.github.io/xxHash/.
+// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package.
+
+package xxhash
+
+import (
+	"encoding/binary"
+	"errors"
+	"math/bits"
+)
+
+const (
+	prime1 uint64 = 11400714785074694791
+	prime2 uint64 = 14029467366897019727
+	prime3 uint64 = 1609587929392839161
+	prime4 uint64 = 9650029242287828579
+	prime5 uint64 = 2870177450012600261
+)
+
+// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
+// possible in the Go code is worth a small (but measurable) performance boost
+// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
+// convenience in the Go code in a few places where we need to intentionally
+// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
+// result overflows a uint64).
+var (
+	prime1v = prime1
+	prime2v = prime2
+	prime3v = prime3
+	prime4v = prime4
+	prime5v = prime5
+)
+
+// Digest implements hash.Hash64.
+type Digest struct {
+	v1    uint64
+	v2    uint64
+	v3    uint64
+	v4    uint64
+	total uint64
+	mem   [32]byte
+	n     int // how much of mem is used
+}
+
+// New creates a new Digest that computes the 64-bit xxHash algorithm.
+func New() *Digest {
+	var d Digest
+	d.Reset()
+	return &d
+}
+
+// Reset clears the Digest's state so that it can be reused.
+func (d *Digest) Reset() {
+	d.v1 = prime1v + prime2
+	d.v2 = prime2
+	d.v3 = 0
+	d.v4 = -prime1v
+	d.total = 0
+	d.n = 0
+}
+
+// Size always returns 8 bytes.
+func (d *Digest) Size() int { return 8 }
+
+// BlockSize always returns 32 bytes.
+func (d *Digest) BlockSize() int { return 32 }
+
+// Write adds more data to d. It always returns len(b), nil.
+func (d *Digest) Write(b []byte) (n int, err error) {
+	n = len(b)
+	d.total += uint64(n)
+
+	if d.n+n < 32 {
+		// This new data doesn't even fill the current block.
+		copy(d.mem[d.n:], b)
+		d.n += n
+		return
+	}
+
+	if d.n > 0 {
+		// Finish off the partial block.
+		copy(d.mem[d.n:], b)
+		d.v1 = round(d.v1, u64(d.mem[0:8]))
+		d.v2 = round(d.v2, u64(d.mem[8:16]))
+		d.v3 = round(d.v3, u64(d.mem[16:24]))
+		d.v4 = round(d.v4, u64(d.mem[24:32]))
+		b = b[32-d.n:]
+		d.n = 0
+	}
+
+	if len(b) >= 32 {
+		// One or more full blocks left.
+		nw := writeBlocks(d, b)
+		b = b[nw:]
+	}
+
+	// Store any remaining partial block.
+	copy(d.mem[:], b)
+	d.n = len(b)
+
+	return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+func (d *Digest) Sum(b []byte) []byte {
+	s := d.Sum64()
+	return append(
+		b,
+		byte(s>>56),
+		byte(s>>48),
+		byte(s>>40),
+		byte(s>>32),
+		byte(s>>24),
+		byte(s>>16),
+		byte(s>>8),
+		byte(s),
+	)
+}
+
+// Sum64 returns the current hash.
+func (d *Digest) Sum64() uint64 {
+	var h uint64
+
+	if d.total >= 32 {
+		v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+		h = mergeRound(h, v1)
+		h = mergeRound(h, v2)
+		h = mergeRound(h, v3)
+		h = mergeRound(h, v4)
+	} else {
+		h = d.v3 + prime5
+	}
+
+	h += d.total
+
+	i, end := 0, d.n
+	for ; i+8 <= end; i += 8 {
+		k1 := round(0, u64(d.mem[i:i+8]))
+		h ^= k1
+		h = rol27(h)*prime1 + prime4
+	}
+	if i+4 <= end {
+		h ^= uint64(u32(d.mem[i:i+4])) * prime1
+		h = rol23(h)*prime2 + prime3
+		i += 4
+	}
+	for i < end {
+		h ^= uint64(d.mem[i]) * prime5
+		h = rol11(h) * prime1
+		i++
+	}
+
+	h ^= h >> 33
+	h *= prime2
+	h ^= h >> 29
+	h *= prime3
+	h ^= h >> 32
+
+	return h
+}
+
+const (
+	magic         = "xxh\x06"
+	marshaledSize = len(magic) + 8*5 + 32
+)
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d *Digest) MarshalBinary() ([]byte, error) {
+	b := make([]byte, 0, marshaledSize)
+	b = append(b, magic...)
+	b = appendUint64(b, d.v1)
+	b = appendUint64(b, d.v2)
+	b = appendUint64(b, d.v3)
+	b = appendUint64(b, d.v4)
+	b = appendUint64(b, d.total)
+	b = append(b, d.mem[:d.n]...)
+	b = b[:len(b)+len(d.mem)-d.n]
+	return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Digest) UnmarshalBinary(b []byte) error {
+	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
+		return errors.New("xxhash: invalid hash state identifier")
+	}
+	if len(b) != marshaledSize {
+		return errors.New("xxhash: invalid hash state size")
+	}
+	b = b[len(magic):]
+	b, d.v1 = consumeUint64(b)
+	b, d.v2 = consumeUint64(b)
+	b, d.v3 = consumeUint64(b)
+	b, d.v4 = consumeUint64(b)
+	b, d.total = consumeUint64(b)
+	copy(d.mem[:], b)
+	b = b[len(d.mem):]
+	d.n = int(d.total % uint64(len(d.mem)))
+	return nil
+}
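+
+// Round-trip sketch (not part of the upstream source; part1 and part2 are
+// hypothetical byte slices):
+//
+//	d1 := New()
+//	d1.Write(part1)
+//	state, _ := d1.MarshalBinary()
+//	d2 := New()
+//	_ = d2.UnmarshalBinary(state)
+//	d2.Write(part2)
+//	// d2.Sum64() now equals hashing part1 followed by part2 in one Digest.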
+
+func appendUint64(b []byte, x uint64) []byte {
+	var a [8]byte
+	binary.LittleEndian.PutUint64(a[:], x)
+	return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+	x := u64(b)
+	return b[8:], x
+}
+
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
+
+func round(acc, input uint64) uint64 {
+	acc += input * prime2
+	acc = rol31(acc)
+	acc *= prime1
+	return acc
+}
+
+func mergeRound(acc, val uint64) uint64 {
+	val = round(0, val)
+	acc ^= val
+	acc = acc*prime1 + prime4
+	return acc
+}
+
+func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
+func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
new file mode 100644
index 0000000..35318d7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
@@ -0,0 +1,13 @@
+// +build !appengine
+// +build gc
+// +build !purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+//
+//go:noescape
+func Sum64(b []byte) uint64
+
+//go:noescape
+func writeBlocks(*Digest, []byte) int
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
new file mode 100644
index 0000000..d580e32
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
@@ -0,0 +1,215 @@
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Register allocation:
+// AX	h
+// CX	pointer to advance through b
+// DX	n
+// BX	loop end
+// R8	v1, k1
+// R9	v2
+// R10	v3
+// R11	v4
+// R12	tmp
+// R13	prime1v
+// R14	prime2v
+// R15	prime4v
+
+// round reads from and advances the buffer pointer in CX.
+// It assumes that R13 has prime1v and R14 has prime2v.
+#define round(r) \
+	MOVQ  (CX), R12 \
+	ADDQ  $8, CX    \
+	IMULQ R14, R12  \
+	ADDQ  R12, r    \
+	ROLQ  $31, r    \
+	IMULQ R13, r
+
+// mergeRound applies a merge round on the two registers acc and val.
+// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
+#define mergeRound(acc, val) \
+	IMULQ R14, val \
+	ROLQ  $31, val \
+	IMULQ R13, val \
+	XORQ  val, acc \
+	IMULQ R13, acc \
+	ADDQ  R15, acc
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT, $0-32
+	// Load fixed primes.
+	MOVQ ·prime1v(SB), R13
+	MOVQ ·prime2v(SB), R14
+	MOVQ ·prime4v(SB), R15
+
+	// Load slice.
+	MOVQ b_base+0(FP), CX
+	MOVQ b_len+8(FP), DX
+	LEAQ (CX)(DX*1), BX
+
+	// The first loop limit will be len(b)-32.
+	SUBQ $32, BX
+
+	// Check whether we have at least one block.
+	CMPQ DX, $32
+	JLT  noBlocks
+
+	// Set up initial state (v1, v2, v3, v4).
+	MOVQ R13, R8
+	ADDQ R14, R8
+	MOVQ R14, R9
+	XORQ R10, R10
+	XORQ R11, R11
+	SUBQ R13, R11
+
+	// Loop until CX > BX.
+blockLoop:
+	round(R8)
+	round(R9)
+	round(R10)
+	round(R11)
+
+	CMPQ CX, BX
+	JLE  blockLoop
+
+	MOVQ R8, AX
+	ROLQ $1, AX
+	MOVQ R9, R12
+	ROLQ $7, R12
+	ADDQ R12, AX
+	MOVQ R10, R12
+	ROLQ $12, R12
+	ADDQ R12, AX
+	MOVQ R11, R12
+	ROLQ $18, R12
+	ADDQ R12, AX
+
+	mergeRound(AX, R8)
+	mergeRound(AX, R9)
+	mergeRound(AX, R10)
+	mergeRound(AX, R11)
+
+	JMP afterBlocks
+
+noBlocks:
+	MOVQ ·prime5v(SB), AX
+
+afterBlocks:
+	ADDQ DX, AX
+
+	// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
+	ADDQ $24, BX
+
+	CMPQ CX, BX
+	JG   fourByte
+
+wordLoop:
+	// Calculate k1.
+	MOVQ  (CX), R8
+	ADDQ  $8, CX
+	IMULQ R14, R8
+	ROLQ  $31, R8
+	IMULQ R13, R8
+
+	XORQ  R8, AX
+	ROLQ  $27, AX
+	IMULQ R13, AX
+	ADDQ  R15, AX
+
+	CMPQ CX, BX
+	JLE  wordLoop
+
+fourByte:
+	ADDQ $4, BX
+	CMPQ CX, BX
+	JG   singles
+
+	MOVL  (CX), R8
+	ADDQ  $4, CX
+	IMULQ R13, R8
+	XORQ  R8, AX
+
+	ROLQ  $23, AX
+	IMULQ R14, AX
+	ADDQ  ·prime3v(SB), AX
+
+singles:
+	ADDQ $4, BX
+	CMPQ CX, BX
+	JGE  finalize
+
+singlesLoop:
+	MOVBQZX (CX), R12
+	ADDQ    $1, CX
+	IMULQ   ·prime5v(SB), R12
+	XORQ    R12, AX
+
+	ROLQ  $11, AX
+	IMULQ R13, AX
+
+	CMPQ CX, BX
+	JL   singlesLoop
+
+finalize:
+	MOVQ  AX, R12
+	SHRQ  $33, R12
+	XORQ  R12, AX
+	IMULQ R14, AX
+	MOVQ  AX, R12
+	SHRQ  $29, R12
+	XORQ  R12, AX
+	IMULQ ·prime3v(SB), AX
+	MOVQ  AX, R12
+	SHRQ  $32, R12
+	XORQ  R12, AX
+
+	MOVQ AX, ret+24(FP)
+	RET
+
+// writeBlocks uses the same registers as above except that it uses AX to store
+// the d pointer.
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT, $0-40
+	// Load fixed primes needed for round.
+	MOVQ ·prime1v(SB), R13
+	MOVQ ·prime2v(SB), R14
+
+	// Load slice.
+	MOVQ b_base+8(FP), CX
+	MOVQ b_len+16(FP), DX
+	LEAQ (CX)(DX*1), BX
+	SUBQ $32, BX
+
+	// Load vN from d.
+	MOVQ d+0(FP), AX
+	MOVQ 0(AX), R8   // v1
+	MOVQ 8(AX), R9   // v2
+	MOVQ 16(AX), R10 // v3
+	MOVQ 24(AX), R11 // v4
+
+	// We don't need to check the loop condition here; this function is
+	// always called with at least one block of data to process.
+blockLoop:
+	round(R8)
+	round(R9)
+	round(R10)
+	round(R11)
+
+	CMPQ CX, BX
+	JLE  blockLoop
+
+	// Copy vN back to d.
+	MOVQ R8, 0(AX)
+	MOVQ R9, 8(AX)
+	MOVQ R10, 16(AX)
+	MOVQ R11, 24(AX)
+
+	// The number of bytes written is CX minus the old base pointer.
+	SUBQ b_base+8(FP), CX
+	MOVQ CX, ret+32(FP)
+
+	RET
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
new file mode 100644
index 0000000..4a5a821
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
@@ -0,0 +1,76 @@
+// +build !amd64 appengine !gc purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+func Sum64(b []byte) uint64 {
+	// A simpler version would be
+	//   d := New()
+	//   d.Write(b)
+	//   return d.Sum64()
+	// but this is faster, particularly for small inputs.
+
+	n := len(b)
+	var h uint64
+
+	if n >= 32 {
+		v1 := prime1v + prime2
+		v2 := prime2
+		v3 := uint64(0)
+		v4 := -prime1v
+		for len(b) >= 32 {
+			v1 = round(v1, u64(b[0:8:len(b)]))
+			v2 = round(v2, u64(b[8:16:len(b)]))
+			v3 = round(v3, u64(b[16:24:len(b)]))
+			v4 = round(v4, u64(b[24:32:len(b)]))
+			b = b[32:len(b):len(b)]
+		}
+		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+		h = mergeRound(h, v1)
+		h = mergeRound(h, v2)
+		h = mergeRound(h, v3)
+		h = mergeRound(h, v4)
+	} else {
+		h = prime5
+	}
+
+	h += uint64(n)
+
+	i, end := 0, len(b)
+	for ; i+8 <= end; i += 8 {
+		k1 := round(0, u64(b[i:i+8:len(b)]))
+		h ^= k1
+		h = rol27(h)*prime1 + prime4
+	}
+	if i+4 <= end {
+		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+		h = rol23(h)*prime2 + prime3
+		i += 4
+	}
+	for ; i < end; i++ {
+		h ^= uint64(b[i]) * prime5
+		h = rol11(h) * prime1
+	}
+
+	h ^= h >> 33
+	h *= prime2
+	h ^= h >> 29
+	h *= prime3
+	h ^= h >> 32
+
+	return h
+}
+
+func writeBlocks(d *Digest, b []byte) int {
+	v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+	n := len(b)
+	for len(b) >= 32 {
+		v1 = round(v1, u64(b[0:8:len(b)]))
+		v2 = round(v2, u64(b[8:16:len(b)]))
+		v3 = round(v3, u64(b[16:24:len(b)]))
+		v4 = round(v4, u64(b[24:32:len(b)]))
+		b = b[32:len(b):len(b)]
+	}
+	d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
+	return n - len(b)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go
new file mode 100644
index 0000000..6f3b0cb
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go
@@ -0,0 +1,11 @@
+package xxhash
+
+// Sum64String computes the 64-bit xxHash digest of s.
+func Sum64String(s string) uint64 {
+	return Sum64([]byte(s))
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+func (d *Digest) WriteString(s string) (n int, err error) {
+	return d.Write([]byte(s))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
new file mode 100644
index 0000000..15a45f7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -0,0 +1,402 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+type seq struct {
+	litLen   uint32
+	matchLen uint32
+	offset   uint32
+
+	// Codes are stored here for the encoder
+	// so they only have to be looked up once.
+	llCode, mlCode, ofCode uint8
+}
+
+func (s seq) String() string {
+	if s.offset <= 3 {
+		if s.offset == 0 {
+			return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)")
+		}
+		return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)")
+	}
+	return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)")
+}
+
+type seqCompMode uint8
+
+const (
+	compModePredefined seqCompMode = iota
+	compModeRLE
+	compModeFSE
+	compModeRepeat
+)
+
+type sequenceDec struct {
+	// decoder keeps track of the current state and updates it from the bitstream.
+	fse    *fseDecoder
+	state  fseState
+	repeat bool
+}
+
+// init the state of the decoder with input from stream.
+func (s *sequenceDec) init(br *bitReader) error {
+	if s.fse == nil {
+		return errors.New("sequence decoder not defined")
+	}
+	s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog])
+	return nil
+}
+
+// sequenceDecs contains all 3 sequence decoders and their state.
+type sequenceDecs struct {
+	litLengths   sequenceDec
+	offsets      sequenceDec
+	matchLengths sequenceDec
+	prevOffset   [3]int
+	hist         []byte
+	literals     []byte
+	out          []byte
+	maxBits      uint8
+}
+
+// initialize all 3 decoders from the stream input.
+func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error {
+	if err := s.litLengths.init(br); err != nil {
+		return errors.New("litLengths:" + err.Error())
+	}
+	if err := s.offsets.init(br); err != nil {
+		return errors.New("offsets:" + err.Error())
+	}
+	if err := s.matchLengths.init(br); err != nil {
+		return errors.New("matchLengths:" + err.Error())
+	}
+	s.literals = literals
+	s.hist = hist.b
+	s.prevOffset = hist.recentOffsets
+	s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
+	s.out = out
+	return nil
+}
+
+// decode sequences from the stream with the provided history.
+func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
+	startSize := len(s.out)
+	// Grab full sizes tables, to avoid bounds checks.
+	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
+	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+
+	for i := seqs - 1; i >= 0; i-- {
+		if br.overread() {
+			printf("reading sequence %d, exceeded available data\n", seqs-i)
+			return io.ErrUnexpectedEOF
+		}
+		var litLen, matchOff, matchLen int
+		if br.off > 4+((maxOffsetBits+16+16)>>3) {
+			litLen, matchOff, matchLen = s.nextFast(br, llState, mlState, ofState)
+			br.fillFast()
+		} else {
+			litLen, matchOff, matchLen = s.next(br, llState, mlState, ofState)
+			br.fill()
+		}
+
+		if debugSequences {
+			println("Seq", seqs-i-1, "Litlen:", litLen, "matchOff:", matchOff, "(abs) matchLen:", matchLen)
+		}
+
+		if litLen > len(s.literals) {
+			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", litLen, len(s.literals))
+		}
+		size := litLen + matchLen + len(s.out)
+		if size-startSize > maxBlockSize {
+			return fmt.Errorf("output (%d) bigger than max block size", size)
+		}
+		if size > cap(s.out) {
+			// Not enough capacity. This is triggered extremely rarely,
+			// but can happen if the destination slice is too small for sync operations.
+			// We add maxBlockSize to the capacity.
+			s.out = append(s.out, make([]byte, maxBlockSize)...)
+			s.out = s.out[:len(s.out)-maxBlockSize]
+		}
+		if matchLen > maxMatchLen {
+			return fmt.Errorf("match len (%d) bigger than max allowed length", matchLen)
+		}
+		if matchOff > len(s.out)+len(hist)+litLen {
+			return fmt.Errorf("match offset (%d) bigger than current history (%d)", matchOff, len(s.out)+len(hist)+litLen)
+		}
+		if matchOff == 0 && matchLen > 0 {
+			return fmt.Errorf("zero matchoff and matchlen > 0")
+		}
+
+		s.out = append(s.out, s.literals[:litLen]...)
+		s.literals = s.literals[litLen:]
+		out := s.out
+
+		// Copy from history.
+		// TODO: Blocks without history could be made to ignore this completely.
+		if v := matchOff - len(s.out); v > 0 {
+			// v is the start position in history from end.
+			start := len(s.hist) - v
+			if matchLen > v {
+				// Some goes into current block.
+				// Copy remainder of history
+				out = append(out, s.hist[start:]...)
+				matchOff -= v
+				matchLen -= v
+			} else {
+				out = append(out, s.hist[start:start+matchLen]...)
+				matchLen = 0
+			}
+		}
+		// We must be in current buffer now
+		if matchLen > 0 {
+			start := len(s.out) - matchOff
+			if matchLen <= len(s.out)-start {
+				// No overlap
+				out = append(out, s.out[start:start+matchLen]...)
+			} else {
+				// Overlapping copy
+				// Extend destination slice and copy one byte at a time.
+				out = out[:len(out)+matchLen]
+				src := out[start : start+matchLen]
+				// Destination is the space we just added.
+				dst := out[len(out)-matchLen:]
+				dst = dst[:len(src)]
+				for i := range src {
+					dst[i] = src[i]
+				}
+			}
+		}
+		s.out = out
+		if i == 0 {
+			// This is the last sequence, so we shouldn't update state.
+			break
+		}
+
+		// Manually inlined: update all 3 states at once, approx 5-20% faster.
+		nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
+		if nBits == 0 {
+			llState = llTable[llState.newState()&maxTableMask]
+			mlState = mlTable[mlState.newState()&maxTableMask]
+			ofState = ofTable[ofState.newState()&maxTableMask]
+		} else {
+			bits := br.getBitsFast(nBits)
+			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
+			llState = llTable[(llState.newState()+lowBits)&maxTableMask]
+
+			lowBits = uint16(bits >> (ofState.nbBits() & 31))
+			lowBits &= bitMask[mlState.nbBits()&15]
+			mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
+
+			lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
+			ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
+		}
+	}
+
+	// Add final literals
+	s.out = append(s.out, s.literals...)
+	return nil
+}
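+
+// Illustration of the overlapping copy above (not part of the upstream
+// source): if s.out ends in "abc", matchOff=1 and matchLen=3, the
+// byte-at-a-time loop replicates the last byte, appending "ccc" — the
+// classic LZ77 run-length case.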
+
+// update states, at least 27 bits must be available.
+func (s *sequenceDecs) update(br *bitReader) {
+	// Max 8 bits
+	s.litLengths.state.next(br)
+	// Max 9 bits
+	s.matchLengths.state.next(br)
+	// Max 8 bits
+	s.offsets.state.next(br)
+}
+
+var bitMask [16]uint16
+
+func init() {
+	for i := range bitMask[:] {
+		bitMask[i] = uint16((1 << uint(i)) - 1)
+	}
+}
+
+// update states, at least 27 bits must be available.
+func (s *sequenceDecs) updateAlt(br *bitReader) {
+	// Update all 3 states at once. Approx 20% faster.
+	a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+
+	nBits := a.nbBits() + b.nbBits() + c.nbBits()
+	if nBits == 0 {
+		s.litLengths.state.state = s.litLengths.state.dt[a.newState()]
+		s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()]
+		s.offsets.state.state = s.offsets.state.dt[c.newState()]
+		return
+	}
+	bits := br.getBitsFast(nBits)
+	lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31))
+	s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits]
+
+	lowBits = uint16(bits >> (c.nbBits() & 31))
+	lowBits &= bitMask[b.nbBits()&15]
+	s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits]
+
+	lowBits = uint16(bits) & bitMask[c.nbBits()&15]
+	s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits]
+}
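+
+// Editorial note: the single getBitsFast read above packs the three state
+// updates from high to low as [ll|ml|of]. For example, with nbBits of 3, 2
+// and 4 for ll, ml and of, nBits is 9; bits>>6 holds the ll bits,
+// (bits>>4)&bitMask[2] the ml bits, and bits&bitMask[4] the of bits.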
+
+// nextFast decodes the next sequence (literal length, match offset, match length).
+// It must only be used when at least 4 unused bytes remain on the stream when done.
+func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
+	// Final will not read from stream.
+	ll, llB := llState.final()
+	ml, mlB := mlState.final()
+	mo, moB := ofState.final()
+
+	// extra bits are stored in reverse order.
+	br.fillFast()
+	mo += br.getBits(moB)
+	if s.maxBits > 32 {
+		br.fillFast()
+	}
+	ml += br.getBits(mlB)
+	ll += br.getBits(llB)
+
+	if moB > 1 {
+		s.prevOffset[2] = s.prevOffset[1]
+		s.prevOffset[1] = s.prevOffset[0]
+		s.prevOffset[0] = mo
+		return
+	}
+	// mo = s.adjustOffset(mo, ll, moB)
+	// Inlined for rather big speedup
+	if ll == 0 {
+		// There is an exception though, when current sequence's literals_length = 0.
+		// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+		// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+		mo++
+	}
+
+	if mo == 0 {
+		mo = s.prevOffset[0]
+		return
+	}
+	var temp int
+	if mo == 3 {
+		temp = s.prevOffset[0] - 1
+	} else {
+		temp = s.prevOffset[mo]
+	}
+
+	if temp == 0 {
+		// 0 is not valid; input is corrupted; force offset to 1
+		println("temp was 0")
+		temp = 1
+	}
+
+	if mo != 1 {
+		s.prevOffset[2] = s.prevOffset[1]
+	}
+	s.prevOffset[1] = s.prevOffset[0]
+	s.prevOffset[0] = temp
+	mo = temp
+	return
+}
+
+func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
+	// Final will not read from stream.
+	ll, llB := llState.final()
+	ml, mlB := mlState.final()
+	mo, moB := ofState.final()
+
+	// extra bits are stored in reverse order.
+	br.fill()
+	if s.maxBits <= 32 {
+		mo += br.getBits(moB)
+		ml += br.getBits(mlB)
+		ll += br.getBits(llB)
+	} else {
+		mo += br.getBits(moB)
+		br.fill()
+		// matchlength+literal length, max 32 bits
+		ml += br.getBits(mlB)
+		ll += br.getBits(llB)
+
+	}
+	mo = s.adjustOffset(mo, ll, moB)
+	return
+}
+
+func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int {
+	if offsetB > 1 {
+		s.prevOffset[2] = s.prevOffset[1]
+		s.prevOffset[1] = s.prevOffset[0]
+		s.prevOffset[0] = offset
+		return offset
+	}
+
+	if litLen == 0 {
+		// There is an exception though, when current sequence's literals_length = 0.
+		// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+		// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+		offset++
+	}
+
+	if offset == 0 {
+		return s.prevOffset[0]
+	}
+	var temp int
+	if offset == 3 {
+		temp = s.prevOffset[0] - 1
+	} else {
+		temp = s.prevOffset[offset]
+	}
+
+	if temp == 0 {
+		// 0 is not valid; input is corrupted; force offset to 1
+		println("temp was 0")
+		temp = 1
+	}
+
+	if offset != 1 {
+		s.prevOffset[2] = s.prevOffset[1]
+	}
+	s.prevOffset[1] = s.prevOffset[0]
+	s.prevOffset[0] = temp
+	return temp
+}
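+
+// Worked example (editorial, not upstream code): with prevOffset = [8, 4, 2]
+// and offsetB <= 1, a sequence with litLen > 0 and offset 1 resolves to
+// prevOffset[1] = 4 and the table becomes [4, 8, 2]; the same sequence with
+// litLen == 0 shifts offset to 2, resolves to prevOffset[2] = 2 and leaves
+// the table as [2, 8, 4].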
+
+// mergeHistory will merge history.
+func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) {
+	for i := uint(0); i < 3; i++ {
+		var sNew, sHist *sequenceDec
+		switch i {
+		default:
+			// same as "case 0":
+			sNew = &s.litLengths
+			sHist = &hist.litLengths
+		case 1:
+			sNew = &s.offsets
+			sHist = &hist.offsets
+		case 2:
+			sNew = &s.matchLengths
+			sHist = &hist.matchLengths
+		}
+		if sNew.repeat {
+			if sHist.fse == nil {
+				return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i)
+			}
+			continue
+		}
+		if sNew.fse == nil {
+			return nil, fmt.Errorf("sequence stream %d, no fse found", i)
+		}
+		if sHist.fse != nil && !sHist.fse.preDefined {
+			fseDecoderPool.Put(sHist.fse)
+		}
+		sHist.fse = sNew.fse
+	}
+	return hist, nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go
new file mode 100644
index 0000000..36bcc3c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go
@@ -0,0 +1,115 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "math/bits"
+
+type seqCoders struct {
+	llEnc, ofEnc, mlEnc    *fseEncoder
+	llPrev, ofPrev, mlPrev *fseEncoder
+}
+
+// swap coders with another (block).
+func (s *seqCoders) swap(other *seqCoders) {
+	*s, *other = *other, *s
+}
+
+// setPrev will update the previous encoders to the actually used ones
+// and make sure a fresh one is in the main slot.
+func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) {
+	compareSwap := func(used *fseEncoder, current, prev **fseEncoder) {
+		// We used the new one; move current to history and reuse the previous history.
+		if *current == used {
+			*prev, *current = *current, *prev
+			c := *current
+			p := *prev
+			c.reUsed = false
+			p.reUsed = true
+			return
+		}
+		if used == *prev {
+			return
+		}
+		// Ensure we cannot reuse by accident
+		prevEnc := *prev
+		prevEnc.symbolLen = 0
+		return
+	}
+	compareSwap(ll, &s.llEnc, &s.llPrev)
+	compareSwap(ml, &s.mlEnc, &s.mlPrev)
+	compareSwap(of, &s.ofEnc, &s.ofPrev)
+}
+
+// highBit returns floor(log2(val)); val must be > 0.
+func highBit(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
+
+var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7,
+	8, 9, 10, 11, 12, 13, 14, 15,
+	16, 16, 17, 17, 18, 18, 19, 19,
+	20, 20, 20, 20, 21, 21, 21, 21,
+	22, 22, 22, 22, 22, 22, 22, 22,
+	23, 23, 23, 23, 23, 23, 23, 23,
+	24, 24, 24, 24, 24, 24, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24}
+
+// Up to 6 bits
+const maxLLCode = 35
+
+// llBitsTable translates from ll code to number of bits.
+var llBitsTable = [maxLLCode + 1]byte{
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 2, 2, 3, 3,
+	4, 6, 7, 8, 9, 10, 11, 12,
+	13, 14, 15, 16}
+
+// llCode returns the code that represents the literal length requested.
+func llCode(litLength uint32) uint8 {
+	const llDeltaCode = 19
+	if litLength <= 63 {
+		// Compiler insists on bounds check (Go 1.12)
+		return llCodeTable[litLength&63]
+	}
+	return uint8(highBit(litLength)) + llDeltaCode
+}
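+
+// For example (editorial note): llCode(32) is looked up directly and yields
+// code 22 (3 extra bits per llBitsTable), while llCode(100) takes the highBit
+// path: highBit(100) = 6, so the code is 6 + 19 = 25 with 6 extra bits.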
+
+var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+	16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+	32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
+	38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
+	40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+	41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+	42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
+	42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42}
+
+// Up to 6 bits
+const maxMLCode = 52
+
+// mlBitsTable translates from ml code to number of bits.
+var mlBitsTable = [maxMLCode + 1]byte{
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 2, 2, 3, 3,
+	4, 4, 5, 7, 8, 9, 10, 11,
+	12, 13, 14, 15, 16}
+
+// Note: mlBase = matchLength - MINMATCH,
+// because that is the storage format used in seqStore->sequences.
+func mlCode(mlBase uint32) uint8 {
+	const mlDeltaCode = 36
+	if mlBase <= 127 {
+		// Compiler insists on bounds check (Go 1.12)
+		return mlCodeTable[mlBase&127]
+	}
+	return uint8(highBit(mlBase)) + mlDeltaCode
+}
+
+func ofCode(offset uint32) uint8 {
+	// A valid offset will always be > 0.
+	return uint8(bits.Len32(offset) - 1)
+}
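+
+// For example (editorial note): mlCode(100) is looked up directly and yields
+// code 42 (5 extra bits per mlBitsTable), while ofCode is simply
+// floor(log2(offset)): ofCode(1) == 0 and ofCode(1024) == 10.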
diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go
new file mode 100644
index 0000000..356956b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/snappy.go
@@ -0,0 +1,436 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"encoding/binary"
+	"errors"
+	"hash/crc32"
+	"io"
+
+	"github.com/klauspost/compress/huff0"
+	"github.com/klauspost/compress/snappy"
+)
+
+const (
+	snappyTagLiteral = 0x00
+	snappyTagCopy1   = 0x01
+	snappyTagCopy2   = 0x02
+	snappyTagCopy4   = 0x03
+)
+
+const (
+	snappyChecksumSize = 4
+	snappyMagicBody    = "sNaPpY"
+
+	// snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not
+	// part of the wire format per se, but some parts of the encoder assume
+	// that an offset fits into a uint16.
+	//
+	// Also, for the framing format (Writer type instead of Encode function),
+	// https://github.com/google/snappy/blob/master/framing_format.txt says
+	// that "the uncompressed data in a chunk must be no longer than 65536
+	// bytes".
+	snappyMaxBlockSize = 65536
+
+	// snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is
+	// hard coded to be a const instead of a variable, so that obufLen can also
+	// be a const. Their equivalence is confirmed by
+	// TestMaxEncodedLenOfMaxBlockSize.
+	snappyMaxEncodedLenOfMaxBlockSize = 76490
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
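+
+// Editorial note: each framed chunk begins with a 4-byte header: byte 0 is the
+// chunk type above and bytes 1-3 are the chunk body length as a little-endian
+// uint24. Convert parses exactly this layout from r.buf[:4].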
+
+var (
+	// ErrSnappyCorrupt reports that the input is invalid.
+	ErrSnappyCorrupt = errors.New("snappy: corrupt input")
+	// ErrSnappyTooLarge reports that the uncompressed length is too large.
+	ErrSnappyTooLarge = errors.New("snappy: decoded block is too large")
+	// ErrSnappyUnsupported reports that the input isn't supported.
+	ErrSnappyUnsupported = errors.New("snappy: unsupported input")
+
+	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// SnappyConverter can read Snappy-compressed streams and convert them to zstd.
+// Conversion is done by translating the stream directly from Snappy without
+// intermediate full decoding.
+// Therefore the compression ratio is much lower than what full decompression and
+// recompression could achieve, and a faulty Snappy stream may lead to a faulty
+// Zstandard stream without any errors being generated.
+// No CRC value is generated, and not all CRC values of the Snappy stream are checked.
+// However, it provides very fast recompression of Snappy streams.
+// The converter can be reused to avoid allocations, even after errors.
+type SnappyConverter struct {
+	r     io.Reader
+	err   error
+	buf   []byte
+	block *blockEnc
+}
+
+// Convert reads the Snappy stream supplied in 'in' and writes the equivalent Zstandard stream to 'w'.
+// If any error is detected on the Snappy stream it is returned.
+// The number of bytes written is returned.
+func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
+	initPredefined()
+	r.err = nil
+	r.r = in
+	if r.block == nil {
+		r.block = &blockEnc{}
+		r.block.init()
+	}
+	r.block.initNewEncode()
+	if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize {
+		r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize)
+	}
+	r.block.litEnc.Reuse = huff0.ReusePolicyNone
+	var written int64
+	var readHeader bool
+	{
+		var header []byte
+		var n int
+		header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
+		if r.err != nil {
+			return written, r.err
+		}
+		n, r.err = w.Write(header)
+		if r.err != nil {
+			return written, r.err
+		}
+		written += int64(n)
+	}
+
+	for {
+		if !r.readFull(r.buf[:4], true) {
+			// Add empty last block
+			r.block.reset(nil)
+			r.block.last = true
+			err := r.block.encodeLits(false)
+			if err != nil {
+				return written, err
+			}
+			n, err := w.Write(r.block.output)
+			if err != nil {
+				return written, err
+			}
+			written += int64(n)
+
+			return written, r.err
+		}
+		chunkType := r.buf[0]
+		if !readHeader {
+			if chunkType != chunkTypeStreamIdentifier {
+				println("chunkType != chunkTypeStreamIdentifier", chunkType)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			readHeader = true
+		}
+		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+		if chunkLen > len(r.buf) {
+			println("chunkLen > len(r.buf)", chunkLen, len(r.buf))
+			r.err = ErrSnappyUnsupported
+			return written, r.err
+		}
+
+		// The chunk types are specified at
+		// https://github.com/google/snappy/blob/master/framing_format.txt
+		switch chunkType {
+		case chunkTypeCompressedData:
+			// Section 4.2. Compressed data (chunk type 0x00).
+			if chunkLen < snappyChecksumSize {
+				println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			buf := r.buf[:chunkLen]
+			if !r.readFull(buf, false) {
+				return written, r.err
+			}
+			//checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			buf = buf[snappyChecksumSize:]
+
+			n, hdr, err := snappyDecodedLen(buf)
+			if err != nil {
+				r.err = err
+				return written, r.err
+			}
+			buf = buf[hdr:]
+			if n > snappyMaxBlockSize {
+				println("n > snappyMaxBlockSize", n, snappyMaxBlockSize)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			r.block.reset(nil)
+			r.block.pushOffsets()
+			if err := decodeSnappy(r.block, buf); err != nil {
+				r.err = err
+				return written, r.err
+			}
+			if r.block.size+r.block.extraLits != n {
+				printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			err = r.block.encode(false)
+			switch err {
+			case errIncompressible:
+				r.block.popOffsets()
+				r.block.reset(nil)
+				r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen])
+				if err != nil {
+					println("snappy.Decode:", err)
+					return written, err
+				}
+				err = r.block.encodeLits(false)
+				if err != nil {
+					return written, err
+				}
+			case nil:
+			default:
+				return written, err
+			}
+
+			n, r.err = w.Write(r.block.output)
+			if r.err != nil {
+				return written, r.err
+			}
+			written += int64(n)
+			continue
+		case chunkTypeUncompressedData:
+			if debug {
+				println("Uncompressed, chunklen", chunkLen)
+			}
+			// Section 4.3. Uncompressed data (chunk type 0x01).
+			if chunkLen < snappyChecksumSize {
+				println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			r.block.reset(nil)
+			buf := r.buf[:snappyChecksumSize]
+			if !r.readFull(buf, false) {
+				return written, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			// Read the payload directly into r.block.literals instead of via r.buf.
+			n := chunkLen - snappyChecksumSize
+			if n > snappyMaxBlockSize {
+				println("n > snappyMaxBlockSize", n, snappyMaxBlockSize)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			r.block.literals = r.block.literals[:n]
+			if !r.readFull(r.block.literals, false) {
+				return written, r.err
+			}
+			if snappyCRC(r.block.literals) != checksum {
+				println("literals crc mismatch")
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			err := r.block.encodeLits(false)
+			if err != nil {
+				return written, err
+			}
+			n, r.err = w.Write(r.block.output)
+			if r.err != nil {
+				return written, r.err
+			}
+			written += int64(n)
+			continue
+
+		case chunkTypeStreamIdentifier:
+			if debug {
+				println("stream id", chunkLen, len(snappyMagicBody))
+			}
+			// Section 4.1. Stream identifier (chunk type 0xff).
+			if chunkLen != len(snappyMagicBody) {
+				println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody))
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			if !r.readFull(r.buf[:len(snappyMagicBody)], false) {
+				return written, r.err
+			}
+			for i := 0; i < len(snappyMagicBody); i++ {
+				if r.buf[i] != snappyMagicBody[i] {
+					println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i)
+					r.err = ErrSnappyCorrupt
+					return written, r.err
+				}
+			}
+			continue
+		}
+
+		if chunkType <= 0x7f {
+			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+			println("chunkType <= 0x7f")
+			r.err = ErrSnappyUnsupported
+			return written, r.err
+		}
+		// Section 4.4 Padding (chunk type 0xfe).
+		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+		if !r.readFull(r.buf[:chunkLen], false) {
+			return written, r.err
+		}
+	}
+}
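+
+// A minimal usage sketch (editorial, assuming a framed Snappy source reader
+// and any io.Writer destination):
+//
+//	var conv SnappyConverter
+//	written, err := conv.Convert(snappyReader, zstdWriter)
+//	if err != nil {
+//		// input was not a valid framed Snappy stream, or writing failed
+//	}
+//	_ = written // number of zstd bytes written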
+
+// decodeSnappy decodes src into blk as literals and sequences. It assumes that the
+// varint-encoded length of the decompressed bytes has already been read.
+func decodeSnappy(blk *blockEnc, src []byte) error {
+	//decodeRef(make([]byte, snappyMaxBlockSize), src)
+	var s, length int
+	lits := blk.extraLits
+	var offset uint32
+	for s < len(src) {
+		switch src[s] & 0x03 {
+		case snappyTagLiteral:
+			x := uint32(src[s] >> 2)
+			switch {
+			case x < 60:
+				s++
+			case x == 60:
+				s += 2
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					println("uint(s) > uint(len(src))", s, src)
+					return ErrSnappyCorrupt
+				}
+				x = uint32(src[s-1])
+			case x == 61:
+				s += 3
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					println("uint(s) > uint(len(src))", s, src)
+					return ErrSnappyCorrupt
+				}
+				x = uint32(src[s-2]) | uint32(src[s-1])<<8
+			case x == 62:
+				s += 4
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					println("uint(s) > uint(len(src))", s, src)
+					return ErrSnappyCorrupt
+				}
+				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+			case x == 63:
+				s += 5
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					println("uint(s) > uint(len(src))", s, src)
+					return ErrSnappyCorrupt
+				}
+				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+			}
+			if x > snappyMaxBlockSize {
+				println("x > snappyMaxBlockSize", x, snappyMaxBlockSize)
+				return ErrSnappyCorrupt
+			}
+			length = int(x) + 1
+			if length <= 0 {
+				println("length <= 0", length)
+				return errUnsupportedLiteralLength
+			}
+			//if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s {
+			//	return ErrSnappyCorrupt
+			//}
+
+			blk.literals = append(blk.literals, src[s:s+length]...)
+			//println(length, "litLen")
+			lits += length
+			s += length
+			continue
+
+		case snappyTagCopy1:
+			s += 2
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				println("uint(s) > uint(len(src))", s, len(src))
+				return ErrSnappyCorrupt
+			}
+			length = 4 + int(src[s-2])>>2&0x7
+			offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])
+
+		case snappyTagCopy2:
+			s += 3
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				println("uint(s) > uint(len(src))", s, len(src))
+				return ErrSnappyCorrupt
+			}
+			length = 1 + int(src[s-3])>>2
+			offset = uint32(src[s-2]) | uint32(src[s-1])<<8
+
+		case snappyTagCopy4:
+			s += 5
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				println("uint(s) > uint(len(src))", s, len(src))
+				return ErrSnappyCorrupt
+			}
+			length = 1 + int(src[s-5])>>2
+			offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+		}
+
+		if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ {
+			println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits)
+			return ErrSnappyCorrupt
+		}
+
+		// Check if offset is one of the recent offsets.
+		// Adjusts the output offset accordingly.
+		// Gives a tiny bit of compression, typically around 1%.
+		if false {
+			offset = blk.matchOffset(offset, uint32(lits))
+		} else {
+			offset += 3
+		}
+
+		blk.sequences = append(blk.sequences, seq{
+			litLen:   uint32(lits),
+			offset:   offset,
+			matchLen: uint32(length) - zstdMinMatch,
+		})
+		blk.size += length + lits
+		lits = 0
+	}
+	blk.extraLits = lits
+	return nil
+}
+
+func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) {
+	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+		if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+			r.err = ErrSnappyCorrupt
+		}
+		return false
+	}
+	return true
+}
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// snappyCRC implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func snappyCRC(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
+	return uint32(c>>15|c<<17) + 0xa282ead8
+}
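+
+// Editorial note: this is the "masked" CRC-32C from the Snappy framing format:
+// the raw Castagnoli checksum is rotated right by 15 bits and offset by
+// 0xa282ead8, which makes checksums of data that itself contains CRCs less
+// prone to accidental matches.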
+
+// snappyDecodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) {
+	v, n := binary.Uvarint(src)
+	if n <= 0 || v > 0xffffffff {
+		return 0, 0, ErrSnappyCorrupt
+	}
+
+	const wordSize = 32 << (^uint(0) >> 32 & 1)
+	if wordSize == 32 && v > 0x7fffffff {
+		return 0, 0, ErrSnappyTooLarge
+	}
+	return int(v), n, nil
+}
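+
+// For example (editorial note): a chunk whose decoded length is 300 starts
+// with the uvarint bytes 0xAC 0x02, so snappyDecodedLen returns
+// blockLen = 300 and headerLen = 2.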
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
new file mode 100644
index 0000000..57a8a2f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -0,0 +1,136 @@
+// Package zstd provides compression and decompression of Zstandard files.
+//
+// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd
+package zstd
+
+import (
+	"errors"
+	"log"
+	"math/bits"
+)
+
+const debug = false
+const debugSequences = false
+const debugMatches = false
+
+// force encoder to use predefined tables.
+const forcePreDef = false
+
+// zstdMinMatch is the minimum zstd match length.
+const zstdMinMatch = 3
+
+var (
+	// ErrReservedBlockType is returned when a reserved block type is found.
+	// Typically this indicates wrong or corrupted input.
+	ErrReservedBlockType = errors.New("invalid input: reserved block type encountered")
+
+	// ErrCompressedSizeTooBig is returned when a block is bigger than allowed.
+	// Typically this indicates wrong or corrupted input.
+	ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big")
+
+	// ErrBlockTooSmall is returned when a block is too small to be decoded.
+	// Typically returned on invalid input.
+	ErrBlockTooSmall = errors.New("block too small")
+
+	// ErrMagicMismatch is returned when a "magic" number isn't what is expected.
+	// Typically this indicates wrong or corrupted input.
+	ErrMagicMismatch = errors.New("invalid input: magic number mismatch")
+
+	// ErrWindowSizeExceeded is returned when a reference exceeds the valid window size.
+	// Typically this indicates wrong or corrupted input.
+	ErrWindowSizeExceeded = errors.New("window size exceeded")
+
+	// ErrWindowSizeTooSmall is returned when no window size is specified.
+	// Typically this indicates wrong or corrupted input.
+	ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small")
+
+	// ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit.
+	ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit")
+
+	// ErrUnknownDictionary is returned if the dictionary ID is unknown.
+	// For the time being dictionaries are not supported.
+	ErrUnknownDictionary = errors.New("unknown dictionary")
+
+	// ErrFrameSizeExceeded is returned if the stated frame size is exceeded.
+	// This is only returned if SingleSegment is specified on the frame.
+	ErrFrameSizeExceeded = errors.New("frame size exceeded")
+
+	// ErrCRCMismatch is returned if CRC mismatches.
+	ErrCRCMismatch = errors.New("CRC check failed")
+
+	// ErrDecoderClosed will be returned if the Decoder was used after
+	// Close has been called.
+	ErrDecoderClosed = errors.New("decoder used after Close")
+)
+
+func println(a ...interface{}) {
+	if debug {
+		log.Println(a...)
+	}
+}
+
+func printf(format string, a ...interface{}) {
+	if debug {
+		log.Printf(format, a...)
+	}
+}
+
+// matchLen returns the number of leading bytes that match in a and b.
+// a must be the shorter of the two.
+func matchLen(a, b []byte) int {
+	b = b[:len(a)]
+	for i := 0; i < len(a)-7; i += 8 {
+		if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+			return i + (bits.TrailingZeros64(diff) >> 3)
+		}
+	}
+	checked := (len(a) >> 3) << 3
+	a = a[checked:]
+	b = b[checked:]
+	// TODO: We could also do a 4-byte check before this byte-by-byte tail.
+	for i := range a {
+		if a[i] != b[i] {
+			return i + checked
+		}
+	}
+	return len(a) + checked
+}
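+
+// For example (editorial note): matchLen([]byte("gopher"), []byte("gopxyz"))
+// returns 3, since only the first three bytes match.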
+
+// matchLenIn returns the match length in src between indexes s and t.
+func matchLenIn(src []byte, s, t int32) int32 {
+	s1 := len(src)
+	b := src[t:]
+	a := src[s:s1]
+	b = b[:len(a)]
+	// Extend the match to be as long as possible.
+	for i := range a {
+		if a[i] != b[i] {
+			return int32(i)
+		}
+	}
+	return int32(len(a))
+}
+
+func load3232(b []byte, i int32) uint32 {
+	// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+	b = b[i:]
+	b = b[:4]
+	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load6432(b []byte, i int32) uint64 {
+	// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+	b = b[i:]
+	b = b[:8]
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func load64(b []byte, i int) uint64 {
+	// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
+	b = b[i:]
+	b = b[:8]
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
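+
+// Editorial note: the loaders above read little-endian, so for instance
+// load64([]byte{1, 0, 0, 0, 0, 0, 0, 0}, 0) == 1.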
diff --git a/vendor/github.com/opencord/voltha-protos/v3/go/common/common.pb.go b/vendor/github.com/opencord/voltha-protos/v3/go/common/common.pb.go
new file mode 100644
index 0000000..c4b9028
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v3/go/common/common.pb.go
@@ -0,0 +1,464 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/common.proto
+
+package common
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type TestModeKeys int32
+
+const (
+	TestModeKeys_api_test TestModeKeys = 0
+)
+
+var TestModeKeys_name = map[int32]string{
+	0: "api_test",
+}
+
+var TestModeKeys_value = map[string]int32{
+	"api_test": 0,
+}
+
+func (x TestModeKeys) String() string {
+	return proto.EnumName(TestModeKeys_name, int32(x))
+}
+
+func (TestModeKeys) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{0}
+}
+
+// Administrative State
+type AdminState_Types int32
+
+const (
+	// The administrative state of the device is unknown
+	AdminState_UNKNOWN AdminState_Types = 0
+	// The device is pre-provisioned into Voltha, but not contacted by it
+	AdminState_PREPROVISIONED AdminState_Types = 1
+	// The device is enabled for activation and operation
+	AdminState_ENABLED AdminState_Types = 2
+	// The device is disabled and shall not perform its intended forwarding
+	// functions other than being available for re-activation.
+	AdminState_DISABLED AdminState_Types = 3
+	// The device is in the state of image download
+	AdminState_DOWNLOADING_IMAGE AdminState_Types = 4
+	// The device is marked to be deleted
+	AdminState_DELETED AdminState_Types = 5
+)
+
+var AdminState_Types_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "PREPROVISIONED",
+	2: "ENABLED",
+	3: "DISABLED",
+	4: "DOWNLOADING_IMAGE",
+	5: "DELETED",
+}
+
+var AdminState_Types_value = map[string]int32{
+	"UNKNOWN":           0,
+	"PREPROVISIONED":    1,
+	"ENABLED":           2,
+	"DISABLED":          3,
+	"DOWNLOADING_IMAGE": 4,
+	"DELETED":           5,
+}
+
+func (x AdminState_Types) String() string {
+	return proto.EnumName(AdminState_Types_name, int32(x))
+}
+
+func (AdminState_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{2, 0}
+}
+
+// Operational Status
+type OperStatus_Types int32
+
+const (
+	// The status of the device is unknown at this point
+	OperStatus_UNKNOWN OperStatus_Types = 0
+	// The device has been discovered, but not yet activated
+	OperStatus_DISCOVERED OperStatus_Types = 1
+	// The device is being activated (booted, rebooted, upgraded, etc.)
+	OperStatus_ACTIVATING OperStatus_Types = 2
+	// Service impacting tests are being conducted
+	OperStatus_TESTING OperStatus_Types = 3
+	// The device is up and active
+	OperStatus_ACTIVE OperStatus_Types = 4
+	// The device has failed and cannot fulfill its intended role
+	OperStatus_FAILED OperStatus_Types = 5
+)
+
+var OperStatus_Types_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "DISCOVERED",
+	2: "ACTIVATING",
+	3: "TESTING",
+	4: "ACTIVE",
+	5: "FAILED",
+}
+
+var OperStatus_Types_value = map[string]int32{
+	"UNKNOWN":    0,
+	"DISCOVERED": 1,
+	"ACTIVATING": 2,
+	"TESTING":    3,
+	"ACTIVE":     4,
+	"FAILED":     5,
+}
+
+func (x OperStatus_Types) String() string {
+	return proto.EnumName(OperStatus_Types_name, int32(x))
+}
+
+func (OperStatus_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{3, 0}
+}
+
+// Connectivity Status
+type ConnectStatus_Types int32
+
+const (
+	// The device connectivity status is unknown
+	ConnectStatus_UNKNOWN ConnectStatus_Types = 0
+	// The device cannot be reached by Voltha
+	ConnectStatus_UNREACHABLE ConnectStatus_Types = 1
+	// There is live communication between device and Voltha
+	ConnectStatus_REACHABLE ConnectStatus_Types = 2
+)
+
+var ConnectStatus_Types_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "UNREACHABLE",
+	2: "REACHABLE",
+}
+
+var ConnectStatus_Types_value = map[string]int32{
+	"UNKNOWN":     0,
+	"UNREACHABLE": 1,
+	"REACHABLE":   2,
+}
+
+func (x ConnectStatus_Types) String() string {
+	return proto.EnumName(ConnectStatus_Types_name, int32(x))
+}
+
+func (ConnectStatus_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{4, 0}
+}
+
+type OperationResp_OperationReturnCode int32
+
+const (
+	OperationResp_OPERATION_SUCCESS     OperationResp_OperationReturnCode = 0
+	OperationResp_OPERATION_FAILURE     OperationResp_OperationReturnCode = 1
+	OperationResp_OPERATION_UNSUPPORTED OperationResp_OperationReturnCode = 2
+)
+
+var OperationResp_OperationReturnCode_name = map[int32]string{
+	0: "OPERATION_SUCCESS",
+	1: "OPERATION_FAILURE",
+	2: "OPERATION_UNSUPPORTED",
+}
+
+var OperationResp_OperationReturnCode_value = map[string]int32{
+	"OPERATION_SUCCESS":     0,
+	"OPERATION_FAILURE":     1,
+	"OPERATION_UNSUPPORTED": 2,
+}
+
+func (x OperationResp_OperationReturnCode) String() string {
+	return proto.EnumName(OperationResp_OperationReturnCode_name, int32(x))
+}
+
+func (OperationResp_OperationReturnCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{5, 0}
+}
+
+// Convey a resource identifier
+type ID struct {
+	Id                   string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ID) Reset()         { *m = ID{} }
+func (m *ID) String() string { return proto.CompactTextString(m) }
+func (*ID) ProtoMessage()    {}
+func (*ID) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{0}
+}
+
+func (m *ID) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ID.Unmarshal(m, b)
+}
+func (m *ID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ID.Marshal(b, m, deterministic)
+}
+func (m *ID) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ID.Merge(m, src)
+}
+func (m *ID) XXX_Size() int {
+	return xxx_messageInfo_ID.Size(m)
+}
+func (m *ID) XXX_DiscardUnknown() {
+	xxx_messageInfo_ID.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ID proto.InternalMessageInfo
+
+func (m *ID) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+// Represents a list of IDs
+type IDs struct {
+	Items                []*ID    `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *IDs) Reset()         { *m = IDs{} }
+func (m *IDs) String() string { return proto.CompactTextString(m) }
+func (*IDs) ProtoMessage()    {}
+func (*IDs) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{1}
+}
+
+func (m *IDs) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_IDs.Unmarshal(m, b)
+}
+func (m *IDs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_IDs.Marshal(b, m, deterministic)
+}
+func (m *IDs) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_IDs.Merge(m, src)
+}
+func (m *IDs) XXX_Size() int {
+	return xxx_messageInfo_IDs.Size(m)
+}
+func (m *IDs) XXX_DiscardUnknown() {
+	xxx_messageInfo_IDs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IDs proto.InternalMessageInfo
+
+func (m *IDs) GetItems() []*ID {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+type AdminState struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AdminState) Reset()         { *m = AdminState{} }
+func (m *AdminState) String() string { return proto.CompactTextString(m) }
+func (*AdminState) ProtoMessage()    {}
+func (*AdminState) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{2}
+}
+
+func (m *AdminState) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AdminState.Unmarshal(m, b)
+}
+func (m *AdminState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AdminState.Marshal(b, m, deterministic)
+}
+func (m *AdminState) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AdminState.Merge(m, src)
+}
+func (m *AdminState) XXX_Size() int {
+	return xxx_messageInfo_AdminState.Size(m)
+}
+func (m *AdminState) XXX_DiscardUnknown() {
+	xxx_messageInfo_AdminState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AdminState proto.InternalMessageInfo
+
+type OperStatus struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OperStatus) Reset()         { *m = OperStatus{} }
+func (m *OperStatus) String() string { return proto.CompactTextString(m) }
+func (*OperStatus) ProtoMessage()    {}
+func (*OperStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{3}
+}
+
+func (m *OperStatus) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OperStatus.Unmarshal(m, b)
+}
+func (m *OperStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OperStatus.Marshal(b, m, deterministic)
+}
+func (m *OperStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OperStatus.Merge(m, src)
+}
+func (m *OperStatus) XXX_Size() int {
+	return xxx_messageInfo_OperStatus.Size(m)
+}
+func (m *OperStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_OperStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OperStatus proto.InternalMessageInfo
+
+type ConnectStatus struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ConnectStatus) Reset()         { *m = ConnectStatus{} }
+func (m *ConnectStatus) String() string { return proto.CompactTextString(m) }
+func (*ConnectStatus) ProtoMessage()    {}
+func (*ConnectStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{4}
+}
+
+func (m *ConnectStatus) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ConnectStatus.Unmarshal(m, b)
+}
+func (m *ConnectStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ConnectStatus.Marshal(b, m, deterministic)
+}
+func (m *ConnectStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConnectStatus.Merge(m, src)
+}
+func (m *ConnectStatus) XXX_Size() int {
+	return xxx_messageInfo_ConnectStatus.Size(m)
+}
+func (m *ConnectStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConnectStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConnectStatus proto.InternalMessageInfo
+
+type OperationResp struct {
+	// Return code
+	Code OperationResp_OperationReturnCode `protobuf:"varint,1,opt,name=code,proto3,enum=common.OperationResp_OperationReturnCode" json:"code,omitempty"`
+	// Additional Info
+	AdditionalInfo       string   `protobuf:"bytes,2,opt,name=additional_info,json=additionalInfo,proto3" json:"additional_info,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OperationResp) Reset()         { *m = OperationResp{} }
+func (m *OperationResp) String() string { return proto.CompactTextString(m) }
+func (*OperationResp) ProtoMessage()    {}
+func (*OperationResp) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{5}
+}
+
+func (m *OperationResp) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OperationResp.Unmarshal(m, b)
+}
+func (m *OperationResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OperationResp.Marshal(b, m, deterministic)
+}
+func (m *OperationResp) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OperationResp.Merge(m, src)
+}
+func (m *OperationResp) XXX_Size() int {
+	return xxx_messageInfo_OperationResp.Size(m)
+}
+func (m *OperationResp) XXX_DiscardUnknown() {
+	xxx_messageInfo_OperationResp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OperationResp proto.InternalMessageInfo
+
+func (m *OperationResp) GetCode() OperationResp_OperationReturnCode {
+	if m != nil {
+		return m.Code
+	}
+	return OperationResp_OPERATION_SUCCESS
+}
+
+func (m *OperationResp) GetAdditionalInfo() string {
+	if m != nil {
+		return m.AdditionalInfo
+	}
+	return ""
+}
+
+func init() {
+	proto.RegisterEnum("common.TestModeKeys", TestModeKeys_name, TestModeKeys_value)
+	proto.RegisterEnum("common.AdminState_Types", AdminState_Types_name, AdminState_Types_value)
+	proto.RegisterEnum("common.OperStatus_Types", OperStatus_Types_name, OperStatus_Types_value)
+	proto.RegisterEnum("common.ConnectStatus_Types", ConnectStatus_Types_name, ConnectStatus_Types_value)
+	proto.RegisterEnum("common.OperationResp_OperationReturnCode", OperationResp_OperationReturnCode_name, OperationResp_OperationReturnCode_value)
+	proto.RegisterType((*ID)(nil), "common.ID")
+	proto.RegisterType((*IDs)(nil), "common.IDs")
+	proto.RegisterType((*AdminState)(nil), "common.AdminState")
+	proto.RegisterType((*OperStatus)(nil), "common.OperStatus")
+	proto.RegisterType((*ConnectStatus)(nil), "common.ConnectStatus")
+	proto.RegisterType((*OperationResp)(nil), "common.OperationResp")
+}
+
+func init() { proto.RegisterFile("voltha_protos/common.proto", fileDescriptor_c2e3fd231961e826) }
+
+var fileDescriptor_c2e3fd231961e826 = []byte{
+	// 480 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0xd1, 0x4e, 0xdb, 0x30,
+	0x14, 0x6d, 0x52, 0xda, 0x8d, 0x5b, 0x1a, 0x32, 0x33, 0xa4, 0x6e, 0xda, 0x43, 0x95, 0x17, 0xd8,
+	0xa4, 0xb5, 0x12, 0xec, 0x75, 0x0f, 0x21, 0xf6, 0x3a, 0x8b, 0x62, 0x57, 0x4e, 0x52, 0xb4, 0x3d,
+	0xac, 0x0a, 0x8d, 0x29, 0x91, 0x68, 0x1c, 0x25, 0x2e, 0x12, 0x5f, 0xba, 0xdf, 0x99, 0x9c, 0x14,
+	0x75, 0x48, 0x7d, 0x3c, 0xe7, 0x5c, 0xdf, 0xe3, 0x7b, 0xee, 0x85, 0x8f, 0x4f, 0xea, 0x51, 0x3f,
+	0x24, 0x8b, 0xa2, 0x54, 0x5a, 0x55, 0xe3, 0xa5, 0x5a, 0xaf, 0x55, 0x3e, 0xaa, 0x11, 0xea, 0x36,
+	0xc8, 0x7b, 0x0f, 0x36, 0xc5, 0xc8, 0x01, 0x3b, 0x4b, 0x07, 0xd6, 0xd0, 0x3a, 0x3f, 0x14, 0x76,
+	0x96, 0x7a, 0x67, 0xd0, 0xa6, 0xb8, 0x42, 0x43, 0xe8, 0x64, 0x5a, 0xae, 0xab, 0x81, 0x35, 0x6c,
+	0x9f, 0xf7, 0x2e, 0x60, 0xb4, 0x6d, 0x41, 0xb1, 0x68, 0x04, 0x6f, 0x03, 0xe0, 0xa7, 0xeb, 0x2c,
+	0x0f, 0x75, 0xa2, 0xa5, 0xb7, 0x82, 0x4e, 0xf4, 0x5c, 0xc8, 0x0a, 0xf5, 0xe0, 0x4d, 0xcc, 0xae,
+	0x19, 0xbf, 0x65, 0x6e, 0x0b, 0x21, 0x70, 0x66, 0x82, 0xcc, 0x04, 0x9f, 0xd3, 0x90, 0x72, 0x46,
+	0xb0, 0x6b, 0x99, 0x02, 0xc2, 0xfc, 0xab, 0x29, 0xc1, 0xae, 0x8d, 0x8e, 0xe0, 0x2d, 0xa6, 0x61,
+	0x83, 0xda, 0xe8, 0x14, 0xde, 0x61, 0x7e, 0xcb, 0xa6, 0xdc, 0xc7, 0x94, 0x4d, 0x16, 0xf4, 0xc6,
+	0x9f, 0x10, 0xf7, 0xc0, 0xbc, 0xc0, 0x64, 0x4a, 0x22, 0x82, 0xdd, 0x8e, 0xb7, 0x02, 0xe0, 0x85,
+	0x2c, 0x8d, 0xeb, 0xa6, 0xf2, 0x7e, 0xed, 0xb5, 0x75, 0x00, 0x30, 0x0d, 0x03, 0x3e, 0x27, 0xa2,
+	0xb6, 0x74, 0x00, 0xfc, 0x20, 0xa2, 0x73, 0x3f, 0xa2, 0x6c, 0xe2, 0xda, 0xa6, 0x38, 0x22, 0x61,
+	0x0d, 0xda, 0x08, 0xa0, 0x5b, 0x8b, 0xc6, 0x09, 0xa0, 0xfb, 0xc3, 0xa7, 0xd3, 0xda, 0x88, 0x40,
+	0x3f, 0x50, 0x79, 0x2e, 0x97, 0x7a, 0xeb, 0xf5, 0x6d, 0xaf, 0xd7, 0x31, 0xf4, 0x62, 0x26, 0x88,
+	0x1f, 0xfc, 0x34, 0x53, 0xb8, 0x16, 0xea, 0xc3, 0xe1, 0x0e, 0xda, 0xde, 0x5f, 0x0b, 0xfa, 0xe6,
+	0xc3, 0x89, 0xce, 0x54, 0x2e, 0x64, 0x55, 0xa0, 0xef, 0x70, 0xb0, 0x54, 0xa9, 0xac, 0x33, 0x77,
+	0x2e, 0x3e, 0xbf, 0x24, 0xfb, 0xaa, 0xe8, 0x7f, 0xa4, 0x37, 0x65, 0x1e, 0xa8, 0x54, 0x8a, 0xfa,
+	0x19, 0x3a, 0x83, 0xe3, 0x24, 0x4d, 0x33, 0xa3, 0x25, 0x8f, 0x8b, 0x2c, 0xbf, 0x57, 0x03, 0xbb,
+	0xde, 0x9e, 0xb3, 0xa3, 0x69, 0x7e, 0xaf, 0xbc, 0x3f, 0x70, 0xb2, 0xa7, 0x8b, 0x09, 0x99, 0xcf,
+	0x88, 0xf0, 0x23, 0xca, 0xd9, 0x22, 0x8c, 0x83, 0x80, 0x84, 0xa1, 0xdb, 0x7a, 0x4d, 0x9b, 0x10,
+	0x62, 0x61, 0xa6, 0xf9, 0x00, 0xa7, 0x3b, 0x3a, 0x66, 0x61, 0x3c, 0x9b, 0x71, 0x61, 0x36, 0x61,
+	0x7f, 0xf9, 0x04, 0x47, 0x91, 0xac, 0xf4, 0x8d, 0x4a, 0xe5, 0xb5, 0x7c, 0xae, 0xcc, 0x2e, 0x93,
+	0x22, 0x5b, 0x68, 0x59, 0x69, 0xb7, 0x75, 0x45, 0xe0, 0x44, 0x95, 0xab, 0x91, 0x2a, 0x64, 0xbe,
+	0x54, 0x65, 0x3a, 0x6a, 0x0e, 0xf2, 0xf7, 0x68, 0x95, 0xe9, 0x87, 0xcd, 0x9d, 0x19, 0x7a, 0xfc,
+	0xa2, 0x8d, 0x1b, 0xed, 0xeb, 0xf6, 0x58, 0x9f, 0x2e, 0xc7, 0x2b, 0xb5, 0x3d, 0xd9, 0xbb, 0x6e,
+	0x4d, 0x5e, 0xfe, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xd2, 0x21, 0x5c, 0x35, 0xd1, 0x02, 0x00, 0x00,
+}
diff --git a/vendor/github.com/opencord/voltha-protos/v3/go/common/meta.pb.go b/vendor/github.com/opencord/voltha-protos/v3/go/common/meta.pb.go
new file mode 100644
index 0000000..8fbd621
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v3/go/common/meta.pb.go
@@ -0,0 +1,142 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/meta.proto
+
+package common
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type Access int32
+
+const (
+	// read-write, stored attribute
+	Access_CONFIG Access = 0
+	// read-only field, stored with the model, covered by its hash
+	Access_READ_ONLY Access = 1
+	// A read-only attribute that is not stored in the model, not covered
+	// by its hash, its value is filled real-time upon each request.
+	Access_REAL_TIME Access = 2
+)
+
+var Access_name = map[int32]string{
+	0: "CONFIG",
+	1: "READ_ONLY",
+	2: "REAL_TIME",
+}
+
+var Access_value = map[string]int32{
+	"CONFIG":    0,
+	"READ_ONLY": 1,
+	"REAL_TIME": 2,
+}
+
+func (x Access) String() string {
+	return proto.EnumName(Access_name, int32(x))
+}
+
+func (Access) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_96b320e8a67781f3, []int{0}
+}
+
+type ChildNode struct {
+	Key                  string   `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ChildNode) Reset()         { *m = ChildNode{} }
+func (m *ChildNode) String() string { return proto.CompactTextString(m) }
+func (*ChildNode) ProtoMessage()    {}
+func (*ChildNode) Descriptor() ([]byte, []int) {
+	return fileDescriptor_96b320e8a67781f3, []int{0}
+}
+
+func (m *ChildNode) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ChildNode.Unmarshal(m, b)
+}
+func (m *ChildNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ChildNode.Marshal(b, m, deterministic)
+}
+func (m *ChildNode) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ChildNode.Merge(m, src)
+}
+func (m *ChildNode) XXX_Size() int {
+	return xxx_messageInfo_ChildNode.Size(m)
+}
+func (m *ChildNode) XXX_DiscardUnknown() {
+	xxx_messageInfo_ChildNode.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ChildNode proto.InternalMessageInfo
+
+func (m *ChildNode) GetKey() string {
+	if m != nil {
+		return m.Key
+	}
+	return ""
+}
+
+var E_ChildNode = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*ChildNode)(nil),
+	Field:         7761772,
+	Name:          "voltha.child_node",
+	Tag:           "bytes,7761772,opt,name=child_node",
+	Filename:      "voltha_protos/meta.proto",
+}
+
+var E_Access = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*Access)(nil),
+	Field:         7761773,
+	Name:          "voltha.access",
+	Tag:           "varint,7761773,opt,name=access,enum=voltha.Access",
+	Filename:      "voltha_protos/meta.proto",
+}
+
+func init() {
+	proto.RegisterEnum("voltha.Access", Access_name, Access_value)
+	proto.RegisterType((*ChildNode)(nil), "voltha.ChildNode")
+	proto.RegisterExtension(E_ChildNode)
+	proto.RegisterExtension(E_Access)
+}
+
+func init() { proto.RegisterFile("voltha_protos/meta.proto", fileDescriptor_96b320e8a67781f3) }
+
+var fileDescriptor_96b320e8a67781f3 = []byte{
+	// 281 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x90, 0xc1, 0x4a, 0xc3, 0x40,
+	0x10, 0x86, 0x8d, 0x85, 0x40, 0x46, 0x2c, 0x31, 0xa7, 0x52, 0x28, 0x04, 0x4f, 0x45, 0x70, 0x17,
+	0xd2, 0x5b, 0x6f, 0xb5, 0xb6, 0x5a, 0x88, 0x09, 0x04, 0x2f, 0x7a, 0x09, 0xc9, 0x66, 0x4d, 0x82,
+	0x49, 0x26, 0x64, 0xb7, 0x05, 0x1f, 0xd5, 0x8b, 0x4f, 0xa0, 0xef, 0x20, 0xc9, 0x66, 0xbd, 0x7a,
+	0xfb, 0x67, 0xf6, 0xdf, 0x8f, 0x8f, 0x81, 0xd9, 0x09, 0x2b, 0x59, 0x24, 0x71, 0xdb, 0xa1, 0x44,
+	0x41, 0x6b, 0x2e, 0x13, 0x32, 0x64, 0xc7, 0x54, 0x2f, 0x73, 0x37, 0x47, 0xcc, 0x2b, 0x4e, 0x87,
+	0x6d, 0x7a, 0x7c, 0xa3, 0x19, 0x17, 0xac, 0x2b, 0x5b, 0x89, 0x9d, 0x6a, 0x5e, 0x2f, 0xc0, 0xda,
+	0x16, 0x65, 0x95, 0x05, 0x98, 0x71, 0xc7, 0x86, 0xc9, 0x3b, 0xff, 0x98, 0x19, 0xae, 0xb1, 0xb4,
+	0xa2, 0x3e, 0xde, 0x78, 0x60, 0x6e, 0x18, 0xe3, 0x42, 0x38, 0x00, 0xe6, 0x36, 0x0c, 0xf6, 0x87,
+	0x07, 0xfb, 0xcc, 0xb9, 0x04, 0x2b, 0xda, 0x6d, 0xee, 0xe3, 0x30, 0xf0, 0x5f, 0x6c, 0x63, 0x1c,
+	0xfd, 0xf8, 0xf9, 0xf0, 0xb4, 0xb3, 0xcf, 0xd7, 0x11, 0x00, 0xeb, 0x91, 0x71, 0xd3, 0x33, 0x17,
+	0x44, 0x39, 0x10, 0xed, 0x40, 0xf6, 0x25, 0xaf, 0xb2, 0xb0, 0x95, 0x25, 0x36, 0x62, 0xf6, 0xfd,
+	0xf5, 0x39, 0x71, 0x8d, 0xe5, 0x85, 0x77, 0x45, 0x94, 0x33, 0xf9, 0xd3, 0x89, 0x2c, 0xa6, 0xe3,
+	0xfa, 0x11, 0xcc, 0x44, 0x79, 0xfc, 0xc3, 0xfb, 0x51, 0xbc, 0xa9, 0x37, 0xd5, 0x3c, 0xe5, 0x1f,
+	0x8d, 0xff, 0xef, 0x7c, 0x98, 0x63, 0x97, 0x13, 0x6c, 0x79, 0xc3, 0xb0, 0xcb, 0x74, 0x8b, 0x61,
+	0x5d, 0x63, 0xf3, 0x4a, 0xf2, 0x52, 0x16, 0xc7, 0xb4, 0x1f, 0xa9, 0xae, 0x50, 0x55, 0xb9, 0x1d,
+	0xcf, 0x7c, 0x5a, 0xd1, 0x1c, 0xa9, 0xea, 0xa7, 0xe6, 0xb0, 0x5c, 0xfd, 0x06, 0x00, 0x00, 0xff,
+	0xff, 0x45, 0x61, 0x57, 0xbf, 0x8b, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/opencord/voltha-protos/v3/go/omci/omci_alarm_db.pb.go b/vendor/github.com/opencord/voltha-protos/v3/go/omci/omci_alarm_db.pb.go
new file mode 100644
index 0000000..3d20894
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v3/go/omci/omci_alarm_db.pb.go
@@ -0,0 +1,516 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/omci_alarm_db.proto
+
+package omci
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	_ "github.com/opencord/voltha-protos/v3/go/common"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type AlarmOpenOmciEventType_OpenOmciEventType int32
+
+const (
+	AlarmOpenOmciEventType_state_change AlarmOpenOmciEventType_OpenOmciEventType = 0
+)
+
+var AlarmOpenOmciEventType_OpenOmciEventType_name = map[int32]string{
+	0: "state_change",
+}
+
+var AlarmOpenOmciEventType_OpenOmciEventType_value = map[string]int32{
+	"state_change": 0,
+}
+
+func (x AlarmOpenOmciEventType_OpenOmciEventType) String() string {
+	return proto.EnumName(AlarmOpenOmciEventType_OpenOmciEventType_name, int32(x))
+}
+
+func (AlarmOpenOmciEventType_OpenOmciEventType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_8d41f1e38aadb08d, []int{6, 0}
+}
+
+type AlarmAttributeData struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Value                string   `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AlarmAttributeData) Reset()         { *m = AlarmAttributeData{} }
+func (m *AlarmAttributeData) String() string { return proto.CompactTextString(m) }
+func (*AlarmAttributeData) ProtoMessage()    {}
+func (*AlarmAttributeData) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d41f1e38aadb08d, []int{0}
+}
+
+func (m *AlarmAttributeData) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmAttributeData.Unmarshal(m, b)
+}
+func (m *AlarmAttributeData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmAttributeData.Marshal(b, m, deterministic)
+}
+func (m *AlarmAttributeData) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmAttributeData.Merge(m, src)
+}
+func (m *AlarmAttributeData) XXX_Size() int {
+	return xxx_messageInfo_AlarmAttributeData.Size(m)
+}
+func (m *AlarmAttributeData) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmAttributeData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmAttributeData proto.InternalMessageInfo
+
+func (m *AlarmAttributeData) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AlarmAttributeData) GetValue() string {
+	if m != nil {
+		return m.Value
+	}
+	return ""
+}
+
+type AlarmInstanceData struct {
+	InstanceId           uint32                `protobuf:"varint,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"`
+	Created              string                `protobuf:"bytes,2,opt,name=created,proto3" json:"created,omitempty"`
+	Modified             string                `protobuf:"bytes,3,opt,name=modified,proto3" json:"modified,omitempty"`
+	Attributes           []*AlarmAttributeData `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *AlarmInstanceData) Reset()         { *m = AlarmInstanceData{} }
+func (m *AlarmInstanceData) String() string { return proto.CompactTextString(m) }
+func (*AlarmInstanceData) ProtoMessage()    {}
+func (*AlarmInstanceData) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d41f1e38aadb08d, []int{1}
+}
+
+func (m *AlarmInstanceData) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmInstanceData.Unmarshal(m, b)
+}
+func (m *AlarmInstanceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmInstanceData.Marshal(b, m, deterministic)
+}
+func (m *AlarmInstanceData) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmInstanceData.Merge(m, src)
+}
+func (m *AlarmInstanceData) XXX_Size() int {
+	return xxx_messageInfo_AlarmInstanceData.Size(m)
+}
+func (m *AlarmInstanceData) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmInstanceData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmInstanceData proto.InternalMessageInfo
+
+func (m *AlarmInstanceData) GetInstanceId() uint32 {
+	if m != nil {
+		return m.InstanceId
+	}
+	return 0
+}
+
+func (m *AlarmInstanceData) GetCreated() string {
+	if m != nil {
+		return m.Created
+	}
+	return ""
+}
+
+func (m *AlarmInstanceData) GetModified() string {
+	if m != nil {
+		return m.Modified
+	}
+	return ""
+}
+
+func (m *AlarmInstanceData) GetAttributes() []*AlarmAttributeData {
+	if m != nil {
+		return m.Attributes
+	}
+	return nil
+}
+
+type AlarmClassData struct {
+	ClassId              uint32               `protobuf:"varint,1,opt,name=class_id,json=classId,proto3" json:"class_id,omitempty"`
+	Instances            []*AlarmInstanceData `protobuf:"bytes,2,rep,name=instances,proto3" json:"instances,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *AlarmClassData) Reset()         { *m = AlarmClassData{} }
+func (m *AlarmClassData) String() string { return proto.CompactTextString(m) }
+func (*AlarmClassData) ProtoMessage()    {}
+func (*AlarmClassData) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d41f1e38aadb08d, []int{2}
+}
+
+func (m *AlarmClassData) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmClassData.Unmarshal(m, b)
+}
+func (m *AlarmClassData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmClassData.Marshal(b, m, deterministic)
+}
+func (m *AlarmClassData) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmClassData.Merge(m, src)
+}
+func (m *AlarmClassData) XXX_Size() int {
+	return xxx_messageInfo_AlarmClassData.Size(m)
+}
+func (m *AlarmClassData) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmClassData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmClassData proto.InternalMessageInfo
+
+func (m *AlarmClassData) GetClassId() uint32 {
+	if m != nil {
+		return m.ClassId
+	}
+	return 0
+}
+
+func (m *AlarmClassData) GetInstances() []*AlarmInstanceData {
+	if m != nil {
+		return m.Instances
+	}
+	return nil
+}
+
+type AlarmManagedEntity struct {
+	ClassId              uint32   `protobuf:"varint,1,opt,name=class_id,json=classId,proto3" json:"class_id,omitempty"`
+	Name                 string   `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AlarmManagedEntity) Reset()         { *m = AlarmManagedEntity{} }
+func (m *AlarmManagedEntity) String() string { return proto.CompactTextString(m) }
+func (*AlarmManagedEntity) ProtoMessage()    {}
+func (*AlarmManagedEntity) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d41f1e38aadb08d, []int{3}
+}
+
+func (m *AlarmManagedEntity) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmManagedEntity.Unmarshal(m, b)
+}
+func (m *AlarmManagedEntity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmManagedEntity.Marshal(b, m, deterministic)
+}
+func (m *AlarmManagedEntity) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmManagedEntity.Merge(m, src)
+}
+func (m *AlarmManagedEntity) XXX_Size() int {
+	return xxx_messageInfo_AlarmManagedEntity.Size(m)
+}
+func (m *AlarmManagedEntity) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmManagedEntity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmManagedEntity proto.InternalMessageInfo
+
+func (m *AlarmManagedEntity) GetClassId() uint32 {
+	if m != nil {
+		return m.ClassId
+	}
+	return 0
+}
+
+func (m *AlarmManagedEntity) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+type AlarmMessageType struct {
+	MessageType          uint32   `protobuf:"varint,1,opt,name=message_type,json=messageType,proto3" json:"message_type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AlarmMessageType) Reset()         { *m = AlarmMessageType{} }
+func (m *AlarmMessageType) String() string { return proto.CompactTextString(m) }
+func (*AlarmMessageType) ProtoMessage()    {}
+func (*AlarmMessageType) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d41f1e38aadb08d, []int{4}
+}
+
+func (m *AlarmMessageType) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmMessageType.Unmarshal(m, b)
+}
+func (m *AlarmMessageType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmMessageType.Marshal(b, m, deterministic)
+}
+func (m *AlarmMessageType) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmMessageType.Merge(m, src)
+}
+func (m *AlarmMessageType) XXX_Size() int {
+	return xxx_messageInfo_AlarmMessageType.Size(m)
+}
+func (m *AlarmMessageType) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmMessageType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmMessageType proto.InternalMessageInfo
+
+func (m *AlarmMessageType) GetMessageType() uint32 {
+	if m != nil {
+		return m.MessageType
+	}
+	return 0
+}
+
+type AlarmDeviceData struct {
+	DeviceId             string                `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	Created              string                `protobuf:"bytes,2,opt,name=created,proto3" json:"created,omitempty"`
+	LastAlarmSequence    uint32                `protobuf:"varint,3,opt,name=last_alarm_sequence,json=lastAlarmSequence,proto3" json:"last_alarm_sequence,omitempty"`
+	LastSyncTime         string                `protobuf:"bytes,4,opt,name=last_sync_time,json=lastSyncTime,proto3" json:"last_sync_time,omitempty"`
+	Version              uint32                `protobuf:"varint,5,opt,name=version,proto3" json:"version,omitempty"`
+	Classes              []*AlarmClassData     `protobuf:"bytes,6,rep,name=classes,proto3" json:"classes,omitempty"`
+	ManagedEntities      []*AlarmManagedEntity `protobuf:"bytes,7,rep,name=managed_entities,json=managedEntities,proto3" json:"managed_entities,omitempty"`
+	MessageTypes         []*AlarmMessageType   `protobuf:"bytes,8,rep,name=message_types,json=messageTypes,proto3" json:"message_types,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *AlarmDeviceData) Reset()         { *m = AlarmDeviceData{} }
+func (m *AlarmDeviceData) String() string { return proto.CompactTextString(m) }
+func (*AlarmDeviceData) ProtoMessage()    {}
+func (*AlarmDeviceData) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d41f1e38aadb08d, []int{5}
+}
+
+func (m *AlarmDeviceData) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmDeviceData.Unmarshal(m, b)
+}
+func (m *AlarmDeviceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmDeviceData.Marshal(b, m, deterministic)
+}
+func (m *AlarmDeviceData) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmDeviceData.Merge(m, src)
+}
+func (m *AlarmDeviceData) XXX_Size() int {
+	return xxx_messageInfo_AlarmDeviceData.Size(m)
+}
+func (m *AlarmDeviceData) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmDeviceData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmDeviceData proto.InternalMessageInfo
+
+func (m *AlarmDeviceData) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *AlarmDeviceData) GetCreated() string {
+	if m != nil {
+		return m.Created
+	}
+	return ""
+}
+
+func (m *AlarmDeviceData) GetLastAlarmSequence() uint32 {
+	if m != nil {
+		return m.LastAlarmSequence
+	}
+	return 0
+}
+
+func (m *AlarmDeviceData) GetLastSyncTime() string {
+	if m != nil {
+		return m.LastSyncTime
+	}
+	return ""
+}
+
+func (m *AlarmDeviceData) GetVersion() uint32 {
+	if m != nil {
+		return m.Version
+	}
+	return 0
+}
+
+func (m *AlarmDeviceData) GetClasses() []*AlarmClassData {
+	if m != nil {
+		return m.Classes
+	}
+	return nil
+}
+
+func (m *AlarmDeviceData) GetManagedEntities() []*AlarmManagedEntity {
+	if m != nil {
+		return m.ManagedEntities
+	}
+	return nil
+}
+
+func (m *AlarmDeviceData) GetMessageTypes() []*AlarmMessageType {
+	if m != nil {
+		return m.MessageTypes
+	}
+	return nil
+}
+
+type AlarmOpenOmciEventType struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AlarmOpenOmciEventType) Reset()         { *m = AlarmOpenOmciEventType{} }
+func (m *AlarmOpenOmciEventType) String() string { return proto.CompactTextString(m) }
+func (*AlarmOpenOmciEventType) ProtoMessage()    {}
+func (*AlarmOpenOmciEventType) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d41f1e38aadb08d, []int{6}
+}
+
+func (m *AlarmOpenOmciEventType) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmOpenOmciEventType.Unmarshal(m, b)
+}
+func (m *AlarmOpenOmciEventType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmOpenOmciEventType.Marshal(b, m, deterministic)
+}
+func (m *AlarmOpenOmciEventType) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmOpenOmciEventType.Merge(m, src)
+}
+func (m *AlarmOpenOmciEventType) XXX_Size() int {
+	return xxx_messageInfo_AlarmOpenOmciEventType.Size(m)
+}
+func (m *AlarmOpenOmciEventType) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmOpenOmciEventType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmOpenOmciEventType proto.InternalMessageInfo
+
+type AlarmOpenOmciEvent struct {
+	Type                 AlarmOpenOmciEventType_OpenOmciEventType `protobuf:"varint,1,opt,name=type,proto3,enum=omci.AlarmOpenOmciEventType_OpenOmciEventType" json:"type,omitempty"`
+	Data                 string                                   `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                                 `json:"-"`
+	XXX_unrecognized     []byte                                   `json:"-"`
+	XXX_sizecache        int32                                    `json:"-"`
+}
+
+func (m *AlarmOpenOmciEvent) Reset()         { *m = AlarmOpenOmciEvent{} }
+func (m *AlarmOpenOmciEvent) String() string { return proto.CompactTextString(m) }
+func (*AlarmOpenOmciEvent) ProtoMessage()    {}
+func (*AlarmOpenOmciEvent) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d41f1e38aadb08d, []int{7}
+}
+
+func (m *AlarmOpenOmciEvent) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmOpenOmciEvent.Unmarshal(m, b)
+}
+func (m *AlarmOpenOmciEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmOpenOmciEvent.Marshal(b, m, deterministic)
+}
+func (m *AlarmOpenOmciEvent) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmOpenOmciEvent.Merge(m, src)
+}
+func (m *AlarmOpenOmciEvent) XXX_Size() int {
+	return xxx_messageInfo_AlarmOpenOmciEvent.Size(m)
+}
+func (m *AlarmOpenOmciEvent) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmOpenOmciEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmOpenOmciEvent proto.InternalMessageInfo
+
+func (m *AlarmOpenOmciEvent) GetType() AlarmOpenOmciEventType_OpenOmciEventType {
+	if m != nil {
+		return m.Type
+	}
+	return AlarmOpenOmciEventType_state_change
+}
+
+func (m *AlarmOpenOmciEvent) GetData() string {
+	if m != nil {
+		return m.Data
+	}
+	return ""
+}
+
+func init() {
+	proto.RegisterEnum("omci.AlarmOpenOmciEventType_OpenOmciEventType", AlarmOpenOmciEventType_OpenOmciEventType_name, AlarmOpenOmciEventType_OpenOmciEventType_value)
+	proto.RegisterType((*AlarmAttributeData)(nil), "omci.AlarmAttributeData")
+	proto.RegisterType((*AlarmInstanceData)(nil), "omci.AlarmInstanceData")
+	proto.RegisterType((*AlarmClassData)(nil), "omci.AlarmClassData")
+	proto.RegisterType((*AlarmManagedEntity)(nil), "omci.AlarmManagedEntity")
+	proto.RegisterType((*AlarmMessageType)(nil), "omci.AlarmMessageType")
+	proto.RegisterType((*AlarmDeviceData)(nil), "omci.AlarmDeviceData")
+	proto.RegisterType((*AlarmOpenOmciEventType)(nil), "omci.AlarmOpenOmciEventType")
+	proto.RegisterType((*AlarmOpenOmciEvent)(nil), "omci.AlarmOpenOmciEvent")
+}
+
+func init() { proto.RegisterFile("voltha_protos/omci_alarm_db.proto", fileDescriptor_8d41f1e38aadb08d) }
+
+var fileDescriptor_8d41f1e38aadb08d = []byte{
+	// 605 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xc1, 0x4e, 0xdb, 0x40,
+	0x10, 0x6d, 0x20, 0x40, 0x98, 0x24, 0x10, 0xb6, 0x88, 0x6e, 0x91, 0x90, 0xa8, 0xd5, 0x56, 0x1c,
+	0x5a, 0x47, 0x82, 0x63, 0x2b, 0x21, 0x02, 0x91, 0x9a, 0x43, 0x85, 0x6a, 0x38, 0xf5, 0x62, 0x6d,
+	0xec, 0xa9, 0x59, 0xc9, 0xbb, 0x4e, 0xbd, 0x1b, 0x4b, 0x39, 0xf4, 0xd2, 0xaf, 0xea, 0x4f, 0xf0,
+	0x13, 0x3d, 0xf5, 0x0b, 0x38, 0x57, 0x1e, 0xdb, 0x89, 0x23, 0x4b, 0x55, 0x6f, 0xfb, 0xde, 0xcc,
+	0xbc, 0x99, 0x9d, 0xb7, 0x5a, 0x78, 0x95, 0x25, 0xb1, 0x7d, 0x10, 0xfe, 0x2c, 0x4d, 0x6c, 0x62,
+	0x86, 0x89, 0x0a, 0xa4, 0x2f, 0x62, 0x91, 0x2a, 0x3f, 0x9c, 0xba, 0x44, 0xb2, 0x76, 0x4e, 0x1e,
+	0xf3, 0xf5, 0x44, 0x85, 0x56, 0x14, 0x71, 0x67, 0x0c, 0xec, 0x2a, 0xaf, 0xb8, 0xb2, 0x36, 0x95,
+	0xd3, 0xb9, 0xc5, 0x1b, 0x61, 0x05, 0x7b, 0x09, 0x6d, 0x2d, 0x14, 0xf2, 0xd6, 0x69, 0xeb, 0x6c,
+	0x77, 0xb4, 0xf5, 0xe7, 0xe9, 0xf1, 0xa4, 0xe5, 0x11, 0xc5, 0x0e, 0x61, 0x2b, 0x13, 0xf1, 0x1c,
+	0xf9, 0x46, 0x1e, 0xf3, 0x0a, 0xe0, 0xfc, 0x6a, 0xc1, 0x01, 0xe9, 0x4c, 0xb4, 0xb1, 0x42, 0x07,
+	0x85, 0xcc, 0x5b, 0xe8, 0xca, 0x12, 0xfb, 0x32, 0x24, 0xb5, 0x7e, 0xa5, 0x06, 0x55, 0x64, 0x12,
+	0x32, 0x0e, 0x3b, 0x41, 0x8a, 0xc2, 0x62, 0x58, 0xaa, 0x56, 0x90, 0x1d, 0x43, 0x47, 0x25, 0xa1,
+	0xfc, 0x26, 0x31, 0xe4, 0x9b, 0x14, 0x5a, 0x62, 0x36, 0x06, 0x10, 0xd5, 0xd4, 0x86, 0xb7, 0x4f,
+	0x37, 0xcf, 0xba, 0xe7, 0xdc, 0xcd, 0xef, 0xeb, 0x36, 0xaf, 0x34, 0xea, 0xfe, 0x7e, 0x7a, 0x3c,
+	0xd9, 0x2e, 0xee, 0xe5, 0xd5, 0x0a, 0x9d, 0x1f, 0xb0, 0x47, 0xe9, 0xd7, 0xb1, 0x30, 0x86, 0xc6,
+	0x3e, 0x85, 0x4e, 0x90, 0x83, 0xc6, 0xcc, 0x3b, 0x44, 0x4f, 0x42, 0x36, 0x81, 0xdd, 0x6a, 0x7c,
+	0xc3, 0x37, 0xa8, 0xf3, 0x8b, 0x5a, 0xe7, 0xfa, 0x12, 0x46, 0x2c, 0x6f, 0xdc, 0x5f, 0xdb, 0x84,
+	0xb7, 0xaa, 0x76, 0xbe, 0x94, 0x06, 0x7c, 0x16, 0x5a, 0x44, 0x18, 0x8e, 0xb5, 0x95, 0x76, 0xf1,
+	0x1f, 0x23, 0x54, 0x16, 0x6d, 0x34, 0x2c, 0x72, 0x3e, 0xc2, 0xa0, 0x90, 0x44, 0x63, 0x44, 0x84,
+	0xf7, 0x8b, 0x19, 0xb2, 0x33, 0xe8, 0xa9, 0x02, 0xfa, 0x76, 0x31, 0xc3, 0x75, 0xd1, 0xae, 0x5a,
+	0x65, 0x3a, 0x3f, 0x37, 0x61, 0x9f, 0xca, 0x6f, 0x30, 0x93, 0xa5, 0x91, 0x0e, 0xec, 0x86, 0x84,
+	0xaa, 0x79, 0x96, 0x1d, 0x3b, 0x05, 0xff, 0x4f, 0x13, 0x5d, 0x78, 0x1e, 0x0b, 0x63, 0xcb, 0xa7,
+	0x69, 0xf0, 0xfb, 0x1c, 0x75, 0x80, 0xe4, 0x67, 0xdf, 0x3b, 0xc8, 0x43, 0xd4, 0xef, 0xae, 0x0c,
+	0xb0, 0xd7, 0xb0, 0x47, 0xf9, 0x66, 0xa1, 0x03, 0xdf, 0x4a, 0x85, 0xbc, 0x4d, 0x82, 0xbd, 0x9c,
+	0xbd, 0x5b, 0xe8, 0xe0, 0x5e, 0x2a, 0xcc, 0xfb, 0x65, 0x98, 0x1a, 0x99, 0x68, 0xbe, 0x45, 0x4a,
+	0x15, 0x64, 0x97, 0x50, 0x6c, 0x09, 0x0d, 0xdf, 0x26, 0x6f, 0x0e, 0x6b, 0xde, 0x2c, 0x6d, 0x1e,
+	0xed, 0xe7, 0xc6, 0xc0, 0x6a, 0xd1, 0x5e, 0x55, 0xc5, 0xae, 0x61, 0xa0, 0x0a, 0x3b, 0x7c, 0xcc,
+	0xfd, 0x90, 0x68, 0xf8, 0x4e, 0xe3, 0x7d, 0xad, 0x39, 0xe6, 0xed, 0xab, 0x1a, 0x94, 0x68, 0xd8,
+	0x07, 0xe8, 0xd7, 0x37, 0x6e, 0x78, 0x87, 0x14, 0x8e, 0xea, 0x0a, 0xab, 0xb5, 0x7b, 0xbd, 0x9a,
+	0x07, 0xc6, 0xb9, 0x84, 0x23, 0xca, 0xb8, 0x9d, 0xa1, 0xbe, 0x55, 0x81, 0x1c, 0x67, 0xa8, 0x2d,
+	0xd9, 0xf3, 0x06, 0x0e, 0x1a, 0x24, 0x1b, 0x40, 0xcf, 0x58, 0x61, 0xd1, 0x0f, 0x1e, 0x84, 0x8e,
+	0x70, 0xf0, 0xcc, 0x89, 0xcb, 0x67, 0xb5, 0x96, 0xcb, 0x46, 0xd0, 0x5e, 0xba, 0xbf, 0x77, 0xee,
+	0xd6, 0x46, 0x69, 0x68, 0xba, 0x0d, 0xc6, 0xa3, 0x5a, 0xc6, 0xa0, 0x1d, 0x0a, 0x2b, 0x4a, 0x93,
+	0xe9, 0x3c, 0xfa, 0x04, 0x3c, 0x49, 0x23, 0x37, 0x99, 0xa1, 0x0e, 0x92, 0x34, 0x74, 0x8b, 0xef,
+	0x86, 0xe4, 0xbf, 0xbe, 0x8b, 0xa4, 0x7d, 0x98, 0x4f, 0xdd, 0x20, 0x51, 0xc3, 0x2a, 0x61, 0x58,
+	0x24, 0xbc, 0x2f, 0xff, 0xa3, 0xec, 0x62, 0x18, 0x25, 0xf4, 0x7d, 0x4d, 0xb7, 0x89, 0xba, 0xf8,
+	0x1b, 0x00, 0x00, 0xff, 0xff, 0x79, 0xe4, 0xce, 0x1a, 0xdb, 0x04, 0x00, 0x00,
+}
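+
+// Usage sketch (hypothetical values; not part of the generated API):
+// the alarm messages above are plain protobuf structs, so callers build
+// them directly and serialize them with proto.Marshal/proto.Unmarshal
+// from the github.com/golang/protobuf/proto package this file already
+// imports. The device id "onu-1" and class id 11 are illustrative only.
+//
+//	dev := &AlarmDeviceData{
+//		DeviceId: "onu-1",
+//		Version:  1,
+//		Classes: []*AlarmClassData{{
+//			ClassId:   11,
+//			Instances: []*AlarmInstanceData{{InstanceId: 257}},
+//		}},
+//	}
+//	buf, err := proto.Marshal(dev) // wire-format bytes, e.g. for KV-store persistence
+//	if err != nil {
+//		// handle the error
+//	}
+//	restored := &AlarmDeviceData{}
+//	err = proto.Unmarshal(buf, restored)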
diff --git a/vendor/github.com/opencord/voltha-protos/v3/go/omci/omci_mib_db.pb.go b/vendor/github.com/opencord/voltha-protos/v3/go/omci/omci_mib_db.pb.go
new file mode 100644
index 0000000..a3f23af
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v3/go/omci/omci_mib_db.pb.go
@@ -0,0 +1,516 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/omci_mib_db.proto
+
+package omci
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	_ "github.com/opencord/voltha-protos/v3/go/common"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type OpenOmciEventType_Types int32
+
+const (
+	OpenOmciEventType_state_change OpenOmciEventType_Types = 0
+)
+
+var OpenOmciEventType_Types_name = map[int32]string{
+	0: "state_change",
+}
+
+var OpenOmciEventType_Types_value = map[string]int32{
+	"state_change": 0,
+}
+
+func (x OpenOmciEventType_Types) String() string {
+	return proto.EnumName(OpenOmciEventType_Types_name, int32(x))
+}
+
+func (OpenOmciEventType_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_4fa402a2df36dcc1, []int{6, 0}
+}
+
+type MibAttributeData struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Value                string   `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MibAttributeData) Reset()         { *m = MibAttributeData{} }
+func (m *MibAttributeData) String() string { return proto.CompactTextString(m) }
+func (*MibAttributeData) ProtoMessage()    {}
+func (*MibAttributeData) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4fa402a2df36dcc1, []int{0}
+}
+
+func (m *MibAttributeData) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MibAttributeData.Unmarshal(m, b)
+}
+func (m *MibAttributeData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MibAttributeData.Marshal(b, m, deterministic)
+}
+func (m *MibAttributeData) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MibAttributeData.Merge(m, src)
+}
+func (m *MibAttributeData) XXX_Size() int {
+	return xxx_messageInfo_MibAttributeData.Size(m)
+}
+func (m *MibAttributeData) XXX_DiscardUnknown() {
+	xxx_messageInfo_MibAttributeData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MibAttributeData proto.InternalMessageInfo
+
+func (m *MibAttributeData) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *MibAttributeData) GetValue() string {
+	if m != nil {
+		return m.Value
+	}
+	return ""
+}
+
+type MibInstanceData struct {
+	InstanceId           uint32              `protobuf:"varint,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"`
+	Created              string              `protobuf:"bytes,2,opt,name=created,proto3" json:"created,omitempty"`
+	Modified             string              `protobuf:"bytes,3,opt,name=modified,proto3" json:"modified,omitempty"`
+	Attributes           []*MibAttributeData `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *MibInstanceData) Reset()         { *m = MibInstanceData{} }
+func (m *MibInstanceData) String() string { return proto.CompactTextString(m) }
+func (*MibInstanceData) ProtoMessage()    {}
+func (*MibInstanceData) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4fa402a2df36dcc1, []int{1}
+}
+
+func (m *MibInstanceData) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MibInstanceData.Unmarshal(m, b)
+}
+func (m *MibInstanceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MibInstanceData.Marshal(b, m, deterministic)
+}
+func (m *MibInstanceData) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MibInstanceData.Merge(m, src)
+}
+func (m *MibInstanceData) XXX_Size() int {
+	return xxx_messageInfo_MibInstanceData.Size(m)
+}
+func (m *MibInstanceData) XXX_DiscardUnknown() {
+	xxx_messageInfo_MibInstanceData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MibInstanceData proto.InternalMessageInfo
+
+func (m *MibInstanceData) GetInstanceId() uint32 {
+	if m != nil {
+		return m.InstanceId
+	}
+	return 0
+}
+
+func (m *MibInstanceData) GetCreated() string {
+	if m != nil {
+		return m.Created
+	}
+	return ""
+}
+
+func (m *MibInstanceData) GetModified() string {
+	if m != nil {
+		return m.Modified
+	}
+	return ""
+}
+
+func (m *MibInstanceData) GetAttributes() []*MibAttributeData {
+	if m != nil {
+		return m.Attributes
+	}
+	return nil
+}
+
+type MibClassData struct {
+	ClassId              uint32             `protobuf:"varint,1,opt,name=class_id,json=classId,proto3" json:"class_id,omitempty"`
+	Instances            []*MibInstanceData `protobuf:"bytes,2,rep,name=instances,proto3" json:"instances,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *MibClassData) Reset()         { *m = MibClassData{} }
+func (m *MibClassData) String() string { return proto.CompactTextString(m) }
+func (*MibClassData) ProtoMessage()    {}
+func (*MibClassData) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4fa402a2df36dcc1, []int{2}
+}
+
+func (m *MibClassData) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MibClassData.Unmarshal(m, b)
+}
+func (m *MibClassData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MibClassData.Marshal(b, m, deterministic)
+}
+func (m *MibClassData) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MibClassData.Merge(m, src)
+}
+func (m *MibClassData) XXX_Size() int {
+	return xxx_messageInfo_MibClassData.Size(m)
+}
+func (m *MibClassData) XXX_DiscardUnknown() {
+	xxx_messageInfo_MibClassData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MibClassData proto.InternalMessageInfo
+
+func (m *MibClassData) GetClassId() uint32 {
+	if m != nil {
+		return m.ClassId
+	}
+	return 0
+}
+
+func (m *MibClassData) GetInstances() []*MibInstanceData {
+	if m != nil {
+		return m.Instances
+	}
+	return nil
+}
+
+type ManagedEntity struct {
+	ClassId              uint32   `protobuf:"varint,1,opt,name=class_id,json=classId,proto3" json:"class_id,omitempty"`
+	Name                 string   `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ManagedEntity) Reset()         { *m = ManagedEntity{} }
+func (m *ManagedEntity) String() string { return proto.CompactTextString(m) }
+func (*ManagedEntity) ProtoMessage()    {}
+func (*ManagedEntity) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4fa402a2df36dcc1, []int{3}
+}
+
+func (m *ManagedEntity) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ManagedEntity.Unmarshal(m, b)
+}
+func (m *ManagedEntity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ManagedEntity.Marshal(b, m, deterministic)
+}
+func (m *ManagedEntity) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ManagedEntity.Merge(m, src)
+}
+func (m *ManagedEntity) XXX_Size() int {
+	return xxx_messageInfo_ManagedEntity.Size(m)
+}
+func (m *ManagedEntity) XXX_DiscardUnknown() {
+	xxx_messageInfo_ManagedEntity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ManagedEntity proto.InternalMessageInfo
+
+func (m *ManagedEntity) GetClassId() uint32 {
+	if m != nil {
+		return m.ClassId
+	}
+	return 0
+}
+
+func (m *ManagedEntity) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+type MessageType struct {
+	MessageType          uint32   `protobuf:"varint,1,opt,name=message_type,json=messageType,proto3" json:"message_type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MessageType) Reset()         { *m = MessageType{} }
+func (m *MessageType) String() string { return proto.CompactTextString(m) }
+func (*MessageType) ProtoMessage()    {}
+func (*MessageType) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4fa402a2df36dcc1, []int{4}
+}
+
+func (m *MessageType) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MessageType.Unmarshal(m, b)
+}
+func (m *MessageType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MessageType.Marshal(b, m, deterministic)
+}
+func (m *MessageType) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MessageType.Merge(m, src)
+}
+func (m *MessageType) XXX_Size() int {
+	return xxx_messageInfo_MessageType.Size(m)
+}
+func (m *MessageType) XXX_DiscardUnknown() {
+	xxx_messageInfo_MessageType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MessageType proto.InternalMessageInfo
+
+func (m *MessageType) GetMessageType() uint32 {
+	if m != nil {
+		return m.MessageType
+	}
+	return 0
+}
+
+type MibDeviceData struct {
+	DeviceId             string           `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	Created              string           `protobuf:"bytes,2,opt,name=created,proto3" json:"created,omitempty"`
+	LastSyncTime         string           `protobuf:"bytes,3,opt,name=last_sync_time,json=lastSyncTime,proto3" json:"last_sync_time,omitempty"`
+	MibDataSync          uint32           `protobuf:"varint,4,opt,name=mib_data_sync,json=mibDataSync,proto3" json:"mib_data_sync,omitempty"`
+	Version              uint32           `protobuf:"varint,5,opt,name=version,proto3" json:"version,omitempty"`
+	Classes              []*MibClassData  `protobuf:"bytes,6,rep,name=classes,proto3" json:"classes,omitempty"`
+	ManagedEntities      []*ManagedEntity `protobuf:"bytes,7,rep,name=managed_entities,json=managedEntities,proto3" json:"managed_entities,omitempty"`
+	MessageTypes         []*MessageType   `protobuf:"bytes,8,rep,name=message_types,json=messageTypes,proto3" json:"message_types,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *MibDeviceData) Reset()         { *m = MibDeviceData{} }
+func (m *MibDeviceData) String() string { return proto.CompactTextString(m) }
+func (*MibDeviceData) ProtoMessage()    {}
+func (*MibDeviceData) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4fa402a2df36dcc1, []int{5}
+}
+
+func (m *MibDeviceData) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MibDeviceData.Unmarshal(m, b)
+}
+func (m *MibDeviceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MibDeviceData.Marshal(b, m, deterministic)
+}
+func (m *MibDeviceData) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MibDeviceData.Merge(m, src)
+}
+func (m *MibDeviceData) XXX_Size() int {
+	return xxx_messageInfo_MibDeviceData.Size(m)
+}
+func (m *MibDeviceData) XXX_DiscardUnknown() {
+	xxx_messageInfo_MibDeviceData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MibDeviceData proto.InternalMessageInfo
+
+func (m *MibDeviceData) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *MibDeviceData) GetCreated() string {
+	if m != nil {
+		return m.Created
+	}
+	return ""
+}
+
+func (m *MibDeviceData) GetLastSyncTime() string {
+	if m != nil {
+		return m.LastSyncTime
+	}
+	return ""
+}
+
+func (m *MibDeviceData) GetMibDataSync() uint32 {
+	if m != nil {
+		return m.MibDataSync
+	}
+	return 0
+}
+
+func (m *MibDeviceData) GetVersion() uint32 {
+	if m != nil {
+		return m.Version
+	}
+	return 0
+}
+
+func (m *MibDeviceData) GetClasses() []*MibClassData {
+	if m != nil {
+		return m.Classes
+	}
+	return nil
+}
+
+func (m *MibDeviceData) GetManagedEntities() []*ManagedEntity {
+	if m != nil {
+		return m.ManagedEntities
+	}
+	return nil
+}
+
+func (m *MibDeviceData) GetMessageTypes() []*MessageType {
+	if m != nil {
+		return m.MessageTypes
+	}
+	return nil
+}
+
+type OpenOmciEventType struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OpenOmciEventType) Reset()         { *m = OpenOmciEventType{} }
+func (m *OpenOmciEventType) String() string { return proto.CompactTextString(m) }
+func (*OpenOmciEventType) ProtoMessage()    {}
+func (*OpenOmciEventType) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4fa402a2df36dcc1, []int{6}
+}
+
+func (m *OpenOmciEventType) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OpenOmciEventType.Unmarshal(m, b)
+}
+func (m *OpenOmciEventType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OpenOmciEventType.Marshal(b, m, deterministic)
+}
+func (m *OpenOmciEventType) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OpenOmciEventType.Merge(m, src)
+}
+func (m *OpenOmciEventType) XXX_Size() int {
+	return xxx_messageInfo_OpenOmciEventType.Size(m)
+}
+func (m *OpenOmciEventType) XXX_DiscardUnknown() {
+	xxx_messageInfo_OpenOmciEventType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OpenOmciEventType proto.InternalMessageInfo
+
+type OpenOmciEvent struct {
+	Type                 OpenOmciEventType_Types `protobuf:"varint,1,opt,name=type,proto3,enum=omci.OpenOmciEventType_Types" json:"type,omitempty"`
+	Data                 string                  `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                `json:"-"`
+	XXX_unrecognized     []byte                  `json:"-"`
+	XXX_sizecache        int32                   `json:"-"`
+}
+
+func (m *OpenOmciEvent) Reset()         { *m = OpenOmciEvent{} }
+func (m *OpenOmciEvent) String() string { return proto.CompactTextString(m) }
+func (*OpenOmciEvent) ProtoMessage()    {}
+func (*OpenOmciEvent) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4fa402a2df36dcc1, []int{7}
+}
+
+func (m *OpenOmciEvent) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OpenOmciEvent.Unmarshal(m, b)
+}
+func (m *OpenOmciEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OpenOmciEvent.Marshal(b, m, deterministic)
+}
+func (m *OpenOmciEvent) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OpenOmciEvent.Merge(m, src)
+}
+func (m *OpenOmciEvent) XXX_Size() int {
+	return xxx_messageInfo_OpenOmciEvent.Size(m)
+}
+func (m *OpenOmciEvent) XXX_DiscardUnknown() {
+	xxx_messageInfo_OpenOmciEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OpenOmciEvent proto.InternalMessageInfo
+
+func (m *OpenOmciEvent) GetType() OpenOmciEventType_Types {
+	if m != nil {
+		return m.Type
+	}
+	return OpenOmciEventType_state_change
+}
+
+func (m *OpenOmciEvent) GetData() string {
+	if m != nil {
+		return m.Data
+	}
+	return ""
+}
+
+func init() {
+	proto.RegisterEnum("omci.OpenOmciEventType_Types", OpenOmciEventType_Types_name, OpenOmciEventType_Types_value)
+	proto.RegisterType((*MibAttributeData)(nil), "omci.MibAttributeData")
+	proto.RegisterType((*MibInstanceData)(nil), "omci.MibInstanceData")
+	proto.RegisterType((*MibClassData)(nil), "omci.MibClassData")
+	proto.RegisterType((*ManagedEntity)(nil), "omci.ManagedEntity")
+	proto.RegisterType((*MessageType)(nil), "omci.MessageType")
+	proto.RegisterType((*MibDeviceData)(nil), "omci.MibDeviceData")
+	proto.RegisterType((*OpenOmciEventType)(nil), "omci.OpenOmciEventType")
+	proto.RegisterType((*OpenOmciEvent)(nil), "omci.OpenOmciEvent")
+}
+
+func init() { proto.RegisterFile("voltha_protos/omci_mib_db.proto", fileDescriptor_4fa402a2df36dcc1) }
+
+var fileDescriptor_4fa402a2df36dcc1 = []byte{
+	// 598 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcd, 0x6a, 0xdb, 0x4c,
+	0x14, 0xfd, 0xec, 0xd8, 0x89, 0x73, 0x6d, 0x25, 0xce, 0x7c, 0x6d, 0x99, 0x04, 0x42, 0x83, 0x28,
+	0x25, 0x8b, 0x56, 0xa6, 0x09, 0xb4, 0xab, 0x16, 0xea, 0x24, 0xb4, 0x81, 0x8a, 0x80, 0x1a, 0xba,
+	0xe8, 0x46, 0x8c, 0xa4, 0x5b, 0x65, 0xc0, 0x33, 0x32, 0x9a, 0x89, 0x40, 0xef, 0xd5, 0xd7, 0xc8,
+	0x4b, 0x74, 0x95, 0x27, 0xc8, 0xba, 0xcc, 0xe8, 0xc7, 0x72, 0x03, 0xa5, 0x3b, 0x9d, 0xfb, 0x73,
+	0xee, 0xbd, 0xe7, 0x88, 0x81, 0xe7, 0x45, 0xb6, 0xd0, 0x37, 0x2c, 0x5c, 0xe6, 0x99, 0xce, 0xd4,
+	0x2c, 0x13, 0x31, 0x0f, 0x05, 0x8f, 0xc2, 0x24, 0xf2, 0x6c, 0x88, 0x0c, 0x4c, 0xe8, 0x80, 0xae,
+	0x97, 0x09, 0xd4, 0xac, 0xca, 0xbb, 0x67, 0x30, 0xf5, 0x79, 0xf4, 0x51, 0xeb, 0x9c, 0x47, 0xb7,
+	0x1a, 0xcf, 0x99, 0x66, 0x64, 0x1f, 0x06, 0x92, 0x09, 0xa4, 0xbd, 0xa3, 0xde, 0xf1, 0xf6, 0x7c,
+	0x78, 0xff, 0x70, 0x77, 0xd8, 0x0b, 0x6c, 0x88, 0x3c, 0x81, 0x61, 0xc1, 0x16, 0xb7, 0x48, 0xfb,
+	0x26, 0x17, 0x54, 0xc0, 0xfd, 0xd9, 0x83, 0x5d, 0x9f, 0x47, 0x97, 0x52, 0x69, 0x26, 0xe3, 0x8a,
+	0xe4, 0x25, 0x8c, 0x79, 0x8d, 0x43, 0x9e, 0x58, 0x2e, 0xa7, 0xe1, 0x82, 0x26, 0x73, 0x99, 0x10,
+	0x0a, 0x5b, 0x71, 0x8e, 0x4c, 0x63, 0x52, 0x73, 0x36, 0x90, 0x1c, 0xc0, 0x48, 0x64, 0x09, 0xff,
+	0xc1, 0x31, 0xa1, 0x1b, 0x36, 0xd5, 0x62, 0x72, 0x06, 0xc0, 0x9a, 0x9d, 0x15, 0x1d, 0x1c, 0x6d,
+	0x1c, 0x8f, 0x4f, 0x9e, 0x79, 0xe6, 0x56, 0xef, 0xcf, 0x73, 0xe6, 0xe3, 0x5f, 0x0f, 0x77, 0x87,
+	0x9b, 0xd5, 0x4d, 0x41, 0xa7, 0xcd, 0x2d, 0x61, 0xe2, 0xf3, 0xe8, 0x6c, 0xc1, 0x94, 0xb2, 0x2b,
+	0x1f, 0xc1, 0x28, 0x36, 0xe0, 0xd1, 0xbe, 0x5b, 0x36, 0x7c, 0x99, 0x90, 0x4f, 0xb0, 0xdd, 0xac,
+	0xae, 0x68, 0xdf, 0x4e, 0x7d, 0xda, 0x4e, 0xed, 0x9e, 0x3f, 0x27, 0x66, 0xa8, 0xb3, 0xa6, 0x41,
+	0xb0, 0xea, 0x75, 0xbf, 0x80, 0xe3, 0x33, 0xc9, 0x52, 0x4c, 0x2e, 0xa4, 0xe6, 0xba, 0xfc, 0x87,
+	0xd9, 0x8d, 0x2b, 0xfd, 0x47, 0xae, 0xb8, 0xef, 0x60, 0xec, 0xa3, 0x52, 0x2c, 0xc5, 0xeb, 0x72,
+	0x89, 0xe4, 0x18, 0x26, 0xa2, 0x82, 0xa1, 0x2e, 0x97, 0xb8, 0xce, 0x37, 0x16, 0xab, 0x4a, 0xf7,
+	0xbe, 0x0f, 0x8e, 0xcf, 0xa3, 0x73, 0x2c, 0x78, 0x6d, 0x9b, 0x0b, 0xdb, 0x89, 0x45, 0xcd, 0x22,
+	0xed, 0xa8, 0x51, 0x15, 0xff, 0xab, 0x65, 0x2f, 0x60, 0x67, 0xc1, 0x94, 0x0e, 0x55, 0x29, 0xe3,
+	0x50, 0x73, 0x81, 0xb5, 0x71, 0x13, 0x13, 0xfd, 0x5a, 0xca, 0xf8, 0x9a, 0x0b, 0x24, 0x2e, 0x38,
+	0xf6, 0x1f, 0x65, 0x9a, 0xd9, 0x4a, 0x3a, 0x30, 0x0b, 0x06, 0x63, 0xc1, 0x23, 0xb3, 0x83, 0xa9,
+	0x33, 0x33, 0x0a, 0xcc, 0x15, 0xcf, 0x24, 0x1d, 0xda, 0x6c, 0x03, 0xc9, 0x7b, 0xa8, 0x24, 0x41,
+	0x45, 0x37, 0xad, 0x03, 0xa4, 0x75, 0xa0, 0xb5, 0x72, 0xbe, 0x6b, 0xe4, 0x87, 0x95, 0xa6, 0x41,
+	0xd3, 0x43, 0x3e, 0xc0, 0x54, 0x54, 0xca, 0x87, 0x68, 0xa4, 0xe7, 0xa8, 0xe8, 0x96, 0xe5, 0xf9,
+	0xbf, 0xe6, 0xe9, 0xfa, 0x12, 0xec, 0x8a, 0x0e, 0xe4, 0xa8, 0xc8, 0x5b, 0x70, 0xba, 0xe2, 0x2a,
+	0x3a, 0xb2, 0xcd, 0x7b, 0x75, 0xf3, 0x4a, 0xdc, 0x60, 0xd2, 0x51, 0x5a, 0xb9, 0x1e, 0xec, 0x5d,
+	0x2d, 0x51, 0x5e, 0x89, 0x98, 0x5f, 0x14, 0x28, 0xb5, 0xd5, 0x7f, 0x1f, 0x86, 0x36, 0x4b, 0xa6,
+	0x30, 0x51, 0x9a, 0x69, 0x0c, 0xe3, 0x1b, 0x26, 0x53, 0x9c, 0xfe, 0xe7, 0x7e, 0x03, 0x67, 0xad,
+	0x9e, 0xbc, 0x81, 0x41, 0xeb, 0xe6, 0xce, 0xc9, 0x61, 0x35, 0xef, 0x11, 0xa5, 0x67, 0xf9, 0x02,
+	0x5b, 0x4a, 0x08, 0x0c, 0x8c, 0xc8, 0xb5, 0x4b, 0xf6, 0x7b, 0xfe, 0x19, 0x68, 0x96, 0xa7, 0x5e,
+	0xb6, 0x44, 0x19, 0x67, 0x79, 0xe2, 0x55, 0x2f, 0x83, 0x65, 0xfb, 0xfe, 0x2a, 0xe5, 0xfa, 0xe6,
+	0x36, 0xf2, 0xe2, 0x4c, 0xcc, 0x9a, 0x82, 0x59, 0x55, 0xf0, 0xba, 0x7e, 0x3a, 0x8a, 0xd3, 0x59,
+	0x9a, 0xd9, 0x77, 0x26, 0xda, 0xb4, 0xa1, 0xd3, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x26, 0xab,
+	0x52, 0x5b, 0x84, 0x04, 0x00, 0x00,
+}
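+
+// Usage sketch (hypothetical variable names; not part of the generated
+// API): the nil-safe getters generated above make it safe to traverse a
+// decoded MIB snapshot without explicit nil checks at each level. The
+// buf input is assumed to hold a serialized MibDeviceData.
+//
+//	mib := &MibDeviceData{}
+//	if err := proto.Unmarshal(buf, mib); err != nil {
+//		// handle the error
+//	}
+//	for _, class := range mib.GetClasses() {
+//		for _, inst := range class.GetInstances() {
+//			for _, attr := range inst.GetAttributes() {
+//				_ = attr.GetName()  // attribute name, e.g. "administrative_state"
+//				_ = attr.GetValue() // attribute value as a string
+//			}
+//		}
+//	}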
diff --git a/vendor/github.com/opencord/voltha-protos/v3/go/openflow_13/openflow_13.pb.go b/vendor/github.com/opencord/voltha-protos/v3/go/openflow_13/openflow_13.pb.go
new file mode 100644
index 0000000..e3c00f3
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v3/go/openflow_13/openflow_13.pb.go
@@ -0,0 +1,9814 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/openflow_13.proto
+
+package openflow_13
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// Port numbering. Ports are numbered starting from 1.
+type OfpPortNo int32
+
+const (
+	OfpPortNo_OFPP_INVALID OfpPortNo = 0
+	// Maximum number of physical and logical switch ports.
+	OfpPortNo_OFPP_MAX OfpPortNo = 2147483392
+	// Reserved OpenFlow Port (fake output "ports").
+	OfpPortNo_OFPP_IN_PORT    OfpPortNo = 2147483640
+	OfpPortNo_OFPP_TABLE      OfpPortNo = 2147483641
+	OfpPortNo_OFPP_NORMAL     OfpPortNo = 2147483642
+	OfpPortNo_OFPP_FLOOD      OfpPortNo = 2147483643
+	OfpPortNo_OFPP_ALL        OfpPortNo = 2147483644
+	OfpPortNo_OFPP_CONTROLLER OfpPortNo = 2147483645
+	OfpPortNo_OFPP_LOCAL      OfpPortNo = 2147483646
+	OfpPortNo_OFPP_ANY        OfpPortNo = 2147483647
+)
+
+var OfpPortNo_name = map[int32]string{
+	0:          "OFPP_INVALID",
+	2147483392: "OFPP_MAX",
+	2147483640: "OFPP_IN_PORT",
+	2147483641: "OFPP_TABLE",
+	2147483642: "OFPP_NORMAL",
+	2147483643: "OFPP_FLOOD",
+	2147483644: "OFPP_ALL",
+	2147483645: "OFPP_CONTROLLER",
+	2147483646: "OFPP_LOCAL",
+	2147483647: "OFPP_ANY",
+}
+
+var OfpPortNo_value = map[string]int32{
+	"OFPP_INVALID":    0,
+	"OFPP_MAX":        2147483392,
+	"OFPP_IN_PORT":    2147483640,
+	"OFPP_TABLE":      2147483641,
+	"OFPP_NORMAL":     2147483642,
+	"OFPP_FLOOD":      2147483643,
+	"OFPP_ALL":        2147483644,
+	"OFPP_CONTROLLER": 2147483645,
+	"OFPP_LOCAL":      2147483646,
+	"OFPP_ANY":        2147483647,
+}
+
+func (x OfpPortNo) String() string {
+	return proto.EnumName(OfpPortNo_name, int32(x))
+}
+
+func (OfpPortNo) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{0}
+}
+
+type OfpType int32
+
+const (
+	// Immutable messages.
+	OfpType_OFPT_HELLO        OfpType = 0
+	OfpType_OFPT_ERROR        OfpType = 1
+	OfpType_OFPT_ECHO_REQUEST OfpType = 2
+	OfpType_OFPT_ECHO_REPLY   OfpType = 3
+	OfpType_OFPT_EXPERIMENTER OfpType = 4
+	// Switch configuration messages.
+	OfpType_OFPT_FEATURES_REQUEST   OfpType = 5
+	OfpType_OFPT_FEATURES_REPLY     OfpType = 6
+	OfpType_OFPT_GET_CONFIG_REQUEST OfpType = 7
+	OfpType_OFPT_GET_CONFIG_REPLY   OfpType = 8
+	OfpType_OFPT_SET_CONFIG         OfpType = 9
+	// Asynchronous messages.
+	OfpType_OFPT_PACKET_IN    OfpType = 10
+	OfpType_OFPT_FLOW_REMOVED OfpType = 11
+	OfpType_OFPT_PORT_STATUS  OfpType = 12
+	// Controller command messages.
+	OfpType_OFPT_PACKET_OUT OfpType = 13
+	OfpType_OFPT_FLOW_MOD   OfpType = 14
+	OfpType_OFPT_GROUP_MOD  OfpType = 15
+	OfpType_OFPT_PORT_MOD   OfpType = 16
+	OfpType_OFPT_TABLE_MOD  OfpType = 17
+	// Multipart messages.
+	OfpType_OFPT_MULTIPART_REQUEST OfpType = 18
+	OfpType_OFPT_MULTIPART_REPLY   OfpType = 19
+	// Barrier messages.
+	OfpType_OFPT_BARRIER_REQUEST OfpType = 20
+	OfpType_OFPT_BARRIER_REPLY   OfpType = 21
+	// Queue Configuration messages.
+	OfpType_OFPT_QUEUE_GET_CONFIG_REQUEST OfpType = 22
+	OfpType_OFPT_QUEUE_GET_CONFIG_REPLY   OfpType = 23
+	// Controller role change request messages.
+	OfpType_OFPT_ROLE_REQUEST OfpType = 24
+	OfpType_OFPT_ROLE_REPLY   OfpType = 25
+	// Asynchronous message configuration.
+	OfpType_OFPT_GET_ASYNC_REQUEST OfpType = 26
+	OfpType_OFPT_GET_ASYNC_REPLY   OfpType = 27
+	OfpType_OFPT_SET_ASYNC         OfpType = 28
+	// Meters and rate limiters configuration messages.
+	OfpType_OFPT_METER_MOD OfpType = 29
+)
+
+var OfpType_name = map[int32]string{
+	0:  "OFPT_HELLO",
+	1:  "OFPT_ERROR",
+	2:  "OFPT_ECHO_REQUEST",
+	3:  "OFPT_ECHO_REPLY",
+	4:  "OFPT_EXPERIMENTER",
+	5:  "OFPT_FEATURES_REQUEST",
+	6:  "OFPT_FEATURES_REPLY",
+	7:  "OFPT_GET_CONFIG_REQUEST",
+	8:  "OFPT_GET_CONFIG_REPLY",
+	9:  "OFPT_SET_CONFIG",
+	10: "OFPT_PACKET_IN",
+	11: "OFPT_FLOW_REMOVED",
+	12: "OFPT_PORT_STATUS",
+	13: "OFPT_PACKET_OUT",
+	14: "OFPT_FLOW_MOD",
+	15: "OFPT_GROUP_MOD",
+	16: "OFPT_PORT_MOD",
+	17: "OFPT_TABLE_MOD",
+	18: "OFPT_MULTIPART_REQUEST",
+	19: "OFPT_MULTIPART_REPLY",
+	20: "OFPT_BARRIER_REQUEST",
+	21: "OFPT_BARRIER_REPLY",
+	22: "OFPT_QUEUE_GET_CONFIG_REQUEST",
+	23: "OFPT_QUEUE_GET_CONFIG_REPLY",
+	24: "OFPT_ROLE_REQUEST",
+	25: "OFPT_ROLE_REPLY",
+	26: "OFPT_GET_ASYNC_REQUEST",
+	27: "OFPT_GET_ASYNC_REPLY",
+	28: "OFPT_SET_ASYNC",
+	29: "OFPT_METER_MOD",
+}
+
+var OfpType_value = map[string]int32{
+	"OFPT_HELLO":                    0,
+	"OFPT_ERROR":                    1,
+	"OFPT_ECHO_REQUEST":             2,
+	"OFPT_ECHO_REPLY":               3,
+	"OFPT_EXPERIMENTER":             4,
+	"OFPT_FEATURES_REQUEST":         5,
+	"OFPT_FEATURES_REPLY":           6,
+	"OFPT_GET_CONFIG_REQUEST":       7,
+	"OFPT_GET_CONFIG_REPLY":         8,
+	"OFPT_SET_CONFIG":               9,
+	"OFPT_PACKET_IN":                10,
+	"OFPT_FLOW_REMOVED":             11,
+	"OFPT_PORT_STATUS":              12,
+	"OFPT_PACKET_OUT":               13,
+	"OFPT_FLOW_MOD":                 14,
+	"OFPT_GROUP_MOD":                15,
+	"OFPT_PORT_MOD":                 16,
+	"OFPT_TABLE_MOD":                17,
+	"OFPT_MULTIPART_REQUEST":        18,
+	"OFPT_MULTIPART_REPLY":          19,
+	"OFPT_BARRIER_REQUEST":          20,
+	"OFPT_BARRIER_REPLY":            21,
+	"OFPT_QUEUE_GET_CONFIG_REQUEST": 22,
+	"OFPT_QUEUE_GET_CONFIG_REPLY":   23,
+	"OFPT_ROLE_REQUEST":             24,
+	"OFPT_ROLE_REPLY":               25,
+	"OFPT_GET_ASYNC_REQUEST":        26,
+	"OFPT_GET_ASYNC_REPLY":          27,
+	"OFPT_SET_ASYNC":                28,
+	"OFPT_METER_MOD":                29,
+}
+
+func (x OfpType) String() string {
+	return proto.EnumName(OfpType_name, int32(x))
+}
+
+func (OfpType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{1}
+}
+
+// Hello element types.
+type OfpHelloElemType int32
+
+const (
+	OfpHelloElemType_OFPHET_INVALID       OfpHelloElemType = 0
+	OfpHelloElemType_OFPHET_VERSIONBITMAP OfpHelloElemType = 1
+)
+
+var OfpHelloElemType_name = map[int32]string{
+	0: "OFPHET_INVALID",
+	1: "OFPHET_VERSIONBITMAP",
+}
+
+var OfpHelloElemType_value = map[string]int32{
+	"OFPHET_INVALID":       0,
+	"OFPHET_VERSIONBITMAP": 1,
+}
+
+func (x OfpHelloElemType) String() string {
+	return proto.EnumName(OfpHelloElemType_name, int32(x))
+}
+
+func (OfpHelloElemType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{2}
+}
+
+type OfpConfigFlags int32
+
+const (
+	// Handling of IP fragments.
+	OfpConfigFlags_OFPC_FRAG_NORMAL OfpConfigFlags = 0
+	OfpConfigFlags_OFPC_FRAG_DROP   OfpConfigFlags = 1
+	OfpConfigFlags_OFPC_FRAG_REASM  OfpConfigFlags = 2
+	OfpConfigFlags_OFPC_FRAG_MASK   OfpConfigFlags = 3
+)
+
+var OfpConfigFlags_name = map[int32]string{
+	0: "OFPC_FRAG_NORMAL",
+	1: "OFPC_FRAG_DROP",
+	2: "OFPC_FRAG_REASM",
+	3: "OFPC_FRAG_MASK",
+}
+
+var OfpConfigFlags_value = map[string]int32{
+	"OFPC_FRAG_NORMAL": 0,
+	"OFPC_FRAG_DROP":   1,
+	"OFPC_FRAG_REASM":  2,
+	"OFPC_FRAG_MASK":   3,
+}
+
+func (x OfpConfigFlags) String() string {
+	return proto.EnumName(OfpConfigFlags_name, int32(x))
+}
+
+func (OfpConfigFlags) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{3}
+}
+
+// Flags to configure the table. Reserved for future use.
+type OfpTableConfig int32
+
+const (
+	OfpTableConfig_OFPTC_INVALID         OfpTableConfig = 0
+	OfpTableConfig_OFPTC_DEPRECATED_MASK OfpTableConfig = 3
+)
+
+var OfpTableConfig_name = map[int32]string{
+	0: "OFPTC_INVALID",
+	3: "OFPTC_DEPRECATED_MASK",
+}
+
+var OfpTableConfig_value = map[string]int32{
+	"OFPTC_INVALID":         0,
+	"OFPTC_DEPRECATED_MASK": 3,
+}
+
+func (x OfpTableConfig) String() string {
+	return proto.EnumName(OfpTableConfig_name, int32(x))
+}
+
+func (OfpTableConfig) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{4}
+}
+
+// Table numbering. Tables can use any number up to OFPTT_MAX.
+type OfpTable int32
+
+const (
+	OfpTable_OFPTT_INVALID OfpTable = 0
+	// Last usable table number.
+	OfpTable_OFPTT_MAX OfpTable = 254
+	// Fake tables.
+	OfpTable_OFPTT_ALL OfpTable = 255
+)
+
+var OfpTable_name = map[int32]string{
+	0:   "OFPTT_INVALID",
+	254: "OFPTT_MAX",
+	255: "OFPTT_ALL",
+}
+
+var OfpTable_value = map[string]int32{
+	"OFPTT_INVALID": 0,
+	"OFPTT_MAX":     254,
+	"OFPTT_ALL":     255,
+}
+
+func (x OfpTable) String() string {
+	return proto.EnumName(OfpTable_name, int32(x))
+}
+
+func (OfpTable) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{5}
+}
+
+// Capabilities supported by the datapath.
+type OfpCapabilities int32
+
+const (
+	OfpCapabilities_OFPC_INVALID      OfpCapabilities = 0
+	OfpCapabilities_OFPC_FLOW_STATS   OfpCapabilities = 1
+	OfpCapabilities_OFPC_TABLE_STATS  OfpCapabilities = 2
+	OfpCapabilities_OFPC_PORT_STATS   OfpCapabilities = 4
+	OfpCapabilities_OFPC_GROUP_STATS  OfpCapabilities = 8
+	OfpCapabilities_OFPC_IP_REASM     OfpCapabilities = 32
+	OfpCapabilities_OFPC_QUEUE_STATS  OfpCapabilities = 64
+	OfpCapabilities_OFPC_PORT_BLOCKED OfpCapabilities = 256
+)
+
+var OfpCapabilities_name = map[int32]string{
+	0:   "OFPC_INVALID",
+	1:   "OFPC_FLOW_STATS",
+	2:   "OFPC_TABLE_STATS",
+	4:   "OFPC_PORT_STATS",
+	8:   "OFPC_GROUP_STATS",
+	32:  "OFPC_IP_REASM",
+	64:  "OFPC_QUEUE_STATS",
+	256: "OFPC_PORT_BLOCKED",
+}
+
+var OfpCapabilities_value = map[string]int32{
+	"OFPC_INVALID":      0,
+	"OFPC_FLOW_STATS":   1,
+	"OFPC_TABLE_STATS":  2,
+	"OFPC_PORT_STATS":   4,
+	"OFPC_GROUP_STATS":  8,
+	"OFPC_IP_REASM":     32,
+	"OFPC_QUEUE_STATS":  64,
+	"OFPC_PORT_BLOCKED": 256,
+}
+
+func (x OfpCapabilities) String() string {
+	return proto.EnumName(OfpCapabilities_name, int32(x))
+}
+
+func (OfpCapabilities) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{6}
+}
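+
+// Usage sketch (hypothetical caps value; not part of the generated API):
+// OfpCapabilities values are bit flags, so a datapath capabilities word
+// is tested with a bitwise AND rather than equality.
+//
+//	var caps uint32 = uint32(OfpCapabilities_OFPC_FLOW_STATS) |
+//		uint32(OfpCapabilities_OFPC_PORT_STATS)
+//	if caps&uint32(OfpCapabilities_OFPC_FLOW_STATS) != 0 {
+//		// the datapath supports flow statistics
+//	}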
+
+// Flags to indicate behavior of the physical port.  These flags are
+// used in ofp_port to describe the current configuration.  They are
+// used in the ofp_port_mod message to configure the port's behavior.
+type OfpPortConfig int32
+
+const (
+	OfpPortConfig_OFPPC_INVALID      OfpPortConfig = 0
+	OfpPortConfig_OFPPC_PORT_DOWN    OfpPortConfig = 1
+	OfpPortConfig_OFPPC_NO_RECV      OfpPortConfig = 4
+	OfpPortConfig_OFPPC_NO_FWD       OfpPortConfig = 32
+	OfpPortConfig_OFPPC_NO_PACKET_IN OfpPortConfig = 64
+)
+
+var OfpPortConfig_name = map[int32]string{
+	0:  "OFPPC_INVALID",
+	1:  "OFPPC_PORT_DOWN",
+	4:  "OFPPC_NO_RECV",
+	32: "OFPPC_NO_FWD",
+	64: "OFPPC_NO_PACKET_IN",
+}
+
+var OfpPortConfig_value = map[string]int32{
+	"OFPPC_INVALID":      0,
+	"OFPPC_PORT_DOWN":    1,
+	"OFPPC_NO_RECV":      4,
+	"OFPPC_NO_FWD":       32,
+	"OFPPC_NO_PACKET_IN": 64,
+}
+
+func (x OfpPortConfig) String() string {
+	return proto.EnumName(OfpPortConfig_name, int32(x))
+}
+
+func (OfpPortConfig) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{7}
+}
+
+// Current state of the physical port.  These are not configurable from
+// the controller.
+type OfpPortState int32
+
+const (
+	OfpPortState_OFPPS_INVALID   OfpPortState = 0
+	OfpPortState_OFPPS_LINK_DOWN OfpPortState = 1
+	OfpPortState_OFPPS_BLOCKED   OfpPortState = 2
+	OfpPortState_OFPPS_LIVE      OfpPortState = 4
+)
+
+var OfpPortState_name = map[int32]string{
+	0: "OFPPS_INVALID",
+	1: "OFPPS_LINK_DOWN",
+	2: "OFPPS_BLOCKED",
+	4: "OFPPS_LIVE",
+}
+
+var OfpPortState_value = map[string]int32{
+	"OFPPS_INVALID":   0,
+	"OFPPS_LINK_DOWN": 1,
+	"OFPPS_BLOCKED":   2,
+	"OFPPS_LIVE":      4,
+}
+
+func (x OfpPortState) String() string {
+	return proto.EnumName(OfpPortState_name, int32(x))
+}
+
+func (OfpPortState) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{8}
+}
+
+// Features of ports available in a datapath.
+type OfpPortFeatures int32
+
+const (
+	OfpPortFeatures_OFPPF_INVALID    OfpPortFeatures = 0
+	OfpPortFeatures_OFPPF_10MB_HD    OfpPortFeatures = 1
+	OfpPortFeatures_OFPPF_10MB_FD    OfpPortFeatures = 2
+	OfpPortFeatures_OFPPF_100MB_HD   OfpPortFeatures = 4
+	OfpPortFeatures_OFPPF_100MB_FD   OfpPortFeatures = 8
+	OfpPortFeatures_OFPPF_1GB_HD     OfpPortFeatures = 16
+	OfpPortFeatures_OFPPF_1GB_FD     OfpPortFeatures = 32
+	OfpPortFeatures_OFPPF_10GB_FD    OfpPortFeatures = 64
+	OfpPortFeatures_OFPPF_40GB_FD    OfpPortFeatures = 128
+	OfpPortFeatures_OFPPF_100GB_FD   OfpPortFeatures = 256
+	OfpPortFeatures_OFPPF_1TB_FD     OfpPortFeatures = 512
+	OfpPortFeatures_OFPPF_OTHER      OfpPortFeatures = 1024
+	OfpPortFeatures_OFPPF_COPPER     OfpPortFeatures = 2048
+	OfpPortFeatures_OFPPF_FIBER      OfpPortFeatures = 4096
+	OfpPortFeatures_OFPPF_AUTONEG    OfpPortFeatures = 8192
+	OfpPortFeatures_OFPPF_PAUSE      OfpPortFeatures = 16384
+	OfpPortFeatures_OFPPF_PAUSE_ASYM OfpPortFeatures = 32768
+)
+
+var OfpPortFeatures_name = map[int32]string{
+	0:     "OFPPF_INVALID",
+	1:     "OFPPF_10MB_HD",
+	2:     "OFPPF_10MB_FD",
+	4:     "OFPPF_100MB_HD",
+	8:     "OFPPF_100MB_FD",
+	16:    "OFPPF_1GB_HD",
+	32:    "OFPPF_1GB_FD",
+	64:    "OFPPF_10GB_FD",
+	128:   "OFPPF_40GB_FD",
+	256:   "OFPPF_100GB_FD",
+	512:   "OFPPF_1TB_FD",
+	1024:  "OFPPF_OTHER",
+	2048:  "OFPPF_COPPER",
+	4096:  "OFPPF_FIBER",
+	8192:  "OFPPF_AUTONEG",
+	16384: "OFPPF_PAUSE",
+	32768: "OFPPF_PAUSE_ASYM",
+}
+
+var OfpPortFeatures_value = map[string]int32{
+	"OFPPF_INVALID":    0,
+	"OFPPF_10MB_HD":    1,
+	"OFPPF_10MB_FD":    2,
+	"OFPPF_100MB_HD":   4,
+	"OFPPF_100MB_FD":   8,
+	"OFPPF_1GB_HD":     16,
+	"OFPPF_1GB_FD":     32,
+	"OFPPF_10GB_FD":    64,
+	"OFPPF_40GB_FD":    128,
+	"OFPPF_100GB_FD":   256,
+	"OFPPF_1TB_FD":     512,
+	"OFPPF_OTHER":      1024,
+	"OFPPF_COPPER":     2048,
+	"OFPPF_FIBER":      4096,
+	"OFPPF_AUTONEG":    8192,
+	"OFPPF_PAUSE":      16384,
+	"OFPPF_PAUSE_ASYM": 32768,
+}
+
+func (x OfpPortFeatures) String() string {
+	return proto.EnumName(OfpPortFeatures_name, int32(x))
+}
+
+func (OfpPortFeatures) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{9}
+}
+
+// What changed about the physical port.

+type OfpPortReason int32
+
+const (
+	OfpPortReason_OFPPR_ADD    OfpPortReason = 0
+	OfpPortReason_OFPPR_DELETE OfpPortReason = 1
+	OfpPortReason_OFPPR_MODIFY OfpPortReason = 2
+)
+
+var OfpPortReason_name = map[int32]string{
+	0: "OFPPR_ADD",
+	1: "OFPPR_DELETE",
+	2: "OFPPR_MODIFY",
+}
+
+var OfpPortReason_value = map[string]int32{
+	"OFPPR_ADD":    0,
+	"OFPPR_DELETE": 1,
+	"OFPPR_MODIFY": 2,
+}
+
+func (x OfpPortReason) String() string {
+	return proto.EnumName(OfpPortReason_name, int32(x))
+}
+
+func (OfpPortReason) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{10}
+}
+
+// The match type indicates the match structure (set of fields that compose the
+// match) in use. The match type is placed in the type field at the beginning
+// of all match structures. The "OpenFlow Extensible Match" type corresponds
+// to OXM TLV format described below and must be supported by all OpenFlow
+// switches. Extensions that define other match types may be published on the
+// ONF wiki. Support for extensions is optional.
+type OfpMatchType int32
+
+const (
+	OfpMatchType_OFPMT_STANDARD OfpMatchType = 0
+	OfpMatchType_OFPMT_OXM      OfpMatchType = 1
+)
+
+var OfpMatchType_name = map[int32]string{
+	0: "OFPMT_STANDARD",
+	1: "OFPMT_OXM",
+}
+
+var OfpMatchType_value = map[string]int32{
+	"OFPMT_STANDARD": 0,
+	"OFPMT_OXM":      1,
+}
+
+func (x OfpMatchType) String() string {
+	return proto.EnumName(OfpMatchType_name, int32(x))
+}
+
+func (OfpMatchType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{11}
+}
+
+// OXM Class IDs.
+// The high order bit differentiates reserved classes from member classes.
+// Classes 0x0000 to 0x7FFF are member classes, allocated by ONF.
+// Classes 0x8000 to 0xFFFE are reserved classes, reserved for standardisation.
+type OfpOxmClass int32
+
+const (
+	OfpOxmClass_OFPXMC_NXM_0          OfpOxmClass = 0
+	OfpOxmClass_OFPXMC_NXM_1          OfpOxmClass = 1
+	OfpOxmClass_OFPXMC_OPENFLOW_BASIC OfpOxmClass = 32768
+	OfpOxmClass_OFPXMC_EXPERIMENTER   OfpOxmClass = 65535
+)
+
+var OfpOxmClass_name = map[int32]string{
+	0:     "OFPXMC_NXM_0",
+	1:     "OFPXMC_NXM_1",
+	32768: "OFPXMC_OPENFLOW_BASIC",
+	65535: "OFPXMC_EXPERIMENTER",
+}
+
+var OfpOxmClass_value = map[string]int32{
+	"OFPXMC_NXM_0":          0,
+	"OFPXMC_NXM_1":          1,
+	"OFPXMC_OPENFLOW_BASIC": 32768,
+	"OFPXMC_EXPERIMENTER":   65535,
+}
+
+func (x OfpOxmClass) String() string {
+	return proto.EnumName(OfpOxmClass_name, int32(x))
+}
+
+func (OfpOxmClass) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{12}
+}
+
+// OXM Flow field types for OpenFlow basic class.
+type OxmOfbFieldTypes int32
+
+const (
+	OxmOfbFieldTypes_OFPXMT_OFB_IN_PORT        OxmOfbFieldTypes = 0
+	OxmOfbFieldTypes_OFPXMT_OFB_IN_PHY_PORT    OxmOfbFieldTypes = 1
+	OxmOfbFieldTypes_OFPXMT_OFB_METADATA       OxmOfbFieldTypes = 2
+	OxmOfbFieldTypes_OFPXMT_OFB_ETH_DST        OxmOfbFieldTypes = 3
+	OxmOfbFieldTypes_OFPXMT_OFB_ETH_SRC        OxmOfbFieldTypes = 4
+	OxmOfbFieldTypes_OFPXMT_OFB_ETH_TYPE       OxmOfbFieldTypes = 5
+	OxmOfbFieldTypes_OFPXMT_OFB_VLAN_VID       OxmOfbFieldTypes = 6
+	OxmOfbFieldTypes_OFPXMT_OFB_VLAN_PCP       OxmOfbFieldTypes = 7
+	OxmOfbFieldTypes_OFPXMT_OFB_IP_DSCP        OxmOfbFieldTypes = 8
+	OxmOfbFieldTypes_OFPXMT_OFB_IP_ECN         OxmOfbFieldTypes = 9
+	OxmOfbFieldTypes_OFPXMT_OFB_IP_PROTO       OxmOfbFieldTypes = 10
+	OxmOfbFieldTypes_OFPXMT_OFB_IPV4_SRC       OxmOfbFieldTypes = 11
+	OxmOfbFieldTypes_OFPXMT_OFB_IPV4_DST       OxmOfbFieldTypes = 12
+	OxmOfbFieldTypes_OFPXMT_OFB_TCP_SRC        OxmOfbFieldTypes = 13
+	OxmOfbFieldTypes_OFPXMT_OFB_TCP_DST        OxmOfbFieldTypes = 14
+	OxmOfbFieldTypes_OFPXMT_OFB_UDP_SRC        OxmOfbFieldTypes = 15
+	OxmOfbFieldTypes_OFPXMT_OFB_UDP_DST        OxmOfbFieldTypes = 16
+	OxmOfbFieldTypes_OFPXMT_OFB_SCTP_SRC       OxmOfbFieldTypes = 17
+	OxmOfbFieldTypes_OFPXMT_OFB_SCTP_DST       OxmOfbFieldTypes = 18
+	OxmOfbFieldTypes_OFPXMT_OFB_ICMPV4_TYPE    OxmOfbFieldTypes = 19
+	OxmOfbFieldTypes_OFPXMT_OFB_ICMPV4_CODE    OxmOfbFieldTypes = 20
+	OxmOfbFieldTypes_OFPXMT_OFB_ARP_OP         OxmOfbFieldTypes = 21
+	OxmOfbFieldTypes_OFPXMT_OFB_ARP_SPA        OxmOfbFieldTypes = 22
+	OxmOfbFieldTypes_OFPXMT_OFB_ARP_TPA        OxmOfbFieldTypes = 23
+	OxmOfbFieldTypes_OFPXMT_OFB_ARP_SHA        OxmOfbFieldTypes = 24
+	OxmOfbFieldTypes_OFPXMT_OFB_ARP_THA        OxmOfbFieldTypes = 25
+	OxmOfbFieldTypes_OFPXMT_OFB_IPV6_SRC       OxmOfbFieldTypes = 26
+	OxmOfbFieldTypes_OFPXMT_OFB_IPV6_DST       OxmOfbFieldTypes = 27
+	OxmOfbFieldTypes_OFPXMT_OFB_IPV6_FLABEL    OxmOfbFieldTypes = 28
+	OxmOfbFieldTypes_OFPXMT_OFB_ICMPV6_TYPE    OxmOfbFieldTypes = 29
+	OxmOfbFieldTypes_OFPXMT_OFB_ICMPV6_CODE    OxmOfbFieldTypes = 30
+	OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_TARGET OxmOfbFieldTypes = 31
+	OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_SLL    OxmOfbFieldTypes = 32
+	OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_TLL    OxmOfbFieldTypes = 33
+	OxmOfbFieldTypes_OFPXMT_OFB_MPLS_LABEL     OxmOfbFieldTypes = 34
+	OxmOfbFieldTypes_OFPXMT_OFB_MPLS_TC        OxmOfbFieldTypes = 35
+	OxmOfbFieldTypes_OFPXMT_OFB_MPLS_BOS       OxmOfbFieldTypes = 36
+	OxmOfbFieldTypes_OFPXMT_OFB_PBB_ISID       OxmOfbFieldTypes = 37
+	OxmOfbFieldTypes_OFPXMT_OFB_TUNNEL_ID      OxmOfbFieldTypes = 38
+	OxmOfbFieldTypes_OFPXMT_OFB_IPV6_EXTHDR    OxmOfbFieldTypes = 39
+)
+
+var OxmOfbFieldTypes_name = map[int32]string{
+	0:  "OFPXMT_OFB_IN_PORT",
+	1:  "OFPXMT_OFB_IN_PHY_PORT",
+	2:  "OFPXMT_OFB_METADATA",
+	3:  "OFPXMT_OFB_ETH_DST",
+	4:  "OFPXMT_OFB_ETH_SRC",
+	5:  "OFPXMT_OFB_ETH_TYPE",
+	6:  "OFPXMT_OFB_VLAN_VID",
+	7:  "OFPXMT_OFB_VLAN_PCP",
+	8:  "OFPXMT_OFB_IP_DSCP",
+	9:  "OFPXMT_OFB_IP_ECN",
+	10: "OFPXMT_OFB_IP_PROTO",
+	11: "OFPXMT_OFB_IPV4_SRC",
+	12: "OFPXMT_OFB_IPV4_DST",
+	13: "OFPXMT_OFB_TCP_SRC",
+	14: "OFPXMT_OFB_TCP_DST",
+	15: "OFPXMT_OFB_UDP_SRC",
+	16: "OFPXMT_OFB_UDP_DST",
+	17: "OFPXMT_OFB_SCTP_SRC",
+	18: "OFPXMT_OFB_SCTP_DST",
+	19: "OFPXMT_OFB_ICMPV4_TYPE",
+	20: "OFPXMT_OFB_ICMPV4_CODE",
+	21: "OFPXMT_OFB_ARP_OP",
+	22: "OFPXMT_OFB_ARP_SPA",
+	23: "OFPXMT_OFB_ARP_TPA",
+	24: "OFPXMT_OFB_ARP_SHA",
+	25: "OFPXMT_OFB_ARP_THA",
+	26: "OFPXMT_OFB_IPV6_SRC",
+	27: "OFPXMT_OFB_IPV6_DST",
+	28: "OFPXMT_OFB_IPV6_FLABEL",
+	29: "OFPXMT_OFB_ICMPV6_TYPE",
+	30: "OFPXMT_OFB_ICMPV6_CODE",
+	31: "OFPXMT_OFB_IPV6_ND_TARGET",
+	32: "OFPXMT_OFB_IPV6_ND_SLL",
+	33: "OFPXMT_OFB_IPV6_ND_TLL",
+	34: "OFPXMT_OFB_MPLS_LABEL",
+	35: "OFPXMT_OFB_MPLS_TC",
+	36: "OFPXMT_OFB_MPLS_BOS",
+	37: "OFPXMT_OFB_PBB_ISID",
+	38: "OFPXMT_OFB_TUNNEL_ID",
+	39: "OFPXMT_OFB_IPV6_EXTHDR",
+}
+
+var OxmOfbFieldTypes_value = map[string]int32{
+	"OFPXMT_OFB_IN_PORT":        0,
+	"OFPXMT_OFB_IN_PHY_PORT":    1,
+	"OFPXMT_OFB_METADATA":       2,
+	"OFPXMT_OFB_ETH_DST":        3,
+	"OFPXMT_OFB_ETH_SRC":        4,
+	"OFPXMT_OFB_ETH_TYPE":       5,
+	"OFPXMT_OFB_VLAN_VID":       6,
+	"OFPXMT_OFB_VLAN_PCP":       7,
+	"OFPXMT_OFB_IP_DSCP":        8,
+	"OFPXMT_OFB_IP_ECN":         9,
+	"OFPXMT_OFB_IP_PROTO":       10,
+	"OFPXMT_OFB_IPV4_SRC":       11,
+	"OFPXMT_OFB_IPV4_DST":       12,
+	"OFPXMT_OFB_TCP_SRC":        13,
+	"OFPXMT_OFB_TCP_DST":        14,
+	"OFPXMT_OFB_UDP_SRC":        15,
+	"OFPXMT_OFB_UDP_DST":        16,
+	"OFPXMT_OFB_SCTP_SRC":       17,
+	"OFPXMT_OFB_SCTP_DST":       18,
+	"OFPXMT_OFB_ICMPV4_TYPE":    19,
+	"OFPXMT_OFB_ICMPV4_CODE":    20,
+	"OFPXMT_OFB_ARP_OP":         21,
+	"OFPXMT_OFB_ARP_SPA":        22,
+	"OFPXMT_OFB_ARP_TPA":        23,
+	"OFPXMT_OFB_ARP_SHA":        24,
+	"OFPXMT_OFB_ARP_THA":        25,
+	"OFPXMT_OFB_IPV6_SRC":       26,
+	"OFPXMT_OFB_IPV6_DST":       27,
+	"OFPXMT_OFB_IPV6_FLABEL":    28,
+	"OFPXMT_OFB_ICMPV6_TYPE":    29,
+	"OFPXMT_OFB_ICMPV6_CODE":    30,
+	"OFPXMT_OFB_IPV6_ND_TARGET": 31,
+	"OFPXMT_OFB_IPV6_ND_SLL":    32,
+	"OFPXMT_OFB_IPV6_ND_TLL":    33,
+	"OFPXMT_OFB_MPLS_LABEL":     34,
+	"OFPXMT_OFB_MPLS_TC":        35,
+	"OFPXMT_OFB_MPLS_BOS":       36,
+	"OFPXMT_OFB_PBB_ISID":       37,
+	"OFPXMT_OFB_TUNNEL_ID":      38,
+	"OFPXMT_OFB_IPV6_EXTHDR":    39,
+}
+
+func (x OxmOfbFieldTypes) String() string {
+	return proto.EnumName(OxmOfbFieldTypes_name, int32(x))
+}
+
+func (OxmOfbFieldTypes) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{13}
+}
+
+// The VLAN id is 12 bits, so we can use the entire 16 bits to indicate
+// special conditions.
+type OfpVlanId int32
+
+const (
+	OfpVlanId_OFPVID_NONE    OfpVlanId = 0
+	OfpVlanId_OFPVID_PRESENT OfpVlanId = 4096
+)
+
+var OfpVlanId_name = map[int32]string{
+	0:    "OFPVID_NONE",
+	4096: "OFPVID_PRESENT",
+}
+
+var OfpVlanId_value = map[string]int32{
+	"OFPVID_NONE":    0,
+	"OFPVID_PRESENT": 4096,
+}
+
+func (x OfpVlanId) String() string {
+	return proto.EnumName(OfpVlanId_name, int32(x))
+}
+
+func (OfpVlanId) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{14}
+}
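+
+// Illustrative sketch (editor's addition, not protoc output): a concrete
+// OXM VLAN match value keeps the low 12 bits of the ID and ORs in the
+// OFPVID_PRESENT flag (0x1000), e.g. VLAN 100 encodes as 0x1064.
+func exampleVlanMatchValue(vid uint16) uint32 {
+	return uint32(vid&0x0fff) | uint32(OfpVlanId_OFPVID_PRESENT)
+}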
+
+// Bit definitions for IPv6 Extension Header pseudo-field.
+type OfpIpv6ExthdrFlags int32
+
+const (
+	OfpIpv6ExthdrFlags_OFPIEH_INVALID OfpIpv6ExthdrFlags = 0
+	OfpIpv6ExthdrFlags_OFPIEH_NONEXT  OfpIpv6ExthdrFlags = 1
+	OfpIpv6ExthdrFlags_OFPIEH_ESP     OfpIpv6ExthdrFlags = 2
+	OfpIpv6ExthdrFlags_OFPIEH_AUTH    OfpIpv6ExthdrFlags = 4
+	OfpIpv6ExthdrFlags_OFPIEH_DEST    OfpIpv6ExthdrFlags = 8
+	OfpIpv6ExthdrFlags_OFPIEH_FRAG    OfpIpv6ExthdrFlags = 16
+	OfpIpv6ExthdrFlags_OFPIEH_ROUTER  OfpIpv6ExthdrFlags = 32
+	OfpIpv6ExthdrFlags_OFPIEH_HOP     OfpIpv6ExthdrFlags = 64
+	OfpIpv6ExthdrFlags_OFPIEH_UNREP   OfpIpv6ExthdrFlags = 128
+	OfpIpv6ExthdrFlags_OFPIEH_UNSEQ   OfpIpv6ExthdrFlags = 256
+)
+
+var OfpIpv6ExthdrFlags_name = map[int32]string{
+	0:   "OFPIEH_INVALID",
+	1:   "OFPIEH_NONEXT",
+	2:   "OFPIEH_ESP",
+	4:   "OFPIEH_AUTH",
+	8:   "OFPIEH_DEST",
+	16:  "OFPIEH_FRAG",
+	32:  "OFPIEH_ROUTER",
+	64:  "OFPIEH_HOP",
+	128: "OFPIEH_UNREP",
+	256: "OFPIEH_UNSEQ",
+}
+
+var OfpIpv6ExthdrFlags_value = map[string]int32{
+	"OFPIEH_INVALID": 0,
+	"OFPIEH_NONEXT":  1,
+	"OFPIEH_ESP":     2,
+	"OFPIEH_AUTH":    4,
+	"OFPIEH_DEST":    8,
+	"OFPIEH_FRAG":    16,
+	"OFPIEH_ROUTER":  32,
+	"OFPIEH_HOP":     64,
+	"OFPIEH_UNREP":   128,
+	"OFPIEH_UNSEQ":   256,
+}
+
+func (x OfpIpv6ExthdrFlags) String() string {
+	return proto.EnumName(OfpIpv6ExthdrFlags_name, int32(x))
+}
+
+func (OfpIpv6ExthdrFlags) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{15}
+}
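+
+// Illustrative sketch (editor's addition, not protoc output): the exthdr
+// flags form a bitmask, so an individual header is tested with a bitwise AND.
+func exampleHasFragHeader(flags int32) bool {
+	return flags&int32(OfpIpv6ExthdrFlags_OFPIEH_FRAG) != 0
+}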
+
+type OfpActionType int32
+
+const (
+	OfpActionType_OFPAT_OUTPUT       OfpActionType = 0
+	OfpActionType_OFPAT_COPY_TTL_OUT OfpActionType = 11
+	OfpActionType_OFPAT_COPY_TTL_IN  OfpActionType = 12
+	OfpActionType_OFPAT_SET_MPLS_TTL OfpActionType = 15
+	OfpActionType_OFPAT_DEC_MPLS_TTL OfpActionType = 16
+	OfpActionType_OFPAT_PUSH_VLAN    OfpActionType = 17
+	OfpActionType_OFPAT_POP_VLAN     OfpActionType = 18
+	OfpActionType_OFPAT_PUSH_MPLS    OfpActionType = 19
+	OfpActionType_OFPAT_POP_MPLS     OfpActionType = 20
+	OfpActionType_OFPAT_SET_QUEUE    OfpActionType = 21
+	OfpActionType_OFPAT_GROUP        OfpActionType = 22
+	OfpActionType_OFPAT_SET_NW_TTL   OfpActionType = 23
+	OfpActionType_OFPAT_DEC_NW_TTL   OfpActionType = 24
+	OfpActionType_OFPAT_SET_FIELD    OfpActionType = 25
+	OfpActionType_OFPAT_PUSH_PBB     OfpActionType = 26
+	OfpActionType_OFPAT_POP_PBB      OfpActionType = 27
+	OfpActionType_OFPAT_EXPERIMENTER OfpActionType = 65535
+)
+
+var OfpActionType_name = map[int32]string{
+	0:     "OFPAT_OUTPUT",
+	11:    "OFPAT_COPY_TTL_OUT",
+	12:    "OFPAT_COPY_TTL_IN",
+	15:    "OFPAT_SET_MPLS_TTL",
+	16:    "OFPAT_DEC_MPLS_TTL",
+	17:    "OFPAT_PUSH_VLAN",
+	18:    "OFPAT_POP_VLAN",
+	19:    "OFPAT_PUSH_MPLS",
+	20:    "OFPAT_POP_MPLS",
+	21:    "OFPAT_SET_QUEUE",
+	22:    "OFPAT_GROUP",
+	23:    "OFPAT_SET_NW_TTL",
+	24:    "OFPAT_DEC_NW_TTL",
+	25:    "OFPAT_SET_FIELD",
+	26:    "OFPAT_PUSH_PBB",
+	27:    "OFPAT_POP_PBB",
+	65535: "OFPAT_EXPERIMENTER",
+}
+
+var OfpActionType_value = map[string]int32{
+	"OFPAT_OUTPUT":       0,
+	"OFPAT_COPY_TTL_OUT": 11,
+	"OFPAT_COPY_TTL_IN":  12,
+	"OFPAT_SET_MPLS_TTL": 15,
+	"OFPAT_DEC_MPLS_TTL": 16,
+	"OFPAT_PUSH_VLAN":    17,
+	"OFPAT_POP_VLAN":     18,
+	"OFPAT_PUSH_MPLS":    19,
+	"OFPAT_POP_MPLS":     20,
+	"OFPAT_SET_QUEUE":    21,
+	"OFPAT_GROUP":        22,
+	"OFPAT_SET_NW_TTL":   23,
+	"OFPAT_DEC_NW_TTL":   24,
+	"OFPAT_SET_FIELD":    25,
+	"OFPAT_PUSH_PBB":     26,
+	"OFPAT_POP_PBB":      27,
+	"OFPAT_EXPERIMENTER": 65535,
+}
+
+func (x OfpActionType) String() string {
+	return proto.EnumName(OfpActionType_name, int32(x))
+}
+
+func (OfpActionType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{16}
+}
+
+type OfpControllerMaxLen int32
+
+const (
+	OfpControllerMaxLen_OFPCML_INVALID   OfpControllerMaxLen = 0
+	OfpControllerMaxLen_OFPCML_MAX       OfpControllerMaxLen = 65509
+	OfpControllerMaxLen_OFPCML_NO_BUFFER OfpControllerMaxLen = 65535
+)
+
+var OfpControllerMaxLen_name = map[int32]string{
+	0:     "OFPCML_INVALID",
+	65509: "OFPCML_MAX",
+	65535: "OFPCML_NO_BUFFER",
+}
+
+var OfpControllerMaxLen_value = map[string]int32{
+	"OFPCML_INVALID":   0,
+	"OFPCML_MAX":       65509,
+	"OFPCML_NO_BUFFER": 65535,
+}
+
+func (x OfpControllerMaxLen) String() string {
+	return proto.EnumName(OfpControllerMaxLen_name, int32(x))
+}
+
+func (OfpControllerMaxLen) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{17}
+}
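+
+// Illustrative sketch (editor's addition, not protoc output): OFPCML_MAX is
+// the largest meaningful byte count for packet-in truncation, while
+// OFPCML_NO_BUFFER asks for the whole packet; one plausible policy is to
+// clamp anything in between, as below.
+func exampleClampMaxLen(n uint32) uint32 {
+	if n != uint32(OfpControllerMaxLen_OFPCML_NO_BUFFER) &&
+		n > uint32(OfpControllerMaxLen_OFPCML_MAX) {
+		return uint32(OfpControllerMaxLen_OFPCML_MAX)
+	}
+	return n
+}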
+
+type OfpInstructionType int32
+
+const (
+	OfpInstructionType_OFPIT_INVALID        OfpInstructionType = 0
+	OfpInstructionType_OFPIT_GOTO_TABLE     OfpInstructionType = 1
+	OfpInstructionType_OFPIT_WRITE_METADATA OfpInstructionType = 2
+	OfpInstructionType_OFPIT_WRITE_ACTIONS  OfpInstructionType = 3
+	OfpInstructionType_OFPIT_APPLY_ACTIONS  OfpInstructionType = 4
+	OfpInstructionType_OFPIT_CLEAR_ACTIONS  OfpInstructionType = 5
+	OfpInstructionType_OFPIT_METER          OfpInstructionType = 6
+	OfpInstructionType_OFPIT_EXPERIMENTER   OfpInstructionType = 65535
+)
+
+var OfpInstructionType_name = map[int32]string{
+	0:     "OFPIT_INVALID",
+	1:     "OFPIT_GOTO_TABLE",
+	2:     "OFPIT_WRITE_METADATA",
+	3:     "OFPIT_WRITE_ACTIONS",
+	4:     "OFPIT_APPLY_ACTIONS",
+	5:     "OFPIT_CLEAR_ACTIONS",
+	6:     "OFPIT_METER",
+	65535: "OFPIT_EXPERIMENTER",
+}
+
+var OfpInstructionType_value = map[string]int32{
+	"OFPIT_INVALID":        0,
+	"OFPIT_GOTO_TABLE":     1,
+	"OFPIT_WRITE_METADATA": 2,
+	"OFPIT_WRITE_ACTIONS":  3,
+	"OFPIT_APPLY_ACTIONS":  4,
+	"OFPIT_CLEAR_ACTIONS":  5,
+	"OFPIT_METER":          6,
+	"OFPIT_EXPERIMENTER":   65535,
+}
+
+func (x OfpInstructionType) String() string {
+	return proto.EnumName(OfpInstructionType_name, int32(x))
+}
+
+func (OfpInstructionType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{18}
+}
+
+type OfpFlowModCommand int32
+
+const (
+	OfpFlowModCommand_OFPFC_ADD           OfpFlowModCommand = 0
+	OfpFlowModCommand_OFPFC_MODIFY        OfpFlowModCommand = 1
+	OfpFlowModCommand_OFPFC_MODIFY_STRICT OfpFlowModCommand = 2
+	OfpFlowModCommand_OFPFC_DELETE        OfpFlowModCommand = 3
+	OfpFlowModCommand_OFPFC_DELETE_STRICT OfpFlowModCommand = 4
+)
+
+var OfpFlowModCommand_name = map[int32]string{
+	0: "OFPFC_ADD",
+	1: "OFPFC_MODIFY",
+	2: "OFPFC_MODIFY_STRICT",
+	3: "OFPFC_DELETE",
+	4: "OFPFC_DELETE_STRICT",
+}
+
+var OfpFlowModCommand_value = map[string]int32{
+	"OFPFC_ADD":           0,
+	"OFPFC_MODIFY":        1,
+	"OFPFC_MODIFY_STRICT": 2,
+	"OFPFC_DELETE":        3,
+	"OFPFC_DELETE_STRICT": 4,
+}
+
+func (x OfpFlowModCommand) String() string {
+	return proto.EnumName(OfpFlowModCommand_name, int32(x))
+}
+
+func (OfpFlowModCommand) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{19}
+}
+
+type OfpFlowModFlags int32
+
+const (
+	OfpFlowModFlags_OFPFF_INVALID       OfpFlowModFlags = 0
+	OfpFlowModFlags_OFPFF_SEND_FLOW_REM OfpFlowModFlags = 1
+	OfpFlowModFlags_OFPFF_CHECK_OVERLAP OfpFlowModFlags = 2
+	OfpFlowModFlags_OFPFF_RESET_COUNTS  OfpFlowModFlags = 4
+	OfpFlowModFlags_OFPFF_NO_PKT_COUNTS OfpFlowModFlags = 8
+	OfpFlowModFlags_OFPFF_NO_BYT_COUNTS OfpFlowModFlags = 16
+)
+
+var OfpFlowModFlags_name = map[int32]string{
+	0:  "OFPFF_INVALID",
+	1:  "OFPFF_SEND_FLOW_REM",
+	2:  "OFPFF_CHECK_OVERLAP",
+	4:  "OFPFF_RESET_COUNTS",
+	8:  "OFPFF_NO_PKT_COUNTS",
+	16: "OFPFF_NO_BYT_COUNTS",
+}
+
+var OfpFlowModFlags_value = map[string]int32{
+	"OFPFF_INVALID":       0,
+	"OFPFF_SEND_FLOW_REM": 1,
+	"OFPFF_CHECK_OVERLAP": 2,
+	"OFPFF_RESET_COUNTS":  4,
+	"OFPFF_NO_PKT_COUNTS": 8,
+	"OFPFF_NO_BYT_COUNTS": 16,
+}
+
+func (x OfpFlowModFlags) String() string {
+	return proto.EnumName(OfpFlowModFlags_name, int32(x))
+}
+
+func (OfpFlowModFlags) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{20}
+}
+
+// Group numbering. Groups can use any number up to OFPG_MAX.
+type OfpGroup int32
+
+const (
+	OfpGroup_OFPG_INVALID OfpGroup = 0
+	// Last usable group number.
+	OfpGroup_OFPG_MAX OfpGroup = 2147483392
+	// Fake groups.
+	OfpGroup_OFPG_ALL OfpGroup = 2147483644
+	OfpGroup_OFPG_ANY OfpGroup = 2147483647
+)
+
+var OfpGroup_name = map[int32]string{
+	0:          "OFPG_INVALID",
+	2147483392: "OFPG_MAX",
+	2147483644: "OFPG_ALL",
+	2147483647: "OFPG_ANY",
+}
+
+var OfpGroup_value = map[string]int32{
+	"OFPG_INVALID": 0,
+	"OFPG_MAX":     2147483392,
+	"OFPG_ALL":     2147483644,
+	"OFPG_ANY":     2147483647,
+}
+
+func (x OfpGroup) String() string {
+	return proto.EnumName(OfpGroup_name, int32(x))
+}
+
+func (OfpGroup) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{21}
+}
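+
+// Illustrative sketch (editor's addition, not protoc output): only numbers
+// up to OFPG_MAX name real groups; OFPG_ALL and OFPG_ANY are the reserved
+// "fake" groups used in requests.
+func exampleIsConcreteGroup(g uint32) bool {
+	return g <= uint32(OfpGroup_OFPG_MAX)
+}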
+
+// Group commands
+type OfpGroupModCommand int32
+
+const (
+	OfpGroupModCommand_OFPGC_ADD    OfpGroupModCommand = 0
+	OfpGroupModCommand_OFPGC_MODIFY OfpGroupModCommand = 1
+	OfpGroupModCommand_OFPGC_DELETE OfpGroupModCommand = 2
+)
+
+var OfpGroupModCommand_name = map[int32]string{
+	0: "OFPGC_ADD",
+	1: "OFPGC_MODIFY",
+	2: "OFPGC_DELETE",
+}
+
+var OfpGroupModCommand_value = map[string]int32{
+	"OFPGC_ADD":    0,
+	"OFPGC_MODIFY": 1,
+	"OFPGC_DELETE": 2,
+}
+
+func (x OfpGroupModCommand) String() string {
+	return proto.EnumName(OfpGroupModCommand_name, int32(x))
+}
+
+func (OfpGroupModCommand) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{22}
+}
+
+// Group types.  Values in the range [128, 255] are reserved for experimental
+// use.
+type OfpGroupType int32
+
+const (
+	OfpGroupType_OFPGT_ALL      OfpGroupType = 0
+	OfpGroupType_OFPGT_SELECT   OfpGroupType = 1
+	OfpGroupType_OFPGT_INDIRECT OfpGroupType = 2
+	OfpGroupType_OFPGT_FF       OfpGroupType = 3
+)
+
+var OfpGroupType_name = map[int32]string{
+	0: "OFPGT_ALL",
+	1: "OFPGT_SELECT",
+	2: "OFPGT_INDIRECT",
+	3: "OFPGT_FF",
+}
+
+var OfpGroupType_value = map[string]int32{
+	"OFPGT_ALL":      0,
+	"OFPGT_SELECT":   1,
+	"OFPGT_INDIRECT": 2,
+	"OFPGT_FF":       3,
+}
+
+func (x OfpGroupType) String() string {
+	return proto.EnumName(OfpGroupType_name, int32(x))
+}
+
+func (OfpGroupType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{23}
+}
+
+// Why is this packet being sent to the controller?
+type OfpPacketInReason int32
+
+const (
+	OfpPacketInReason_OFPR_NO_MATCH    OfpPacketInReason = 0
+	OfpPacketInReason_OFPR_ACTION      OfpPacketInReason = 1
+	OfpPacketInReason_OFPR_INVALID_TTL OfpPacketInReason = 2
+)
+
+var OfpPacketInReason_name = map[int32]string{
+	0: "OFPR_NO_MATCH",
+	1: "OFPR_ACTION",
+	2: "OFPR_INVALID_TTL",
+}
+
+var OfpPacketInReason_value = map[string]int32{
+	"OFPR_NO_MATCH":    0,
+	"OFPR_ACTION":      1,
+	"OFPR_INVALID_TTL": 2,
+}
+
+func (x OfpPacketInReason) String() string {
+	return proto.EnumName(OfpPacketInReason_name, int32(x))
+}
+
+func (OfpPacketInReason) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{24}
+}
+
+// Why was this flow removed?
+type OfpFlowRemovedReason int32
+
+const (
+	OfpFlowRemovedReason_OFPRR_IDLE_TIMEOUT OfpFlowRemovedReason = 0
+	OfpFlowRemovedReason_OFPRR_HARD_TIMEOUT OfpFlowRemovedReason = 1
+	OfpFlowRemovedReason_OFPRR_DELETE       OfpFlowRemovedReason = 2
+	OfpFlowRemovedReason_OFPRR_GROUP_DELETE OfpFlowRemovedReason = 3
+	OfpFlowRemovedReason_OFPRR_METER_DELETE OfpFlowRemovedReason = 4
+)
+
+var OfpFlowRemovedReason_name = map[int32]string{
+	0: "OFPRR_IDLE_TIMEOUT",
+	1: "OFPRR_HARD_TIMEOUT",
+	2: "OFPRR_DELETE",
+	3: "OFPRR_GROUP_DELETE",
+	4: "OFPRR_METER_DELETE",
+}
+
+var OfpFlowRemovedReason_value = map[string]int32{
+	"OFPRR_IDLE_TIMEOUT": 0,
+	"OFPRR_HARD_TIMEOUT": 1,
+	"OFPRR_DELETE":       2,
+	"OFPRR_GROUP_DELETE": 3,
+	"OFPRR_METER_DELETE": 4,
+}
+
+func (x OfpFlowRemovedReason) String() string {
+	return proto.EnumName(OfpFlowRemovedReason_name, int32(x))
+}
+
+func (OfpFlowRemovedReason) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{25}
+}
+
+// Meter numbering. Flow meters can use any number up to OFPM_MAX.
+type OfpMeter int32
+
+const (
+	OfpMeter_OFPM_ZERO OfpMeter = 0
+	// Last usable meter.
+	OfpMeter_OFPM_MAX OfpMeter = 2147418112
+	// Virtual meters.
+	OfpMeter_OFPM_SLOWPATH   OfpMeter = 2147483645
+	OfpMeter_OFPM_CONTROLLER OfpMeter = 2147483646
+	OfpMeter_OFPM_ALL        OfpMeter = 2147483647
+)
+
+var OfpMeter_name = map[int32]string{
+	0:          "OFPM_ZERO",
+	2147418112: "OFPM_MAX",
+	2147483645: "OFPM_SLOWPATH",
+	2147483646: "OFPM_CONTROLLER",
+	2147483647: "OFPM_ALL",
+}
+
+var OfpMeter_value = map[string]int32{
+	"OFPM_ZERO":       0,
+	"OFPM_MAX":        2147418112,
+	"OFPM_SLOWPATH":   2147483645,
+	"OFPM_CONTROLLER": 2147483646,
+	"OFPM_ALL":        2147483647,
+}
+
+func (x OfpMeter) String() string {
+	return proto.EnumName(OfpMeter_name, int32(x))
+}
+
+func (OfpMeter) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{26}
+}
+
+// Meter band types
+type OfpMeterBandType int32
+
+const (
+	OfpMeterBandType_OFPMBT_INVALID      OfpMeterBandType = 0
+	OfpMeterBandType_OFPMBT_DROP         OfpMeterBandType = 1
+	OfpMeterBandType_OFPMBT_DSCP_REMARK  OfpMeterBandType = 2
+	OfpMeterBandType_OFPMBT_EXPERIMENTER OfpMeterBandType = 65535
+)
+
+var OfpMeterBandType_name = map[int32]string{
+	0:     "OFPMBT_INVALID",
+	1:     "OFPMBT_DROP",
+	2:     "OFPMBT_DSCP_REMARK",
+	65535: "OFPMBT_EXPERIMENTER",
+}
+
+var OfpMeterBandType_value = map[string]int32{
+	"OFPMBT_INVALID":      0,
+	"OFPMBT_DROP":         1,
+	"OFPMBT_DSCP_REMARK":  2,
+	"OFPMBT_EXPERIMENTER": 65535,
+}
+
+func (x OfpMeterBandType) String() string {
+	return proto.EnumName(OfpMeterBandType_name, int32(x))
+}
+
+func (OfpMeterBandType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{27}
+}
+
+// Meter commands
+type OfpMeterModCommand int32
+
+const (
+	OfpMeterModCommand_OFPMC_ADD    OfpMeterModCommand = 0
+	OfpMeterModCommand_OFPMC_MODIFY OfpMeterModCommand = 1
+	OfpMeterModCommand_OFPMC_DELETE OfpMeterModCommand = 2
+)
+
+var OfpMeterModCommand_name = map[int32]string{
+	0: "OFPMC_ADD",
+	1: "OFPMC_MODIFY",
+	2: "OFPMC_DELETE",
+}
+
+var OfpMeterModCommand_value = map[string]int32{
+	"OFPMC_ADD":    0,
+	"OFPMC_MODIFY": 1,
+	"OFPMC_DELETE": 2,
+}
+
+func (x OfpMeterModCommand) String() string {
+	return proto.EnumName(OfpMeterModCommand_name, int32(x))
+}
+
+func (OfpMeterModCommand) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{28}
+}
+
+// Meter configuration flags
+type OfpMeterFlags int32
+
+const (
+	OfpMeterFlags_OFPMF_INVALID OfpMeterFlags = 0
+	OfpMeterFlags_OFPMF_KBPS    OfpMeterFlags = 1
+	OfpMeterFlags_OFPMF_PKTPS   OfpMeterFlags = 2
+	OfpMeterFlags_OFPMF_BURST   OfpMeterFlags = 4
+	OfpMeterFlags_OFPMF_STATS   OfpMeterFlags = 8
+)
+
+var OfpMeterFlags_name = map[int32]string{
+	0: "OFPMF_INVALID",
+	1: "OFPMF_KBPS",
+	2: "OFPMF_PKTPS",
+	4: "OFPMF_BURST",
+	8: "OFPMF_STATS",
+}
+
+var OfpMeterFlags_value = map[string]int32{
+	"OFPMF_INVALID": 0,
+	"OFPMF_KBPS":    1,
+	"OFPMF_PKTPS":   2,
+	"OFPMF_BURST":   4,
+	"OFPMF_STATS":   8,
+}
+
+func (x OfpMeterFlags) String() string {
+	return proto.EnumName(OfpMeterFlags_name, int32(x))
+}
+
+func (OfpMeterFlags) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{29}
+}
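+
+// Illustrative sketch (editor's addition, not protoc output): meter flags
+// combine as a bitmask, e.g. a kbps-rated meter that also supports bursts.
+func exampleKbpsBurstFlags() uint32 {
+	return uint32(OfpMeterFlags_OFPMF_KBPS) | uint32(OfpMeterFlags_OFPMF_BURST)
+}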
+
+// Values for 'type' in ofp_error_message.  These values are immutable: they
+// will not change in future versions of the protocol (although new values may
+// be added).
+type OfpErrorType int32
+
+const (
+	OfpErrorType_OFPET_HELLO_FAILED          OfpErrorType = 0
+	OfpErrorType_OFPET_BAD_REQUEST           OfpErrorType = 1
+	OfpErrorType_OFPET_BAD_ACTION            OfpErrorType = 2
+	OfpErrorType_OFPET_BAD_INSTRUCTION       OfpErrorType = 3
+	OfpErrorType_OFPET_BAD_MATCH             OfpErrorType = 4
+	OfpErrorType_OFPET_FLOW_MOD_FAILED       OfpErrorType = 5
+	OfpErrorType_OFPET_GROUP_MOD_FAILED      OfpErrorType = 6
+	OfpErrorType_OFPET_PORT_MOD_FAILED       OfpErrorType = 7
+	OfpErrorType_OFPET_TABLE_MOD_FAILED      OfpErrorType = 8
+	OfpErrorType_OFPET_QUEUE_OP_FAILED       OfpErrorType = 9
+	OfpErrorType_OFPET_SWITCH_CONFIG_FAILED  OfpErrorType = 10
+	OfpErrorType_OFPET_ROLE_REQUEST_FAILED   OfpErrorType = 11
+	OfpErrorType_OFPET_METER_MOD_FAILED      OfpErrorType = 12
+	OfpErrorType_OFPET_TABLE_FEATURES_FAILED OfpErrorType = 13
+	OfpErrorType_OFPET_EXPERIMENTER          OfpErrorType = 65535
+)
+
+var OfpErrorType_name = map[int32]string{
+	0:     "OFPET_HELLO_FAILED",
+	1:     "OFPET_BAD_REQUEST",
+	2:     "OFPET_BAD_ACTION",
+	3:     "OFPET_BAD_INSTRUCTION",
+	4:     "OFPET_BAD_MATCH",
+	5:     "OFPET_FLOW_MOD_FAILED",
+	6:     "OFPET_GROUP_MOD_FAILED",
+	7:     "OFPET_PORT_MOD_FAILED",
+	8:     "OFPET_TABLE_MOD_FAILED",
+	9:     "OFPET_QUEUE_OP_FAILED",
+	10:    "OFPET_SWITCH_CONFIG_FAILED",
+	11:    "OFPET_ROLE_REQUEST_FAILED",
+	12:    "OFPET_METER_MOD_FAILED",
+	13:    "OFPET_TABLE_FEATURES_FAILED",
+	65535: "OFPET_EXPERIMENTER",
+}
+
+var OfpErrorType_value = map[string]int32{
+	"OFPET_HELLO_FAILED":          0,
+	"OFPET_BAD_REQUEST":           1,
+	"OFPET_BAD_ACTION":            2,
+	"OFPET_BAD_INSTRUCTION":       3,
+	"OFPET_BAD_MATCH":             4,
+	"OFPET_FLOW_MOD_FAILED":       5,
+	"OFPET_GROUP_MOD_FAILED":      6,
+	"OFPET_PORT_MOD_FAILED":       7,
+	"OFPET_TABLE_MOD_FAILED":      8,
+	"OFPET_QUEUE_OP_FAILED":       9,
+	"OFPET_SWITCH_CONFIG_FAILED":  10,
+	"OFPET_ROLE_REQUEST_FAILED":   11,
+	"OFPET_METER_MOD_FAILED":      12,
+	"OFPET_TABLE_FEATURES_FAILED": 13,
+	"OFPET_EXPERIMENTER":          65535,
+}
+
+func (x OfpErrorType) String() string {
+	return proto.EnumName(OfpErrorType_name, int32(x))
+}
+
+func (OfpErrorType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{30}
+}
+
+// ofp_error_msg 'code' values for OFPET_HELLO_FAILED.  'data' contains an
+// ASCII text string that may give failure details.
+type OfpHelloFailedCode int32
+
+const (
+	OfpHelloFailedCode_OFPHFC_INCOMPATIBLE OfpHelloFailedCode = 0
+	OfpHelloFailedCode_OFPHFC_EPERM        OfpHelloFailedCode = 1
+)
+
+var OfpHelloFailedCode_name = map[int32]string{
+	0: "OFPHFC_INCOMPATIBLE",
+	1: "OFPHFC_EPERM",
+}
+
+var OfpHelloFailedCode_value = map[string]int32{
+	"OFPHFC_INCOMPATIBLE": 0,
+	"OFPHFC_EPERM":        1,
+}
+
+func (x OfpHelloFailedCode) String() string {
+	return proto.EnumName(OfpHelloFailedCode_name, int32(x))
+}
+
+func (OfpHelloFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{31}
+}
+
+// ofp_error_msg 'code' values for OFPET_BAD_REQUEST.  'data' contains at least
+// the first 64 bytes of the failed request.
+type OfpBadRequestCode int32
+
+const (
+	OfpBadRequestCode_OFPBRC_BAD_VERSION               OfpBadRequestCode = 0
+	OfpBadRequestCode_OFPBRC_BAD_TYPE                  OfpBadRequestCode = 1
+	OfpBadRequestCode_OFPBRC_BAD_MULTIPART             OfpBadRequestCode = 2
+	OfpBadRequestCode_OFPBRC_BAD_EXPERIMENTER          OfpBadRequestCode = 3
+	OfpBadRequestCode_OFPBRC_BAD_EXP_TYPE              OfpBadRequestCode = 4
+	OfpBadRequestCode_OFPBRC_EPERM                     OfpBadRequestCode = 5
+	OfpBadRequestCode_OFPBRC_BAD_LEN                   OfpBadRequestCode = 6
+	OfpBadRequestCode_OFPBRC_BUFFER_EMPTY              OfpBadRequestCode = 7
+	OfpBadRequestCode_OFPBRC_BUFFER_UNKNOWN            OfpBadRequestCode = 8
+	OfpBadRequestCode_OFPBRC_BAD_TABLE_ID              OfpBadRequestCode = 9
+	OfpBadRequestCode_OFPBRC_IS_SLAVE                  OfpBadRequestCode = 10
+	OfpBadRequestCode_OFPBRC_BAD_PORT                  OfpBadRequestCode = 11
+	OfpBadRequestCode_OFPBRC_BAD_PACKET                OfpBadRequestCode = 12
+	OfpBadRequestCode_OFPBRC_MULTIPART_BUFFER_OVERFLOW OfpBadRequestCode = 13
+)
+
+var OfpBadRequestCode_name = map[int32]string{
+	0:  "OFPBRC_BAD_VERSION",
+	1:  "OFPBRC_BAD_TYPE",
+	2:  "OFPBRC_BAD_MULTIPART",
+	3:  "OFPBRC_BAD_EXPERIMENTER",
+	4:  "OFPBRC_BAD_EXP_TYPE",
+	5:  "OFPBRC_EPERM",
+	6:  "OFPBRC_BAD_LEN",
+	7:  "OFPBRC_BUFFER_EMPTY",
+	8:  "OFPBRC_BUFFER_UNKNOWN",
+	9:  "OFPBRC_BAD_TABLE_ID",
+	10: "OFPBRC_IS_SLAVE",
+	11: "OFPBRC_BAD_PORT",
+	12: "OFPBRC_BAD_PACKET",
+	13: "OFPBRC_MULTIPART_BUFFER_OVERFLOW",
+}
+
+var OfpBadRequestCode_value = map[string]int32{
+	"OFPBRC_BAD_VERSION":               0,
+	"OFPBRC_BAD_TYPE":                  1,
+	"OFPBRC_BAD_MULTIPART":             2,
+	"OFPBRC_BAD_EXPERIMENTER":          3,
+	"OFPBRC_BAD_EXP_TYPE":              4,
+	"OFPBRC_EPERM":                     5,
+	"OFPBRC_BAD_LEN":                   6,
+	"OFPBRC_BUFFER_EMPTY":              7,
+	"OFPBRC_BUFFER_UNKNOWN":            8,
+	"OFPBRC_BAD_TABLE_ID":              9,
+	"OFPBRC_IS_SLAVE":                  10,
+	"OFPBRC_BAD_PORT":                  11,
+	"OFPBRC_BAD_PACKET":                12,
+	"OFPBRC_MULTIPART_BUFFER_OVERFLOW": 13,
+}
+
+func (x OfpBadRequestCode) String() string {
+	return proto.EnumName(OfpBadRequestCode_name, int32(x))
+}
+
+func (OfpBadRequestCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{32}
+}
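+
+// Illustrative sketch (editor's addition, not protoc output): a readable
+// error label can be built from the two generated String() methods.
+func exampleBadRequestLabel(c OfpBadRequestCode) string {
+	return OfpErrorType_OFPET_BAD_REQUEST.String() + "/" + c.String()
+}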
+
+// ofp_error_msg 'code' values for OFPET_BAD_ACTION.  'data' contains at least
+// the first 64 bytes of the failed request.
+type OfpBadActionCode int32
+
+const (
+	OfpBadActionCode_OFPBAC_BAD_TYPE           OfpBadActionCode = 0
+	OfpBadActionCode_OFPBAC_BAD_LEN            OfpBadActionCode = 1
+	OfpBadActionCode_OFPBAC_BAD_EXPERIMENTER   OfpBadActionCode = 2
+	OfpBadActionCode_OFPBAC_BAD_EXP_TYPE       OfpBadActionCode = 3
+	OfpBadActionCode_OFPBAC_BAD_OUT_PORT       OfpBadActionCode = 4
+	OfpBadActionCode_OFPBAC_BAD_ARGUMENT       OfpBadActionCode = 5
+	OfpBadActionCode_OFPBAC_EPERM              OfpBadActionCode = 6
+	OfpBadActionCode_OFPBAC_TOO_MANY           OfpBadActionCode = 7
+	OfpBadActionCode_OFPBAC_BAD_QUEUE          OfpBadActionCode = 8
+	OfpBadActionCode_OFPBAC_BAD_OUT_GROUP      OfpBadActionCode = 9
+	OfpBadActionCode_OFPBAC_MATCH_INCONSISTENT OfpBadActionCode = 10
+	OfpBadActionCode_OFPBAC_UNSUPPORTED_ORDER  OfpBadActionCode = 11
+	OfpBadActionCode_OFPBAC_BAD_TAG            OfpBadActionCode = 12
+	OfpBadActionCode_OFPBAC_BAD_SET_TYPE       OfpBadActionCode = 13
+	OfpBadActionCode_OFPBAC_BAD_SET_LEN        OfpBadActionCode = 14
+	OfpBadActionCode_OFPBAC_BAD_SET_ARGUMENT   OfpBadActionCode = 15
+)
+
+var OfpBadActionCode_name = map[int32]string{
+	0:  "OFPBAC_BAD_TYPE",
+	1:  "OFPBAC_BAD_LEN",
+	2:  "OFPBAC_BAD_EXPERIMENTER",
+	3:  "OFPBAC_BAD_EXP_TYPE",
+	4:  "OFPBAC_BAD_OUT_PORT",
+	5:  "OFPBAC_BAD_ARGUMENT",
+	6:  "OFPBAC_EPERM",
+	7:  "OFPBAC_TOO_MANY",
+	8:  "OFPBAC_BAD_QUEUE",
+	9:  "OFPBAC_BAD_OUT_GROUP",
+	10: "OFPBAC_MATCH_INCONSISTENT",
+	11: "OFPBAC_UNSUPPORTED_ORDER",
+	12: "OFPBAC_BAD_TAG",
+	13: "OFPBAC_BAD_SET_TYPE",
+	14: "OFPBAC_BAD_SET_LEN",
+	15: "OFPBAC_BAD_SET_ARGUMENT",
+}
+
+var OfpBadActionCode_value = map[string]int32{
+	"OFPBAC_BAD_TYPE":           0,
+	"OFPBAC_BAD_LEN":            1,
+	"OFPBAC_BAD_EXPERIMENTER":   2,
+	"OFPBAC_BAD_EXP_TYPE":       3,
+	"OFPBAC_BAD_OUT_PORT":       4,
+	"OFPBAC_BAD_ARGUMENT":       5,
+	"OFPBAC_EPERM":              6,
+	"OFPBAC_TOO_MANY":           7,
+	"OFPBAC_BAD_QUEUE":          8,
+	"OFPBAC_BAD_OUT_GROUP":      9,
+	"OFPBAC_MATCH_INCONSISTENT": 10,
+	"OFPBAC_UNSUPPORTED_ORDER":  11,
+	"OFPBAC_BAD_TAG":            12,
+	"OFPBAC_BAD_SET_TYPE":       13,
+	"OFPBAC_BAD_SET_LEN":        14,
+	"OFPBAC_BAD_SET_ARGUMENT":   15,
+}
+
+func (x OfpBadActionCode) String() string {
+	return proto.EnumName(OfpBadActionCode_name, int32(x))
+}
+
+func (OfpBadActionCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{33}
+}
+
+// ofp_error_msg 'code' values for OFPET_BAD_INSTRUCTION.  'data' contains at
+// least the first 64 bytes of the failed request.
+type OfpBadInstructionCode int32
+
+const (
+	OfpBadInstructionCode_OFPBIC_UNKNOWN_INST        OfpBadInstructionCode = 0
+	OfpBadInstructionCode_OFPBIC_UNSUP_INST          OfpBadInstructionCode = 1
+	OfpBadInstructionCode_OFPBIC_BAD_TABLE_ID        OfpBadInstructionCode = 2
+	OfpBadInstructionCode_OFPBIC_UNSUP_METADATA      OfpBadInstructionCode = 3
+	OfpBadInstructionCode_OFPBIC_UNSUP_METADATA_MASK OfpBadInstructionCode = 4
+	OfpBadInstructionCode_OFPBIC_BAD_EXPERIMENTER    OfpBadInstructionCode = 5
+	OfpBadInstructionCode_OFPBIC_BAD_EXP_TYPE        OfpBadInstructionCode = 6
+	OfpBadInstructionCode_OFPBIC_BAD_LEN             OfpBadInstructionCode = 7
+	OfpBadInstructionCode_OFPBIC_EPERM               OfpBadInstructionCode = 8
+)
+
+var OfpBadInstructionCode_name = map[int32]string{
+	0: "OFPBIC_UNKNOWN_INST",
+	1: "OFPBIC_UNSUP_INST",
+	2: "OFPBIC_BAD_TABLE_ID",
+	3: "OFPBIC_UNSUP_METADATA",
+	4: "OFPBIC_UNSUP_METADATA_MASK",
+	5: "OFPBIC_BAD_EXPERIMENTER",
+	6: "OFPBIC_BAD_EXP_TYPE",
+	7: "OFPBIC_BAD_LEN",
+	8: "OFPBIC_EPERM",
+}
+
+var OfpBadInstructionCode_value = map[string]int32{
+	"OFPBIC_UNKNOWN_INST":        0,
+	"OFPBIC_UNSUP_INST":          1,
+	"OFPBIC_BAD_TABLE_ID":        2,
+	"OFPBIC_UNSUP_METADATA":      3,
+	"OFPBIC_UNSUP_METADATA_MASK": 4,
+	"OFPBIC_BAD_EXPERIMENTER":    5,
+	"OFPBIC_BAD_EXP_TYPE":        6,
+	"OFPBIC_BAD_LEN":             7,
+	"OFPBIC_EPERM":               8,
+}
+
+func (x OfpBadInstructionCode) String() string {
+	return proto.EnumName(OfpBadInstructionCode_name, int32(x))
+}
+
+func (OfpBadInstructionCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{34}
+}
+
+// ofp_error_msg 'code' values for OFPET_BAD_MATCH.  'data' contains at least
+// the first 64 bytes of the failed request.
+type OfpBadMatchCode int32
+
+const (
+	OfpBadMatchCode_OFPBMC_BAD_TYPE         OfpBadMatchCode = 0
+	OfpBadMatchCode_OFPBMC_BAD_LEN          OfpBadMatchCode = 1
+	OfpBadMatchCode_OFPBMC_BAD_TAG          OfpBadMatchCode = 2
+	OfpBadMatchCode_OFPBMC_BAD_DL_ADDR_MASK OfpBadMatchCode = 3
+	OfpBadMatchCode_OFPBMC_BAD_NW_ADDR_MASK OfpBadMatchCode = 4
+	OfpBadMatchCode_OFPBMC_BAD_WILDCARDS    OfpBadMatchCode = 5
+	OfpBadMatchCode_OFPBMC_BAD_FIELD        OfpBadMatchCode = 6
+	OfpBadMatchCode_OFPBMC_BAD_VALUE        OfpBadMatchCode = 7
+	OfpBadMatchCode_OFPBMC_BAD_MASK         OfpBadMatchCode = 8
+	OfpBadMatchCode_OFPBMC_BAD_PREREQ       OfpBadMatchCode = 9
+	OfpBadMatchCode_OFPBMC_DUP_FIELD        OfpBadMatchCode = 10
+	OfpBadMatchCode_OFPBMC_EPERM            OfpBadMatchCode = 11
+)
+
+var OfpBadMatchCode_name = map[int32]string{
+	0:  "OFPBMC_BAD_TYPE",
+	1:  "OFPBMC_BAD_LEN",
+	2:  "OFPBMC_BAD_TAG",
+	3:  "OFPBMC_BAD_DL_ADDR_MASK",
+	4:  "OFPBMC_BAD_NW_ADDR_MASK",
+	5:  "OFPBMC_BAD_WILDCARDS",
+	6:  "OFPBMC_BAD_FIELD",
+	7:  "OFPBMC_BAD_VALUE",
+	8:  "OFPBMC_BAD_MASK",
+	9:  "OFPBMC_BAD_PREREQ",
+	10: "OFPBMC_DUP_FIELD",
+	11: "OFPBMC_EPERM",
+}
+
+var OfpBadMatchCode_value = map[string]int32{
+	"OFPBMC_BAD_TYPE":         0,
+	"OFPBMC_BAD_LEN":          1,
+	"OFPBMC_BAD_TAG":          2,
+	"OFPBMC_BAD_DL_ADDR_MASK": 3,
+	"OFPBMC_BAD_NW_ADDR_MASK": 4,
+	"OFPBMC_BAD_WILDCARDS":    5,
+	"OFPBMC_BAD_FIELD":        6,
+	"OFPBMC_BAD_VALUE":        7,
+	"OFPBMC_BAD_MASK":         8,
+	"OFPBMC_BAD_PREREQ":       9,
+	"OFPBMC_DUP_FIELD":        10,
+	"OFPBMC_EPERM":            11,
+}
+
+func (x OfpBadMatchCode) String() string {
+	return proto.EnumName(OfpBadMatchCode_name, int32(x))
+}
+
+func (OfpBadMatchCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{35}
+}
+
+// ofp_error_msg 'code' values for OFPET_FLOW_MOD_FAILED.  'data' contains
+// at least the first 64 bytes of the failed request.
+type OfpFlowModFailedCode int32
+
+const (
+	OfpFlowModFailedCode_OFPFMFC_UNKNOWN      OfpFlowModFailedCode = 0
+	OfpFlowModFailedCode_OFPFMFC_TABLE_FULL   OfpFlowModFailedCode = 1
+	OfpFlowModFailedCode_OFPFMFC_BAD_TABLE_ID OfpFlowModFailedCode = 2
+	OfpFlowModFailedCode_OFPFMFC_OVERLAP      OfpFlowModFailedCode = 3
+	OfpFlowModFailedCode_OFPFMFC_EPERM        OfpFlowModFailedCode = 4
+	OfpFlowModFailedCode_OFPFMFC_BAD_TIMEOUT  OfpFlowModFailedCode = 5
+	OfpFlowModFailedCode_OFPFMFC_BAD_COMMAND  OfpFlowModFailedCode = 6
+	OfpFlowModFailedCode_OFPFMFC_BAD_FLAGS    OfpFlowModFailedCode = 7
+)
+
+var OfpFlowModFailedCode_name = map[int32]string{
+	0: "OFPFMFC_UNKNOWN",
+	1: "OFPFMFC_TABLE_FULL",
+	2: "OFPFMFC_BAD_TABLE_ID",
+	3: "OFPFMFC_OVERLAP",
+	4: "OFPFMFC_EPERM",
+	5: "OFPFMFC_BAD_TIMEOUT",
+	6: "OFPFMFC_BAD_COMMAND",
+	7: "OFPFMFC_BAD_FLAGS",
+}
+
+var OfpFlowModFailedCode_value = map[string]int32{
+	"OFPFMFC_UNKNOWN":      0,
+	"OFPFMFC_TABLE_FULL":   1,
+	"OFPFMFC_BAD_TABLE_ID": 2,
+	"OFPFMFC_OVERLAP":      3,
+	"OFPFMFC_EPERM":        4,
+	"OFPFMFC_BAD_TIMEOUT":  5,
+	"OFPFMFC_BAD_COMMAND":  6,
+	"OFPFMFC_BAD_FLAGS":    7,
+}
+
+func (x OfpFlowModFailedCode) String() string {
+	return proto.EnumName(OfpFlowModFailedCode_name, int32(x))
+}
+
+func (OfpFlowModFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{36}
+}
+
+// ofp_error_msg 'code' values for OFPET_GROUP_MOD_FAILED.  'data' contains
+// at least the first 64 bytes of the failed request.
+type OfpGroupModFailedCode int32
+
+const (
+	OfpGroupModFailedCode_OFPGMFC_GROUP_EXISTS         OfpGroupModFailedCode = 0
+	OfpGroupModFailedCode_OFPGMFC_INVALID_GROUP        OfpGroupModFailedCode = 1
+	OfpGroupModFailedCode_OFPGMFC_WEIGHT_UNSUPPORTED   OfpGroupModFailedCode = 2
+	OfpGroupModFailedCode_OFPGMFC_OUT_OF_GROUPS        OfpGroupModFailedCode = 3
+	OfpGroupModFailedCode_OFPGMFC_OUT_OF_BUCKETS       OfpGroupModFailedCode = 4
+	OfpGroupModFailedCode_OFPGMFC_CHAINING_UNSUPPORTED OfpGroupModFailedCode = 5
+	OfpGroupModFailedCode_OFPGMFC_WATCH_UNSUPPORTED    OfpGroupModFailedCode = 6
+	OfpGroupModFailedCode_OFPGMFC_LOOP                 OfpGroupModFailedCode = 7
+	OfpGroupModFailedCode_OFPGMFC_UNKNOWN_GROUP        OfpGroupModFailedCode = 8
+	OfpGroupModFailedCode_OFPGMFC_CHAINED_GROUP        OfpGroupModFailedCode = 9
+	OfpGroupModFailedCode_OFPGMFC_BAD_TYPE             OfpGroupModFailedCode = 10
+	OfpGroupModFailedCode_OFPGMFC_BAD_COMMAND          OfpGroupModFailedCode = 11
+	OfpGroupModFailedCode_OFPGMFC_BAD_BUCKET           OfpGroupModFailedCode = 12
+	OfpGroupModFailedCode_OFPGMFC_BAD_WATCH            OfpGroupModFailedCode = 13
+	OfpGroupModFailedCode_OFPGMFC_EPERM                OfpGroupModFailedCode = 14
+)
+
+var OfpGroupModFailedCode_name = map[int32]string{
+	0:  "OFPGMFC_GROUP_EXISTS",
+	1:  "OFPGMFC_INVALID_GROUP",
+	2:  "OFPGMFC_WEIGHT_UNSUPPORTED",
+	3:  "OFPGMFC_OUT_OF_GROUPS",
+	4:  "OFPGMFC_OUT_OF_BUCKETS",
+	5:  "OFPGMFC_CHAINING_UNSUPPORTED",
+	6:  "OFPGMFC_WATCH_UNSUPPORTED",
+	7:  "OFPGMFC_LOOP",
+	8:  "OFPGMFC_UNKNOWN_GROUP",
+	9:  "OFPGMFC_CHAINED_GROUP",
+	10: "OFPGMFC_BAD_TYPE",
+	11: "OFPGMFC_BAD_COMMAND",
+	12: "OFPGMFC_BAD_BUCKET",
+	13: "OFPGMFC_BAD_WATCH",
+	14: "OFPGMFC_EPERM",
+}
+
+var OfpGroupModFailedCode_value = map[string]int32{
+	"OFPGMFC_GROUP_EXISTS":         0,
+	"OFPGMFC_INVALID_GROUP":        1,
+	"OFPGMFC_WEIGHT_UNSUPPORTED":   2,
+	"OFPGMFC_OUT_OF_GROUPS":        3,
+	"OFPGMFC_OUT_OF_BUCKETS":       4,
+	"OFPGMFC_CHAINING_UNSUPPORTED": 5,
+	"OFPGMFC_WATCH_UNSUPPORTED":    6,
+	"OFPGMFC_LOOP":                 7,
+	"OFPGMFC_UNKNOWN_GROUP":        8,
+	"OFPGMFC_CHAINED_GROUP":        9,
+	"OFPGMFC_BAD_TYPE":             10,
+	"OFPGMFC_BAD_COMMAND":          11,
+	"OFPGMFC_BAD_BUCKET":           12,
+	"OFPGMFC_BAD_WATCH":            13,
+	"OFPGMFC_EPERM":                14,
+}
+
+func (x OfpGroupModFailedCode) String() string {
+	return proto.EnumName(OfpGroupModFailedCode_name, int32(x))
+}
+
+func (OfpGroupModFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{37}
+}
+
+// ofp_error_msg 'code' values for OFPET_PORT_MOD_FAILED.  'data' contains
+// at least the first 64 bytes of the failed request.
+type OfpPortModFailedCode int32
+
+const (
+	OfpPortModFailedCode_OFPPMFC_BAD_PORT      OfpPortModFailedCode = 0
+	OfpPortModFailedCode_OFPPMFC_BAD_HW_ADDR   OfpPortModFailedCode = 1
+	OfpPortModFailedCode_OFPPMFC_BAD_CONFIG    OfpPortModFailedCode = 2
+	OfpPortModFailedCode_OFPPMFC_BAD_ADVERTISE OfpPortModFailedCode = 3
+	OfpPortModFailedCode_OFPPMFC_EPERM         OfpPortModFailedCode = 4
+)
+
+var OfpPortModFailedCode_name = map[int32]string{
+	0: "OFPPMFC_BAD_PORT",
+	1: "OFPPMFC_BAD_HW_ADDR",
+	2: "OFPPMFC_BAD_CONFIG",
+	3: "OFPPMFC_BAD_ADVERTISE",
+	4: "OFPPMFC_EPERM",
+}
+
+var OfpPortModFailedCode_value = map[string]int32{
+	"OFPPMFC_BAD_PORT":      0,
+	"OFPPMFC_BAD_HW_ADDR":   1,
+	"OFPPMFC_BAD_CONFIG":    2,
+	"OFPPMFC_BAD_ADVERTISE": 3,
+	"OFPPMFC_EPERM":         4,
+}
+
+func (x OfpPortModFailedCode) String() string {
+	return proto.EnumName(OfpPortModFailedCode_name, int32(x))
+}
+
+func (OfpPortModFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{38}
+}
+
+// ofp_error_msg 'code' values for OFPET_TABLE_MOD_FAILED.  'data' contains
+// at least the first 64 bytes of the failed request.
+type OfpTableModFailedCode int32
+
+const (
+	OfpTableModFailedCode_OFPTMFC_BAD_TABLE  OfpTableModFailedCode = 0
+	OfpTableModFailedCode_OFPTMFC_BAD_CONFIG OfpTableModFailedCode = 1
+	OfpTableModFailedCode_OFPTMFC_EPERM      OfpTableModFailedCode = 2
+)
+
+var OfpTableModFailedCode_name = map[int32]string{
+	0: "OFPTMFC_BAD_TABLE",
+	1: "OFPTMFC_BAD_CONFIG",
+	2: "OFPTMFC_EPERM",
+}
+
+var OfpTableModFailedCode_value = map[string]int32{
+	"OFPTMFC_BAD_TABLE":  0,
+	"OFPTMFC_BAD_CONFIG": 1,
+	"OFPTMFC_EPERM":      2,
+}
+
+func (x OfpTableModFailedCode) String() string {
+	return proto.EnumName(OfpTableModFailedCode_name, int32(x))
+}
+
+func (OfpTableModFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{39}
+}
+
+// ofp_error_msg 'code' values for OFPET_QUEUE_OP_FAILED.  'data' contains
+// at least the first 64 bytes of the failed request.
+type OfpQueueOpFailedCode int32
+
+const (
+	OfpQueueOpFailedCode_OFPQOFC_BAD_PORT  OfpQueueOpFailedCode = 0
+	OfpQueueOpFailedCode_OFPQOFC_BAD_QUEUE OfpQueueOpFailedCode = 1
+	OfpQueueOpFailedCode_OFPQOFC_EPERM     OfpQueueOpFailedCode = 2
+)
+
+var OfpQueueOpFailedCode_name = map[int32]string{
+	0: "OFPQOFC_BAD_PORT",
+	1: "OFPQOFC_BAD_QUEUE",
+	2: "OFPQOFC_EPERM",
+}
+
+var OfpQueueOpFailedCode_value = map[string]int32{
+	"OFPQOFC_BAD_PORT":  0,
+	"OFPQOFC_BAD_QUEUE": 1,
+	"OFPQOFC_EPERM":     2,
+}
+
+func (x OfpQueueOpFailedCode) String() string {
+	return proto.EnumName(OfpQueueOpFailedCode_name, int32(x))
+}
+
+func (OfpQueueOpFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{40}
+}
+
+// ofp_error_msg 'code' values for OFPET_SWITCH_CONFIG_FAILED. 'data' contains
+// at least the first 64 bytes of the failed request.
+type OfpSwitchConfigFailedCode int32
+
+const (
+	OfpSwitchConfigFailedCode_OFPSCFC_BAD_FLAGS OfpSwitchConfigFailedCode = 0
+	OfpSwitchConfigFailedCode_OFPSCFC_BAD_LEN   OfpSwitchConfigFailedCode = 1
+	OfpSwitchConfigFailedCode_OFPSCFC_EPERM     OfpSwitchConfigFailedCode = 2
+)
+
+var OfpSwitchConfigFailedCode_name = map[int32]string{
+	0: "OFPSCFC_BAD_FLAGS",
+	1: "OFPSCFC_BAD_LEN",
+	2: "OFPSCFC_EPERM",
+}
+
+var OfpSwitchConfigFailedCode_value = map[string]int32{
+	"OFPSCFC_BAD_FLAGS": 0,
+	"OFPSCFC_BAD_LEN":   1,
+	"OFPSCFC_EPERM":     2,
+}
+
+func (x OfpSwitchConfigFailedCode) String() string {
+	return proto.EnumName(OfpSwitchConfigFailedCode_name, int32(x))
+}
+
+func (OfpSwitchConfigFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{41}
+}
+
+// ofp_error_msg 'code' values for OFPET_ROLE_REQUEST_FAILED. 'data' contains
+// at least the first 64 bytes of the failed request.
+type OfpRoleRequestFailedCode int32
+
+const (
+	OfpRoleRequestFailedCode_OFPRRFC_STALE    OfpRoleRequestFailedCode = 0
+	OfpRoleRequestFailedCode_OFPRRFC_UNSUP    OfpRoleRequestFailedCode = 1
+	OfpRoleRequestFailedCode_OFPRRFC_BAD_ROLE OfpRoleRequestFailedCode = 2
+)
+
+var OfpRoleRequestFailedCode_name = map[int32]string{
+	0: "OFPRRFC_STALE",
+	1: "OFPRRFC_UNSUP",
+	2: "OFPRRFC_BAD_ROLE",
+}
+
+var OfpRoleRequestFailedCode_value = map[string]int32{
+	"OFPRRFC_STALE":    0,
+	"OFPRRFC_UNSUP":    1,
+	"OFPRRFC_BAD_ROLE": 2,
+}
+
+func (x OfpRoleRequestFailedCode) String() string {
+	return proto.EnumName(OfpRoleRequestFailedCode_name, int32(x))
+}
+
+func (OfpRoleRequestFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{42}
+}
+
+// ofp_error_msg 'code' values for OFPET_METER_MOD_FAILED.  'data' contains
+// at least the first 64 bytes of the failed request.
+type OfpMeterModFailedCode int32
+
+const (
+	OfpMeterModFailedCode_OFPMMFC_UNKNOWN         OfpMeterModFailedCode = 0
+	OfpMeterModFailedCode_OFPMMFC_METER_EXISTS    OfpMeterModFailedCode = 1
+	OfpMeterModFailedCode_OFPMMFC_INVALID_METER   OfpMeterModFailedCode = 2
+	OfpMeterModFailedCode_OFPMMFC_UNKNOWN_METER   OfpMeterModFailedCode = 3
+	OfpMeterModFailedCode_OFPMMFC_BAD_COMMAND     OfpMeterModFailedCode = 4
+	OfpMeterModFailedCode_OFPMMFC_BAD_FLAGS       OfpMeterModFailedCode = 5
+	OfpMeterModFailedCode_OFPMMFC_BAD_RATE        OfpMeterModFailedCode = 6
+	OfpMeterModFailedCode_OFPMMFC_BAD_BURST       OfpMeterModFailedCode = 7
+	OfpMeterModFailedCode_OFPMMFC_BAD_BAND        OfpMeterModFailedCode = 8
+	OfpMeterModFailedCode_OFPMMFC_BAD_BAND_DETAIL OfpMeterModFailedCode = 9
+	OfpMeterModFailedCode_OFPMMFC_OUT_OF_METERS   OfpMeterModFailedCode = 10
+	OfpMeterModFailedCode_OFPMMFC_OUT_OF_BANDS    OfpMeterModFailedCode = 11
+)
+
+var OfpMeterModFailedCode_name = map[int32]string{
+	0:  "OFPMMFC_UNKNOWN",
+	1:  "OFPMMFC_METER_EXISTS",
+	2:  "OFPMMFC_INVALID_METER",
+	3:  "OFPMMFC_UNKNOWN_METER",
+	4:  "OFPMMFC_BAD_COMMAND",
+	5:  "OFPMMFC_BAD_FLAGS",
+	6:  "OFPMMFC_BAD_RATE",
+	7:  "OFPMMFC_BAD_BURST",
+	8:  "OFPMMFC_BAD_BAND",
+	9:  "OFPMMFC_BAD_BAND_DETAIL",
+	10: "OFPMMFC_OUT_OF_METERS",
+	11: "OFPMMFC_OUT_OF_BANDS",
+}
+
+var OfpMeterModFailedCode_value = map[string]int32{
+	"OFPMMFC_UNKNOWN":         0,
+	"OFPMMFC_METER_EXISTS":    1,
+	"OFPMMFC_INVALID_METER":   2,
+	"OFPMMFC_UNKNOWN_METER":   3,
+	"OFPMMFC_BAD_COMMAND":     4,
+	"OFPMMFC_BAD_FLAGS":       5,
+	"OFPMMFC_BAD_RATE":        6,
+	"OFPMMFC_BAD_BURST":       7,
+	"OFPMMFC_BAD_BAND":        8,
+	"OFPMMFC_BAD_BAND_DETAIL": 9,
+	"OFPMMFC_OUT_OF_METERS":   10,
+	"OFPMMFC_OUT_OF_BANDS":    11,
+}
+
+func (x OfpMeterModFailedCode) String() string {
+	return proto.EnumName(OfpMeterModFailedCode_name, int32(x))
+}
+
+func (OfpMeterModFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{43}
+}
+
+// ofp_error_msg 'code' values for OFPET_TABLE_FEATURES_FAILED. 'data' contains
+// at least the first 64 bytes of the failed request.
+type OfpTableFeaturesFailedCode int32
+
+const (
+	OfpTableFeaturesFailedCode_OFPTFFC_BAD_TABLE    OfpTableFeaturesFailedCode = 0
+	OfpTableFeaturesFailedCode_OFPTFFC_BAD_METADATA OfpTableFeaturesFailedCode = 1
+	OfpTableFeaturesFailedCode_OFPTFFC_BAD_TYPE     OfpTableFeaturesFailedCode = 2
+	OfpTableFeaturesFailedCode_OFPTFFC_BAD_LEN      OfpTableFeaturesFailedCode = 3
+	OfpTableFeaturesFailedCode_OFPTFFC_BAD_ARGUMENT OfpTableFeaturesFailedCode = 4
+	OfpTableFeaturesFailedCode_OFPTFFC_EPERM        OfpTableFeaturesFailedCode = 5
+)
+
+var OfpTableFeaturesFailedCode_name = map[int32]string{
+	0: "OFPTFFC_BAD_TABLE",
+	1: "OFPTFFC_BAD_METADATA",
+	2: "OFPTFFC_BAD_TYPE",
+	3: "OFPTFFC_BAD_LEN",
+	4: "OFPTFFC_BAD_ARGUMENT",
+	5: "OFPTFFC_EPERM",
+}
+
+var OfpTableFeaturesFailedCode_value = map[string]int32{
+	"OFPTFFC_BAD_TABLE":    0,
+	"OFPTFFC_BAD_METADATA": 1,
+	"OFPTFFC_BAD_TYPE":     2,
+	"OFPTFFC_BAD_LEN":      3,
+	"OFPTFFC_BAD_ARGUMENT": 4,
+	"OFPTFFC_EPERM":        5,
+}
+
+func (x OfpTableFeaturesFailedCode) String() string {
+	return proto.EnumName(OfpTableFeaturesFailedCode_name, int32(x))
+}
+
+func (OfpTableFeaturesFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{44}
+}
+
+type OfpMultipartType int32
+
+const (
+	// Description of this OpenFlow switch.
+	// The request body is empty.
+	// The reply body is struct ofp_desc.
+	OfpMultipartType_OFPMP_DESC OfpMultipartType = 0
+	// Individual flow statistics.
+	// The request body is struct ofp_flow_stats_request.
+	// The reply body is an array of struct ofp_flow_stats.
+	OfpMultipartType_OFPMP_FLOW OfpMultipartType = 1
+	// Aggregate flow statistics.
+	// The request body is struct ofp_aggregate_stats_request.
+	// The reply body is struct ofp_aggregate_stats_reply.
+	OfpMultipartType_OFPMP_AGGREGATE OfpMultipartType = 2
+	// Flow table statistics.
+	// The request body is empty.
+	// The reply body is an array of struct ofp_table_stats.
+	OfpMultipartType_OFPMP_TABLE OfpMultipartType = 3
+	// Port statistics.
+	// The request body is struct ofp_port_stats_request.
+	// The reply body is an array of struct ofp_port_stats.
+	OfpMultipartType_OFPMP_PORT_STATS OfpMultipartType = 4
+	// Queue statistics for a port.
+	// The request body is struct ofp_queue_stats_request.
+	// The reply body is an array of struct ofp_queue_stats.
+	OfpMultipartType_OFPMP_QUEUE OfpMultipartType = 5
+	// Group counter statistics.
+	// The request body is struct ofp_group_stats_request.
+	// The reply body is an array of struct ofp_group_stats.
+	OfpMultipartType_OFPMP_GROUP OfpMultipartType = 6
+	// Group description.
+	// The request body is empty.
+	// The reply body is an array of struct ofp_group_desc.
+	OfpMultipartType_OFPMP_GROUP_DESC OfpMultipartType = 7
+	// Group features.
+	// The request body is empty.
+	// The reply body is struct ofp_group_features.
+	OfpMultipartType_OFPMP_GROUP_FEATURES OfpMultipartType = 8
+	// Meter statistics.
+	// The request body is struct ofp_meter_multipart_requests.
+	// The reply body is an array of struct ofp_meter_stats.
+	OfpMultipartType_OFPMP_METER OfpMultipartType = 9
+	// Meter configuration.
+	// The request body is struct ofp_meter_multipart_requests.
+	// The reply body is an array of struct ofp_meter_config.
+	OfpMultipartType_OFPMP_METER_CONFIG OfpMultipartType = 10
+	// Meter features.
+	// The request body is empty.
+	// The reply body is struct ofp_meter_features.
+	OfpMultipartType_OFPMP_METER_FEATURES OfpMultipartType = 11
+	// Table features.
+	// The request body is either empty or contains an array of
+	// struct ofp_table_features containing the controller's
+	// desired view of the switch. If the switch is unable to
+	// set the specified view, an error is returned.
+	// The reply body is an array of struct ofp_table_features.
+	OfpMultipartType_OFPMP_TABLE_FEATURES OfpMultipartType = 12
+	// Port description.
+	// The request body is empty.
+	// The reply body is an array of struct ofp_port.
+	OfpMultipartType_OFPMP_PORT_DESC OfpMultipartType = 13
+	// Experimenter extension.
+	// The request and reply bodies begin with
+	// struct ofp_experimenter_multipart_header.
+	// The request and reply bodies are otherwise experimenter-defined.
+	OfpMultipartType_OFPMP_EXPERIMENTER OfpMultipartType = 65535
+)
+
+var OfpMultipartType_name = map[int32]string{
+	0:     "OFPMP_DESC",
+	1:     "OFPMP_FLOW",
+	2:     "OFPMP_AGGREGATE",
+	3:     "OFPMP_TABLE",
+	4:     "OFPMP_PORT_STATS",
+	5:     "OFPMP_QUEUE",
+	6:     "OFPMP_GROUP",
+	7:     "OFPMP_GROUP_DESC",
+	8:     "OFPMP_GROUP_FEATURES",
+	9:     "OFPMP_METER",
+	10:    "OFPMP_METER_CONFIG",
+	11:    "OFPMP_METER_FEATURES",
+	12:    "OFPMP_TABLE_FEATURES",
+	13:    "OFPMP_PORT_DESC",
+	65535: "OFPMP_EXPERIMENTER",
+}
+
+var OfpMultipartType_value = map[string]int32{
+	"OFPMP_DESC":           0,
+	"OFPMP_FLOW":           1,
+	"OFPMP_AGGREGATE":      2,
+	"OFPMP_TABLE":          3,
+	"OFPMP_PORT_STATS":     4,
+	"OFPMP_QUEUE":          5,
+	"OFPMP_GROUP":          6,
+	"OFPMP_GROUP_DESC":     7,
+	"OFPMP_GROUP_FEATURES": 8,
+	"OFPMP_METER":          9,
+	"OFPMP_METER_CONFIG":   10,
+	"OFPMP_METER_FEATURES": 11,
+	"OFPMP_TABLE_FEATURES": 12,
+	"OFPMP_PORT_DESC":      13,
+	"OFPMP_EXPERIMENTER":   65535,
+}
+
+func (x OfpMultipartType) String() string {
+	return proto.EnumName(OfpMultipartType_name, int32(x))
+}
+
+func (OfpMultipartType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{45}
+}
+
+type OfpMultipartRequestFlags int32
+
+const (
+	OfpMultipartRequestFlags_OFPMPF_REQ_INVALID OfpMultipartRequestFlags = 0
+	OfpMultipartRequestFlags_OFPMPF_REQ_MORE    OfpMultipartRequestFlags = 1
+)
+
+var OfpMultipartRequestFlags_name = map[int32]string{
+	0: "OFPMPF_REQ_INVALID",
+	1: "OFPMPF_REQ_MORE",
+}
+
+var OfpMultipartRequestFlags_value = map[string]int32{
+	"OFPMPF_REQ_INVALID": 0,
+	"OFPMPF_REQ_MORE":    1,
+}
+
+func (x OfpMultipartRequestFlags) String() string {
+	return proto.EnumName(OfpMultipartRequestFlags_name, int32(x))
+}
+
+func (OfpMultipartRequestFlags) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{46}
+}
+
+type OfpMultipartReplyFlags int32
+
+const (
+	OfpMultipartReplyFlags_OFPMPF_REPLY_INVALID OfpMultipartReplyFlags = 0
+	OfpMultipartReplyFlags_OFPMPF_REPLY_MORE    OfpMultipartReplyFlags = 1
+)
+
+var OfpMultipartReplyFlags_name = map[int32]string{
+	0: "OFPMPF_REPLY_INVALID",
+	1: "OFPMPF_REPLY_MORE",
+}
+
+var OfpMultipartReplyFlags_value = map[string]int32{
+	"OFPMPF_REPLY_INVALID": 0,
+	"OFPMPF_REPLY_MORE":    1,
+}
+
+func (x OfpMultipartReplyFlags) String() string {
+	return proto.EnumName(OfpMultipartReplyFlags_name, int32(x))
+}
+
+func (OfpMultipartReplyFlags) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{47}
+}
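+
+// Illustrative sketch (editor's addition, not protoc output): multipart
+// replies are chained, so a reader keeps collecting segments while the
+// OFPMPF_REPLY_MORE bit stays set.
+func exampleMoreRepliesPending(flags uint32) bool {
+	return flags&uint32(OfpMultipartReplyFlags_OFPMPF_REPLY_MORE) != 0
+}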
+
+// Table Feature property types.
+// Low order bit cleared indicates a property for a regular Flow Entry.
+// Low order bit set indicates a property for the Table-Miss Flow Entry.
+type OfpTableFeaturePropType int32
+
+const (
+	OfpTableFeaturePropType_OFPTFPT_INSTRUCTIONS        OfpTableFeaturePropType = 0
+	OfpTableFeaturePropType_OFPTFPT_INSTRUCTIONS_MISS   OfpTableFeaturePropType = 1
+	OfpTableFeaturePropType_OFPTFPT_NEXT_TABLES         OfpTableFeaturePropType = 2
+	OfpTableFeaturePropType_OFPTFPT_NEXT_TABLES_MISS    OfpTableFeaturePropType = 3
+	OfpTableFeaturePropType_OFPTFPT_WRITE_ACTIONS       OfpTableFeaturePropType = 4
+	OfpTableFeaturePropType_OFPTFPT_WRITE_ACTIONS_MISS  OfpTableFeaturePropType = 5
+	OfpTableFeaturePropType_OFPTFPT_APPLY_ACTIONS       OfpTableFeaturePropType = 6
+	OfpTableFeaturePropType_OFPTFPT_APPLY_ACTIONS_MISS  OfpTableFeaturePropType = 7
+	OfpTableFeaturePropType_OFPTFPT_MATCH               OfpTableFeaturePropType = 8
+	OfpTableFeaturePropType_OFPTFPT_WILDCARDS           OfpTableFeaturePropType = 10
+	OfpTableFeaturePropType_OFPTFPT_WRITE_SETFIELD      OfpTableFeaturePropType = 12
+	OfpTableFeaturePropType_OFPTFPT_WRITE_SETFIELD_MISS OfpTableFeaturePropType = 13
+	OfpTableFeaturePropType_OFPTFPT_APPLY_SETFIELD      OfpTableFeaturePropType = 14
+	OfpTableFeaturePropType_OFPTFPT_APPLY_SETFIELD_MISS OfpTableFeaturePropType = 15
+	OfpTableFeaturePropType_OFPTFPT_EXPERIMENTER        OfpTableFeaturePropType = 65534
+	OfpTableFeaturePropType_OFPTFPT_EXPERIMENTER_MISS   OfpTableFeaturePropType = 65535
+)
+
+var OfpTableFeaturePropType_name = map[int32]string{
+	0:     "OFPTFPT_INSTRUCTIONS",
+	1:     "OFPTFPT_INSTRUCTIONS_MISS",
+	2:     "OFPTFPT_NEXT_TABLES",
+	3:     "OFPTFPT_NEXT_TABLES_MISS",
+	4:     "OFPTFPT_WRITE_ACTIONS",
+	5:     "OFPTFPT_WRITE_ACTIONS_MISS",
+	6:     "OFPTFPT_APPLY_ACTIONS",
+	7:     "OFPTFPT_APPLY_ACTIONS_MISS",
+	8:     "OFPTFPT_MATCH",
+	10:    "OFPTFPT_WILDCARDS",
+	12:    "OFPTFPT_WRITE_SETFIELD",
+	13:    "OFPTFPT_WRITE_SETFIELD_MISS",
+	14:    "OFPTFPT_APPLY_SETFIELD",
+	15:    "OFPTFPT_APPLY_SETFIELD_MISS",
+	65534: "OFPTFPT_EXPERIMENTER",
+	65535: "OFPTFPT_EXPERIMENTER_MISS",
+}
+
+var OfpTableFeaturePropType_value = map[string]int32{
+	"OFPTFPT_INSTRUCTIONS":        0,
+	"OFPTFPT_INSTRUCTIONS_MISS":   1,
+	"OFPTFPT_NEXT_TABLES":         2,
+	"OFPTFPT_NEXT_TABLES_MISS":    3,
+	"OFPTFPT_WRITE_ACTIONS":       4,
+	"OFPTFPT_WRITE_ACTIONS_MISS":  5,
+	"OFPTFPT_APPLY_ACTIONS":       6,
+	"OFPTFPT_APPLY_ACTIONS_MISS":  7,
+	"OFPTFPT_MATCH":               8,
+	"OFPTFPT_WILDCARDS":           10,
+	"OFPTFPT_WRITE_SETFIELD":      12,
+	"OFPTFPT_WRITE_SETFIELD_MISS": 13,
+	"OFPTFPT_APPLY_SETFIELD":      14,
+	"OFPTFPT_APPLY_SETFIELD_MISS": 15,
+	"OFPTFPT_EXPERIMENTER":        65534,
+	"OFPTFPT_EXPERIMENTER_MISS":   65535,
+}
+
+func (x OfpTableFeaturePropType) String() string {
+	return proto.EnumName(OfpTableFeaturePropType_name, int32(x))
+}
+
+func (OfpTableFeaturePropType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{48}
+}
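+
+// Illustrative sketch (editor's addition, not protoc output): per the
+// comment above, a set low-order bit marks the table-miss variant of a
+// table feature property.
+func exampleIsTableMissProp(t OfpTableFeaturePropType) bool {
+	return int32(t)&1 == 1
+}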
+
+// Group configuration flags
+type OfpGroupCapabilities int32
+
+const (
+	OfpGroupCapabilities_OFPGFC_INVALID         OfpGroupCapabilities = 0
+	OfpGroupCapabilities_OFPGFC_SELECT_WEIGHT   OfpGroupCapabilities = 1
+	OfpGroupCapabilities_OFPGFC_SELECT_LIVENESS OfpGroupCapabilities = 2
+	OfpGroupCapabilities_OFPGFC_CHAINING        OfpGroupCapabilities = 4
+	OfpGroupCapabilities_OFPGFC_CHAINING_CHECKS OfpGroupCapabilities = 8
+)
+
+var OfpGroupCapabilities_name = map[int32]string{
+	0: "OFPGFC_INVALID",
+	1: "OFPGFC_SELECT_WEIGHT",
+	2: "OFPGFC_SELECT_LIVENESS",
+	4: "OFPGFC_CHAINING",
+	8: "OFPGFC_CHAINING_CHECKS",
+}
+
+var OfpGroupCapabilities_value = map[string]int32{
+	"OFPGFC_INVALID":         0,
+	"OFPGFC_SELECT_WEIGHT":   1,
+	"OFPGFC_SELECT_LIVENESS": 2,
+	"OFPGFC_CHAINING":        4,
+	"OFPGFC_CHAINING_CHECKS": 8,
+}
+
+func (x OfpGroupCapabilities) String() string {
+	return proto.EnumName(OfpGroupCapabilities_name, int32(x))
+}
+
+func (OfpGroupCapabilities) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{49}
+}
+
+type OfpQueueProperties int32
+
+const (
+	OfpQueueProperties_OFPQT_INVALID      OfpQueueProperties = 0
+	OfpQueueProperties_OFPQT_MIN_RATE     OfpQueueProperties = 1
+	OfpQueueProperties_OFPQT_MAX_RATE     OfpQueueProperties = 2
+	OfpQueueProperties_OFPQT_EXPERIMENTER OfpQueueProperties = 65535
+)
+
+var OfpQueueProperties_name = map[int32]string{
+	0:     "OFPQT_INVALID",
+	1:     "OFPQT_MIN_RATE",
+	2:     "OFPQT_MAX_RATE",
+	65535: "OFPQT_EXPERIMENTER",
+}
+
+var OfpQueueProperties_value = map[string]int32{
+	"OFPQT_INVALID":      0,
+	"OFPQT_MIN_RATE":     1,
+	"OFPQT_MAX_RATE":     2,
+	"OFPQT_EXPERIMENTER": 65535,
+}
+
+func (x OfpQueueProperties) String() string {
+	return proto.EnumName(OfpQueueProperties_name, int32(x))
+}
+
+func (OfpQueueProperties) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{50}
+}
+
+// Controller roles.
+type OfpControllerRole int32
+
+const (
+	OfpControllerRole_OFPCR_ROLE_NOCHANGE OfpControllerRole = 0
+	OfpControllerRole_OFPCR_ROLE_EQUAL    OfpControllerRole = 1
+	OfpControllerRole_OFPCR_ROLE_MASTER   OfpControllerRole = 2
+	OfpControllerRole_OFPCR_ROLE_SLAVE    OfpControllerRole = 3
+)
+
+var OfpControllerRole_name = map[int32]string{
+	0: "OFPCR_ROLE_NOCHANGE",
+	1: "OFPCR_ROLE_EQUAL",
+	2: "OFPCR_ROLE_MASTER",
+	3: "OFPCR_ROLE_SLAVE",
+}
+
+var OfpControllerRole_value = map[string]int32{
+	"OFPCR_ROLE_NOCHANGE": 0,
+	"OFPCR_ROLE_EQUAL":    1,
+	"OFPCR_ROLE_MASTER":   2,
+	"OFPCR_ROLE_SLAVE":    3,
+}
+
+func (x OfpControllerRole) String() string {
+	return proto.EnumName(OfpControllerRole_name, int32(x))
+}
+
+func (OfpControllerRole) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{51}
+}
+
+// Header on all OpenFlow packets.
+type OfpHeader struct {
+	Version              uint32   `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
+	Type                 OfpType  `protobuf:"varint,2,opt,name=type,proto3,enum=openflow_13.OfpType" json:"type,omitempty"`
+	Xid                  uint32   `protobuf:"varint,3,opt,name=xid,proto3" json:"xid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpHeader) Reset()         { *m = OfpHeader{} }
+func (m *OfpHeader) String() string { return proto.CompactTextString(m) }
+func (*OfpHeader) ProtoMessage()    {}
+func (*OfpHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{0}
+}
+
+func (m *OfpHeader) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpHeader.Unmarshal(m, b)
+}
+func (m *OfpHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpHeader.Marshal(b, m, deterministic)
+}
+func (m *OfpHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpHeader.Merge(m, src)
+}
+func (m *OfpHeader) XXX_Size() int {
+	return xxx_messageInfo_OfpHeader.Size(m)
+}
+func (m *OfpHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpHeader proto.InternalMessageInfo
+
+func (m *OfpHeader) GetVersion() uint32 {
+	if m != nil {
+		return m.Version
+	}
+	return 0
+}
+
+func (m *OfpHeader) GetType() OfpType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpType_OFPT_HELLO
+}
+
+func (m *OfpHeader) GetXid() uint32 {
+	if m != nil {
+		return m.Xid
+	}
+	return 0
+}
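+
+// Illustrative sketch (editor's addition, not protoc output): the generated
+// getters guard against a nil receiver, so callers need no explicit check.
+func exampleHeaderXid(h *OfpHeader) uint32 {
+	return h.GetXid() // returns 0 when h is nil
+}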
+
+// Common header for all Hello Elements
+type OfpHelloElemHeader struct {
+	Type OfpHelloElemType `protobuf:"varint,1,opt,name=type,proto3,enum=openflow_13.OfpHelloElemType" json:"type,omitempty"`
+	// Types that are valid to be assigned to Element:
+	//	*OfpHelloElemHeader_Versionbitmap
+	Element              isOfpHelloElemHeader_Element `protobuf_oneof:"element"`
+	XXX_NoUnkeyedLiteral struct{}                     `json:"-"`
+	XXX_unrecognized     []byte                       `json:"-"`
+	XXX_sizecache        int32                        `json:"-"`
+}
+
+func (m *OfpHelloElemHeader) Reset()         { *m = OfpHelloElemHeader{} }
+func (m *OfpHelloElemHeader) String() string { return proto.CompactTextString(m) }
+func (*OfpHelloElemHeader) ProtoMessage()    {}
+func (*OfpHelloElemHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{1}
+}
+
+func (m *OfpHelloElemHeader) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpHelloElemHeader.Unmarshal(m, b)
+}
+func (m *OfpHelloElemHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpHelloElemHeader.Marshal(b, m, deterministic)
+}
+func (m *OfpHelloElemHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpHelloElemHeader.Merge(m, src)
+}
+func (m *OfpHelloElemHeader) XXX_Size() int {
+	return xxx_messageInfo_OfpHelloElemHeader.Size(m)
+}
+func (m *OfpHelloElemHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpHelloElemHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpHelloElemHeader proto.InternalMessageInfo
+
+func (m *OfpHelloElemHeader) GetType() OfpHelloElemType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpHelloElemType_OFPHET_INVALID
+}
+
+type isOfpHelloElemHeader_Element interface {
+	isOfpHelloElemHeader_Element()
+}
+
+type OfpHelloElemHeader_Versionbitmap struct {
+	Versionbitmap *OfpHelloElemVersionbitmap `protobuf:"bytes,2,opt,name=versionbitmap,proto3,oneof"`
+}
+
+func (*OfpHelloElemHeader_Versionbitmap) isOfpHelloElemHeader_Element() {}
+
+func (m *OfpHelloElemHeader) GetElement() isOfpHelloElemHeader_Element {
+	if m != nil {
+		return m.Element
+	}
+	return nil
+}
+
+func (m *OfpHelloElemHeader) GetVersionbitmap() *OfpHelloElemVersionbitmap {
+	if x, ok := m.GetElement().(*OfpHelloElemHeader_Versionbitmap); ok {
+		return x.Versionbitmap
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*OfpHelloElemHeader) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*OfpHelloElemHeader_Versionbitmap)(nil),
+	}
+}
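+
+// Illustrative sketch (editor's addition, not protoc output): populating the
+// oneof Element field means wrapping the value in its generated single-field
+// struct; OFPHET_VERSIONBITMAP is assumed to be the element-type value
+// declared earlier in this file.
+func exampleHelloElem(bitmaps []uint32) *OfpHelloElemHeader {
+	return &OfpHelloElemHeader{
+		Type: OfpHelloElemType_OFPHET_VERSIONBITMAP,
+		Element: &OfpHelloElemHeader_Versionbitmap{
+			Versionbitmap: &OfpHelloElemVersionbitmap{Bitmaps: bitmaps},
+		},
+	}
+}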
+
+// Version bitmap Hello Element
+type OfpHelloElemVersionbitmap struct {
+	Bitmaps              []uint32 `protobuf:"varint,2,rep,packed,name=bitmaps,proto3" json:"bitmaps,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpHelloElemVersionbitmap) Reset()         { *m = OfpHelloElemVersionbitmap{} }
+func (m *OfpHelloElemVersionbitmap) String() string { return proto.CompactTextString(m) }
+func (*OfpHelloElemVersionbitmap) ProtoMessage()    {}
+func (*OfpHelloElemVersionbitmap) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{2}
+}
+
+func (m *OfpHelloElemVersionbitmap) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpHelloElemVersionbitmap.Unmarshal(m, b)
+}
+func (m *OfpHelloElemVersionbitmap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpHelloElemVersionbitmap.Marshal(b, m, deterministic)
+}
+func (m *OfpHelloElemVersionbitmap) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpHelloElemVersionbitmap.Merge(m, src)
+}
+func (m *OfpHelloElemVersionbitmap) XXX_Size() int {
+	return xxx_messageInfo_OfpHelloElemVersionbitmap.Size(m)
+}
+func (m *OfpHelloElemVersionbitmap) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpHelloElemVersionbitmap.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpHelloElemVersionbitmap proto.InternalMessageInfo
+
+func (m *OfpHelloElemVersionbitmap) GetBitmaps() []uint32 {
+	if m != nil {
+		return m.Bitmaps
+	}
+	return nil
+}
+
+// OFPT_HELLO.  This message includes zero or more hello elements having
+// variable size. Unknown element types must be ignored/skipped, to allow
+// for future extensions.
+type OfpHello struct {
+	// Hello element list
+	Elements             []*OfpHelloElemHeader `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *OfpHello) Reset()         { *m = OfpHello{} }
+func (m *OfpHello) String() string { return proto.CompactTextString(m) }
+func (*OfpHello) ProtoMessage()    {}
+func (*OfpHello) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{3}
+}
+
+func (m *OfpHello) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpHello.Unmarshal(m, b)
+}
+func (m *OfpHello) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpHello.Marshal(b, m, deterministic)
+}
+func (m *OfpHello) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpHello.Merge(m, src)
+}
+func (m *OfpHello) XXX_Size() int {
+	return xxx_messageInfo_OfpHello.Size(m)
+}
+func (m *OfpHello) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpHello.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpHello proto.InternalMessageInfo
+
+func (m *OfpHello) GetElements() []*OfpHelloElemHeader {
+	if m != nil {
+		return m.Elements
+	}
+	return nil
+}
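+
+// Illustrative sketch (not part of the generated code): building an
+// OFPT_HELLO message that carries a version bitmap element. The oneof
+// field Element is set through its generated wrapper type.
+// OfpHelloElemType_OFPHET_VERSIONBITMAP is assumed to be defined by the
+// enum generated earlier in this file.
+func exampleHello() *OfpHello {
+	return &OfpHello{
+		Elements: []*OfpHelloElemHeader{{
+			Type: OfpHelloElemType_OFPHET_VERSIONBITMAP,
+			Element: &OfpHelloElemHeader_Versionbitmap{
+				Versionbitmap: &OfpHelloElemVersionbitmap{
+					// Bit 4 set => OpenFlow 1.3 (wire version 0x04).
+					Bitmaps: []uint32{1 << 4},
+				},
+			},
+		}},
+	}
+}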
+
+// Switch configuration.
+type OfpSwitchConfig struct {
+	// ofp_header header;
+	Flags                uint32   `protobuf:"varint,1,opt,name=flags,proto3" json:"flags,omitempty"`
+	MissSendLen          uint32   `protobuf:"varint,2,opt,name=miss_send_len,json=missSendLen,proto3" json:"miss_send_len,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpSwitchConfig) Reset()         { *m = OfpSwitchConfig{} }
+func (m *OfpSwitchConfig) String() string { return proto.CompactTextString(m) }
+func (*OfpSwitchConfig) ProtoMessage()    {}
+func (*OfpSwitchConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{4}
+}
+
+func (m *OfpSwitchConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpSwitchConfig.Unmarshal(m, b)
+}
+func (m *OfpSwitchConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpSwitchConfig.Marshal(b, m, deterministic)
+}
+func (m *OfpSwitchConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpSwitchConfig.Merge(m, src)
+}
+func (m *OfpSwitchConfig) XXX_Size() int {
+	return xxx_messageInfo_OfpSwitchConfig.Size(m)
+}
+func (m *OfpSwitchConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpSwitchConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpSwitchConfig proto.InternalMessageInfo
+
+func (m *OfpSwitchConfig) GetFlags() uint32 {
+	if m != nil {
+		return m.Flags
+	}
+	return 0
+}
+
+func (m *OfpSwitchConfig) GetMissSendLen() uint32 {
+	if m != nil {
+		return m.MissSendLen
+	}
+	return 0
+}
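+
+// Illustrative sketch (not part of the generated code): the XXX_* methods
+// above are not called directly; (de)serialization goes through the proto
+// package, e.g. a marshal/unmarshal round trip of a switch configuration.
+func exampleSwitchConfigRoundTrip() (*OfpSwitchConfig, error) {
+	in := &OfpSwitchConfig{Flags: 0, MissSendLen: 128}
+	b, err := proto.Marshal(in)
+	if err != nil {
+		return nil, err
+	}
+	out := &OfpSwitchConfig{}
+	if err := proto.Unmarshal(b, out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}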
+
+// Configure/Modify behavior of a flow table
+type OfpTableMod struct {
+	// ofp_header header;
+	TableId              uint32   `protobuf:"varint,1,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	Config               uint32   `protobuf:"varint,2,opt,name=config,proto3" json:"config,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpTableMod) Reset()         { *m = OfpTableMod{} }
+func (m *OfpTableMod) String() string { return proto.CompactTextString(m) }
+func (*OfpTableMod) ProtoMessage()    {}
+func (*OfpTableMod) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{5}
+}
+
+func (m *OfpTableMod) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpTableMod.Unmarshal(m, b)
+}
+func (m *OfpTableMod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpTableMod.Marshal(b, m, deterministic)
+}
+func (m *OfpTableMod) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpTableMod.Merge(m, src)
+}
+func (m *OfpTableMod) XXX_Size() int {
+	return xxx_messageInfo_OfpTableMod.Size(m)
+}
+func (m *OfpTableMod) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpTableMod.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpTableMod proto.InternalMessageInfo
+
+func (m *OfpTableMod) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+func (m *OfpTableMod) GetConfig() uint32 {
+	if m != nil {
+		return m.Config
+	}
+	return 0
+}
+
+// Description of a port
+type OfpPort struct {
+	PortNo uint32   `protobuf:"varint,1,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	HwAddr []uint32 `protobuf:"varint,2,rep,packed,name=hw_addr,json=hwAddr,proto3" json:"hw_addr,omitempty"`
+	Name   string   `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+	Config uint32   `protobuf:"varint,4,opt,name=config,proto3" json:"config,omitempty"`
+	State  uint32   `protobuf:"varint,5,opt,name=state,proto3" json:"state,omitempty"`
+	// Bitmaps of OFPPF_* that describe features.  All bits zeroed if
+	// unsupported or unavailable.
+	Curr                 uint32   `protobuf:"varint,6,opt,name=curr,proto3" json:"curr,omitempty"`
+	Advertised           uint32   `protobuf:"varint,7,opt,name=advertised,proto3" json:"advertised,omitempty"`
+	Supported            uint32   `protobuf:"varint,8,opt,name=supported,proto3" json:"supported,omitempty"`
+	Peer                 uint32   `protobuf:"varint,9,opt,name=peer,proto3" json:"peer,omitempty"`
+	CurrSpeed            uint32   `protobuf:"varint,10,opt,name=curr_speed,json=currSpeed,proto3" json:"curr_speed,omitempty"`
+	MaxSpeed             uint32   `protobuf:"varint,11,opt,name=max_speed,json=maxSpeed,proto3" json:"max_speed,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpPort) Reset()         { *m = OfpPort{} }
+func (m *OfpPort) String() string { return proto.CompactTextString(m) }
+func (*OfpPort) ProtoMessage()    {}
+func (*OfpPort) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{6}
+}
+
+func (m *OfpPort) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpPort.Unmarshal(m, b)
+}
+func (m *OfpPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpPort.Marshal(b, m, deterministic)
+}
+func (m *OfpPort) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpPort.Merge(m, src)
+}
+func (m *OfpPort) XXX_Size() int {
+	return xxx_messageInfo_OfpPort.Size(m)
+}
+func (m *OfpPort) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpPort.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpPort proto.InternalMessageInfo
+
+func (m *OfpPort) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *OfpPort) GetHwAddr() []uint32 {
+	if m != nil {
+		return m.HwAddr
+	}
+	return nil
+}
+
+func (m *OfpPort) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *OfpPort) GetConfig() uint32 {
+	if m != nil {
+		return m.Config
+	}
+	return 0
+}
+
+func (m *OfpPort) GetState() uint32 {
+	if m != nil {
+		return m.State
+	}
+	return 0
+}
+
+func (m *OfpPort) GetCurr() uint32 {
+	if m != nil {
+		return m.Curr
+	}
+	return 0
+}
+
+func (m *OfpPort) GetAdvertised() uint32 {
+	if m != nil {
+		return m.Advertised
+	}
+	return 0
+}
+
+func (m *OfpPort) GetSupported() uint32 {
+	if m != nil {
+		return m.Supported
+	}
+	return 0
+}
+
+func (m *OfpPort) GetPeer() uint32 {
+	if m != nil {
+		return m.Peer
+	}
+	return 0
+}
+
+func (m *OfpPort) GetCurrSpeed() uint32 {
+	if m != nil {
+		return m.CurrSpeed
+	}
+	return 0
+}
+
+func (m *OfpPort) GetMaxSpeed() uint32 {
+	if m != nil {
+		return m.MaxSpeed
+	}
+	return 0
+}
+
+// Switch features.
+type OfpSwitchFeatures struct {
+	// ofp_header header;
+	DatapathId  uint64 `protobuf:"varint,1,opt,name=datapath_id,json=datapathId,proto3" json:"datapath_id,omitempty"`
+	NBuffers    uint32 `protobuf:"varint,2,opt,name=n_buffers,json=nBuffers,proto3" json:"n_buffers,omitempty"`
+	NTables     uint32 `protobuf:"varint,3,opt,name=n_tables,json=nTables,proto3" json:"n_tables,omitempty"`
+	AuxiliaryId uint32 `protobuf:"varint,4,opt,name=auxiliary_id,json=auxiliaryId,proto3" json:"auxiliary_id,omitempty"`
+	// Features.
+	Capabilities         uint32   `protobuf:"varint,5,opt,name=capabilities,proto3" json:"capabilities,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpSwitchFeatures) Reset()         { *m = OfpSwitchFeatures{} }
+func (m *OfpSwitchFeatures) String() string { return proto.CompactTextString(m) }
+func (*OfpSwitchFeatures) ProtoMessage()    {}
+func (*OfpSwitchFeatures) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{7}
+}
+
+func (m *OfpSwitchFeatures) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpSwitchFeatures.Unmarshal(m, b)
+}
+func (m *OfpSwitchFeatures) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpSwitchFeatures.Marshal(b, m, deterministic)
+}
+func (m *OfpSwitchFeatures) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpSwitchFeatures.Merge(m, src)
+}
+func (m *OfpSwitchFeatures) XXX_Size() int {
+	return xxx_messageInfo_OfpSwitchFeatures.Size(m)
+}
+func (m *OfpSwitchFeatures) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpSwitchFeatures.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpSwitchFeatures proto.InternalMessageInfo
+
+func (m *OfpSwitchFeatures) GetDatapathId() uint64 {
+	if m != nil {
+		return m.DatapathId
+	}
+	return 0
+}
+
+func (m *OfpSwitchFeatures) GetNBuffers() uint32 {
+	if m != nil {
+		return m.NBuffers
+	}
+	return 0
+}
+
+func (m *OfpSwitchFeatures) GetNTables() uint32 {
+	if m != nil {
+		return m.NTables
+	}
+	return 0
+}
+
+func (m *OfpSwitchFeatures) GetAuxiliaryId() uint32 {
+	if m != nil {
+		return m.AuxiliaryId
+	}
+	return 0
+}
+
+func (m *OfpSwitchFeatures) GetCapabilities() uint32 {
+	if m != nil {
+		return m.Capabilities
+	}
+	return 0
+}
+
+// A physical port has changed in the datapath
+type OfpPortStatus struct {
+	// ofp_header header;
+	Reason               OfpPortReason `protobuf:"varint,1,opt,name=reason,proto3,enum=openflow_13.OfpPortReason" json:"reason,omitempty"`
+	Desc                 *OfpPort      `protobuf:"bytes,2,opt,name=desc,proto3" json:"desc,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *OfpPortStatus) Reset()         { *m = OfpPortStatus{} }
+func (m *OfpPortStatus) String() string { return proto.CompactTextString(m) }
+func (*OfpPortStatus) ProtoMessage()    {}
+func (*OfpPortStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{8}
+}
+
+func (m *OfpPortStatus) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpPortStatus.Unmarshal(m, b)
+}
+func (m *OfpPortStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpPortStatus.Marshal(b, m, deterministic)
+}
+func (m *OfpPortStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpPortStatus.Merge(m, src)
+}
+func (m *OfpPortStatus) XXX_Size() int {
+	return xxx_messageInfo_OfpPortStatus.Size(m)
+}
+func (m *OfpPortStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpPortStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpPortStatus proto.InternalMessageInfo
+
+func (m *OfpPortStatus) GetReason() OfpPortReason {
+	if m != nil {
+		return m.Reason
+	}
+	return OfpPortReason_OFPPR_ADD
+}
+
+func (m *OfpPortStatus) GetDesc() *OfpPort {
+	if m != nil {
+		return m.Desc
+	}
+	return nil
+}
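+
+// Illustrative sketch (not part of the generated code): consuming an
+// OFPT_PORT_STATUS message via the nil-safe getters above.
+// OfpPortReason_OFPPR_DELETE is assumed from the enum generated earlier
+// in this file.
+func examplePortStatusName(m *OfpPortStatus) string {
+	switch m.GetReason() {
+	case OfpPortReason_OFPPR_DELETE:
+		return "" // port is gone; Desc no longer applies
+	default:
+		// OFPPR_ADD / OFPPR_MODIFY: the nil-safe getters make the
+		// chained access safe even when Desc is unset.
+		return m.GetDesc().GetName()
+	}
+}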
+
+// Modify behavior of the physical port
+type OfpPortMod struct {
+	// ofp_header header;
+	PortNo uint32   `protobuf:"varint,1,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	HwAddr []uint32 `protobuf:"varint,2,rep,packed,name=hw_addr,json=hwAddr,proto3" json:"hw_addr,omitempty"`
+	// The hardware address is not
+	// configurable.  This is used to
+	// sanity-check the request, so it must
+	// be the same as returned in an
+	// ofp_port struct.
+	Config               uint32   `protobuf:"varint,3,opt,name=config,proto3" json:"config,omitempty"`
+	Mask                 uint32   `protobuf:"varint,4,opt,name=mask,proto3" json:"mask,omitempty"`
+	Advertise            uint32   `protobuf:"varint,5,opt,name=advertise,proto3" json:"advertise,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpPortMod) Reset()         { *m = OfpPortMod{} }
+func (m *OfpPortMod) String() string { return proto.CompactTextString(m) }
+func (*OfpPortMod) ProtoMessage()    {}
+func (*OfpPortMod) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{9}
+}
+
+func (m *OfpPortMod) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpPortMod.Unmarshal(m, b)
+}
+func (m *OfpPortMod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpPortMod.Marshal(b, m, deterministic)
+}
+func (m *OfpPortMod) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpPortMod.Merge(m, src)
+}
+func (m *OfpPortMod) XXX_Size() int {
+	return xxx_messageInfo_OfpPortMod.Size(m)
+}
+func (m *OfpPortMod) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpPortMod.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpPortMod proto.InternalMessageInfo
+
+func (m *OfpPortMod) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *OfpPortMod) GetHwAddr() []uint32 {
+	if m != nil {
+		return m.HwAddr
+	}
+	return nil
+}
+
+func (m *OfpPortMod) GetConfig() uint32 {
+	if m != nil {
+		return m.Config
+	}
+	return 0
+}
+
+func (m *OfpPortMod) GetMask() uint32 {
+	if m != nil {
+		return m.Mask
+	}
+	return 0
+}
+
+func (m *OfpPortMod) GetAdvertise() uint32 {
+	if m != nil {
+		return m.Advertise
+	}
+	return 0
+}
+
+// Fields to match against flows
+type OfpMatch struct {
+	Type                 OfpMatchType   `protobuf:"varint,1,opt,name=type,proto3,enum=openflow_13.OfpMatchType" json:"type,omitempty"`
+	OxmFields            []*OfpOxmField `protobuf:"bytes,2,rep,name=oxm_fields,json=oxmFields,proto3" json:"oxm_fields,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
+}
+
+func (m *OfpMatch) Reset()         { *m = OfpMatch{} }
+func (m *OfpMatch) String() string { return proto.CompactTextString(m) }
+func (*OfpMatch) ProtoMessage()    {}
+func (*OfpMatch) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{10}
+}
+
+func (m *OfpMatch) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMatch.Unmarshal(m, b)
+}
+func (m *OfpMatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMatch.Marshal(b, m, deterministic)
+}
+func (m *OfpMatch) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMatch.Merge(m, src)
+}
+func (m *OfpMatch) XXX_Size() int {
+	return xxx_messageInfo_OfpMatch.Size(m)
+}
+func (m *OfpMatch) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMatch.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMatch proto.InternalMessageInfo
+
+func (m *OfpMatch) GetType() OfpMatchType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpMatchType_OFPMT_STANDARD
+}
+
+func (m *OfpMatch) GetOxmFields() []*OfpOxmField {
+	if m != nil {
+		return m.OxmFields
+	}
+	return nil
+}
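+
+// Illustrative sketch (not part of the generated code): an OXM match on the
+// ingress port. OfpMatchType_OFPMT_OXM and OfpOxmClass_OFPXMC_OPENFLOW_BASIC
+// are assumed to be defined by the enums generated earlier in this file.
+func exampleInPortMatch(port uint32) *OfpMatch {
+	return &OfpMatch{
+		Type: OfpMatchType_OFPMT_OXM,
+		OxmFields: []*OfpOxmField{{
+			OxmClass: OfpOxmClass_OFPXMC_OPENFLOW_BASIC,
+			Field: &OfpOxmField_OfbField{
+				OfbField: &OfpOxmOfbField{
+					Type:  OxmOfbFieldTypes_OFPXMT_OFB_IN_PORT,
+					Value: &OfpOxmOfbField_Port{Port: port},
+				},
+			},
+		}},
+	}
+}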
+
+// OXM Flow match fields
+type OfpOxmField struct {
+	OxmClass OfpOxmClass `protobuf:"varint,1,opt,name=oxm_class,json=oxmClass,proto3,enum=openflow_13.OfpOxmClass" json:"oxm_class,omitempty"`
+	// Types that are valid to be assigned to Field:
+	//	*OfpOxmField_OfbField
+	//	*OfpOxmField_ExperimenterField
+	Field                isOfpOxmField_Field `protobuf_oneof:"field"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *OfpOxmField) Reset()         { *m = OfpOxmField{} }
+func (m *OfpOxmField) String() string { return proto.CompactTextString(m) }
+func (*OfpOxmField) ProtoMessage()    {}
+func (*OfpOxmField) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{11}
+}
+
+func (m *OfpOxmField) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpOxmField.Unmarshal(m, b)
+}
+func (m *OfpOxmField) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpOxmField.Marshal(b, m, deterministic)
+}
+func (m *OfpOxmField) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpOxmField.Merge(m, src)
+}
+func (m *OfpOxmField) XXX_Size() int {
+	return xxx_messageInfo_OfpOxmField.Size(m)
+}
+func (m *OfpOxmField) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpOxmField.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpOxmField proto.InternalMessageInfo
+
+func (m *OfpOxmField) GetOxmClass() OfpOxmClass {
+	if m != nil {
+		return m.OxmClass
+	}
+	return OfpOxmClass_OFPXMC_NXM_0
+}
+
+type isOfpOxmField_Field interface {
+	isOfpOxmField_Field()
+}
+
+type OfpOxmField_OfbField struct {
+	OfbField *OfpOxmOfbField `protobuf:"bytes,4,opt,name=ofb_field,json=ofbField,proto3,oneof"`
+}
+
+type OfpOxmField_ExperimenterField struct {
+	ExperimenterField *OfpOxmExperimenterField `protobuf:"bytes,5,opt,name=experimenter_field,json=experimenterField,proto3,oneof"`
+}
+
+func (*OfpOxmField_OfbField) isOfpOxmField_Field() {}
+
+func (*OfpOxmField_ExperimenterField) isOfpOxmField_Field() {}
+
+func (m *OfpOxmField) GetField() isOfpOxmField_Field {
+	if m != nil {
+		return m.Field
+	}
+	return nil
+}
+
+func (m *OfpOxmField) GetOfbField() *OfpOxmOfbField {
+	if x, ok := m.GetField().(*OfpOxmField_OfbField); ok {
+		return x.OfbField
+	}
+	return nil
+}
+
+func (m *OfpOxmField) GetExperimenterField() *OfpOxmExperimenterField {
+	if x, ok := m.GetField().(*OfpOxmField_ExperimenterField); ok {
+		return x.ExperimenterField
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*OfpOxmField) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*OfpOxmField_OfbField)(nil),
+		(*OfpOxmField_ExperimenterField)(nil),
+	}
+}
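+
+// Illustrative sketch (not part of the generated code): the Field oneof is
+// dispatched with a type switch over the generated wrapper types listed in
+// XXX_OneofWrappers above.
+func exampleOxmFieldKind(f *OfpOxmField) string {
+	switch f.GetField().(type) {
+	case *OfpOxmField_OfbField:
+		return "openflow-basic"
+	case *OfpOxmField_ExperimenterField:
+		return "experimenter"
+	default:
+		return "unset"
+	}
+}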
+
+// OXM OpenFlow Basic Match Field
+type OfpOxmOfbField struct {
+	Type    OxmOfbFieldTypes `protobuf:"varint,1,opt,name=type,proto3,enum=openflow_13.OxmOfbFieldTypes" json:"type,omitempty"`
+	HasMask bool             `protobuf:"varint,2,opt,name=has_mask,json=hasMask,proto3" json:"has_mask,omitempty"`
+	// Types that are valid to be assigned to Value:
+	//	*OfpOxmOfbField_Port
+	//	*OfpOxmOfbField_PhysicalPort
+	//	*OfpOxmOfbField_TableMetadata
+	//	*OfpOxmOfbField_EthDst
+	//	*OfpOxmOfbField_EthSrc
+	//	*OfpOxmOfbField_EthType
+	//	*OfpOxmOfbField_VlanVid
+	//	*OfpOxmOfbField_VlanPcp
+	//	*OfpOxmOfbField_IpDscp
+	//	*OfpOxmOfbField_IpEcn
+	//	*OfpOxmOfbField_IpProto
+	//	*OfpOxmOfbField_Ipv4Src
+	//	*OfpOxmOfbField_Ipv4Dst
+	//	*OfpOxmOfbField_TcpSrc
+	//	*OfpOxmOfbField_TcpDst
+	//	*OfpOxmOfbField_UdpSrc
+	//	*OfpOxmOfbField_UdpDst
+	//	*OfpOxmOfbField_SctpSrc
+	//	*OfpOxmOfbField_SctpDst
+	//	*OfpOxmOfbField_Icmpv4Type
+	//	*OfpOxmOfbField_Icmpv4Code
+	//	*OfpOxmOfbField_ArpOp
+	//	*OfpOxmOfbField_ArpSpa
+	//	*OfpOxmOfbField_ArpTpa
+	//	*OfpOxmOfbField_ArpSha
+	//	*OfpOxmOfbField_ArpTha
+	//	*OfpOxmOfbField_Ipv6Src
+	//	*OfpOxmOfbField_Ipv6Dst
+	//	*OfpOxmOfbField_Ipv6Flabel
+	//	*OfpOxmOfbField_Icmpv6Type
+	//	*OfpOxmOfbField_Icmpv6Code
+	//	*OfpOxmOfbField_Ipv6NdTarget
+	//	*OfpOxmOfbField_Ipv6NdSsl
+	//	*OfpOxmOfbField_Ipv6NdTll
+	//	*OfpOxmOfbField_MplsLabel
+	//	*OfpOxmOfbField_MplsTc
+	//	*OfpOxmOfbField_MplsBos
+	//	*OfpOxmOfbField_PbbIsid
+	//	*OfpOxmOfbField_TunnelId
+	//	*OfpOxmOfbField_Ipv6Exthdr
+	Value isOfpOxmOfbField_Value `protobuf_oneof:"value"`
+	// Optional mask values (must be present when has_mask is true).
+	//
+	// Types that are valid to be assigned to Mask:
+	//	*OfpOxmOfbField_TableMetadataMask
+	//	*OfpOxmOfbField_EthDstMask
+	//	*OfpOxmOfbField_EthSrcMask
+	//	*OfpOxmOfbField_VlanVidMask
+	//	*OfpOxmOfbField_Ipv4SrcMask
+	//	*OfpOxmOfbField_Ipv4DstMask
+	//	*OfpOxmOfbField_ArpSpaMask
+	//	*OfpOxmOfbField_ArpTpaMask
+	//	*OfpOxmOfbField_Ipv6SrcMask
+	//	*OfpOxmOfbField_Ipv6DstMask
+	//	*OfpOxmOfbField_Ipv6FlabelMask
+	//	*OfpOxmOfbField_PbbIsidMask
+	//	*OfpOxmOfbField_TunnelIdMask
+	//	*OfpOxmOfbField_Ipv6ExthdrMask
+	Mask                 isOfpOxmOfbField_Mask `protobuf_oneof:"mask"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *OfpOxmOfbField) Reset()         { *m = OfpOxmOfbField{} }
+func (m *OfpOxmOfbField) String() string { return proto.CompactTextString(m) }
+func (*OfpOxmOfbField) ProtoMessage()    {}
+func (*OfpOxmOfbField) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{12}
+}
+
+func (m *OfpOxmOfbField) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpOxmOfbField.Unmarshal(m, b)
+}
+func (m *OfpOxmOfbField) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpOxmOfbField.Marshal(b, m, deterministic)
+}
+func (m *OfpOxmOfbField) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpOxmOfbField.Merge(m, src)
+}
+func (m *OfpOxmOfbField) XXX_Size() int {
+	return xxx_messageInfo_OfpOxmOfbField.Size(m)
+}
+func (m *OfpOxmOfbField) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpOxmOfbField.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpOxmOfbField proto.InternalMessageInfo
+
+func (m *OfpOxmOfbField) GetType() OxmOfbFieldTypes {
+	if m != nil {
+		return m.Type
+	}
+	return OxmOfbFieldTypes_OFPXMT_OFB_IN_PORT
+}
+
+func (m *OfpOxmOfbField) GetHasMask() bool {
+	if m != nil {
+		return m.HasMask
+	}
+	return false
+}
+
+type isOfpOxmOfbField_Value interface {
+	isOfpOxmOfbField_Value()
+}
+
+type OfpOxmOfbField_Port struct {
+	Port uint32 `protobuf:"varint,3,opt,name=port,proto3,oneof"`
+}
+
+type OfpOxmOfbField_PhysicalPort struct {
+	PhysicalPort uint32 `protobuf:"varint,4,opt,name=physical_port,json=physicalPort,proto3,oneof"`
+}
+
+type OfpOxmOfbField_TableMetadata struct {
+	TableMetadata uint64 `protobuf:"varint,5,opt,name=table_metadata,json=tableMetadata,proto3,oneof"`
+}
+
+type OfpOxmOfbField_EthDst struct {
+	EthDst []byte `protobuf:"bytes,6,opt,name=eth_dst,json=ethDst,proto3,oneof"`
+}
+
+type OfpOxmOfbField_EthSrc struct {
+	EthSrc []byte `protobuf:"bytes,7,opt,name=eth_src,json=ethSrc,proto3,oneof"`
+}
+
+type OfpOxmOfbField_EthType struct {
+	EthType uint32 `protobuf:"varint,8,opt,name=eth_type,json=ethType,proto3,oneof"`
+}
+
+type OfpOxmOfbField_VlanVid struct {
+	VlanVid uint32 `protobuf:"varint,9,opt,name=vlan_vid,json=vlanVid,proto3,oneof"`
+}
+
+type OfpOxmOfbField_VlanPcp struct {
+	VlanPcp uint32 `protobuf:"varint,10,opt,name=vlan_pcp,json=vlanPcp,proto3,oneof"`
+}
+
+type OfpOxmOfbField_IpDscp struct {
+	IpDscp uint32 `protobuf:"varint,11,opt,name=ip_dscp,json=ipDscp,proto3,oneof"`
+}
+
+type OfpOxmOfbField_IpEcn struct {
+	IpEcn uint32 `protobuf:"varint,12,opt,name=ip_ecn,json=ipEcn,proto3,oneof"`
+}
+
+type OfpOxmOfbField_IpProto struct {
+	IpProto uint32 `protobuf:"varint,13,opt,name=ip_proto,json=ipProto,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv4Src struct {
+	Ipv4Src uint32 `protobuf:"varint,14,opt,name=ipv4_src,json=ipv4Src,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv4Dst struct {
+	Ipv4Dst uint32 `protobuf:"varint,15,opt,name=ipv4_dst,json=ipv4Dst,proto3,oneof"`
+}
+
+type OfpOxmOfbField_TcpSrc struct {
+	TcpSrc uint32 `protobuf:"varint,16,opt,name=tcp_src,json=tcpSrc,proto3,oneof"`
+}
+
+type OfpOxmOfbField_TcpDst struct {
+	TcpDst uint32 `protobuf:"varint,17,opt,name=tcp_dst,json=tcpDst,proto3,oneof"`
+}
+
+type OfpOxmOfbField_UdpSrc struct {
+	UdpSrc uint32 `protobuf:"varint,18,opt,name=udp_src,json=udpSrc,proto3,oneof"`
+}
+
+type OfpOxmOfbField_UdpDst struct {
+	UdpDst uint32 `protobuf:"varint,19,opt,name=udp_dst,json=udpDst,proto3,oneof"`
+}
+
+type OfpOxmOfbField_SctpSrc struct {
+	SctpSrc uint32 `protobuf:"varint,20,opt,name=sctp_src,json=sctpSrc,proto3,oneof"`
+}
+
+type OfpOxmOfbField_SctpDst struct {
+	SctpDst uint32 `protobuf:"varint,21,opt,name=sctp_dst,json=sctpDst,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Icmpv4Type struct {
+	Icmpv4Type uint32 `protobuf:"varint,22,opt,name=icmpv4_type,json=icmpv4Type,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Icmpv4Code struct {
+	Icmpv4Code uint32 `protobuf:"varint,23,opt,name=icmpv4_code,json=icmpv4Code,proto3,oneof"`
+}
+
+type OfpOxmOfbField_ArpOp struct {
+	ArpOp uint32 `protobuf:"varint,24,opt,name=arp_op,json=arpOp,proto3,oneof"`
+}
+
+type OfpOxmOfbField_ArpSpa struct {
+	ArpSpa uint32 `protobuf:"varint,25,opt,name=arp_spa,json=arpSpa,proto3,oneof"`
+}
+
+type OfpOxmOfbField_ArpTpa struct {
+	ArpTpa uint32 `protobuf:"varint,26,opt,name=arp_tpa,json=arpTpa,proto3,oneof"`
+}
+
+type OfpOxmOfbField_ArpSha struct {
+	ArpSha []byte `protobuf:"bytes,27,opt,name=arp_sha,json=arpSha,proto3,oneof"`
+}
+
+type OfpOxmOfbField_ArpTha struct {
+	ArpTha []byte `protobuf:"bytes,28,opt,name=arp_tha,json=arpTha,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6Src struct {
+	Ipv6Src []byte `protobuf:"bytes,29,opt,name=ipv6_src,json=ipv6Src,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6Dst struct {
+	Ipv6Dst []byte `protobuf:"bytes,30,opt,name=ipv6_dst,json=ipv6Dst,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6Flabel struct {
+	Ipv6Flabel uint32 `protobuf:"varint,31,opt,name=ipv6_flabel,json=ipv6Flabel,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Icmpv6Type struct {
+	Icmpv6Type uint32 `protobuf:"varint,32,opt,name=icmpv6_type,json=icmpv6Type,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Icmpv6Code struct {
+	Icmpv6Code uint32 `protobuf:"varint,33,opt,name=icmpv6_code,json=icmpv6Code,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6NdTarget struct {
+	Ipv6NdTarget []byte `protobuf:"bytes,34,opt,name=ipv6_nd_target,json=ipv6NdTarget,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6NdSsl struct {
+	Ipv6NdSsl []byte `protobuf:"bytes,35,opt,name=ipv6_nd_ssl,json=ipv6NdSsl,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6NdTll struct {
+	Ipv6NdTll []byte `protobuf:"bytes,36,opt,name=ipv6_nd_tll,json=ipv6NdTll,proto3,oneof"`
+}
+
+type OfpOxmOfbField_MplsLabel struct {
+	MplsLabel uint32 `protobuf:"varint,37,opt,name=mpls_label,json=mplsLabel,proto3,oneof"`
+}
+
+type OfpOxmOfbField_MplsTc struct {
+	MplsTc uint32 `protobuf:"varint,38,opt,name=mpls_tc,json=mplsTc,proto3,oneof"`
+}
+
+type OfpOxmOfbField_MplsBos struct {
+	MplsBos uint32 `protobuf:"varint,39,opt,name=mpls_bos,json=mplsBos,proto3,oneof"`
+}
+
+type OfpOxmOfbField_PbbIsid struct {
+	PbbIsid uint32 `protobuf:"varint,40,opt,name=pbb_isid,json=pbbIsid,proto3,oneof"`
+}
+
+type OfpOxmOfbField_TunnelId struct {
+	TunnelId uint64 `protobuf:"varint,41,opt,name=tunnel_id,json=tunnelId,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6Exthdr struct {
+	Ipv6Exthdr uint32 `protobuf:"varint,42,opt,name=ipv6_exthdr,json=ipv6Exthdr,proto3,oneof"`
+}
+
+func (*OfpOxmOfbField_Port) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_PhysicalPort) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_TableMetadata) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_EthDst) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_EthSrc) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_EthType) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_VlanVid) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_VlanPcp) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_IpDscp) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_IpEcn) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_IpProto) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Ipv4Src) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Ipv4Dst) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_TcpSrc) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_TcpDst) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_UdpSrc) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_UdpDst) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_SctpSrc) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_SctpDst) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Icmpv4Type) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Icmpv4Code) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_ArpOp) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_ArpSpa) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_ArpTpa) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_ArpSha) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_ArpTha) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Ipv6Src) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Ipv6Dst) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Ipv6Flabel) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Icmpv6Type) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Icmpv6Code) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Ipv6NdTarget) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Ipv6NdSsl) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Ipv6NdTll) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_MplsLabel) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_MplsTc) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_MplsBos) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_PbbIsid) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_TunnelId) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Ipv6Exthdr) isOfpOxmOfbField_Value() {}
+
+func (m *OfpOxmOfbField) GetValue() isOfpOxmOfbField_Value {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetPort() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Port); ok {
+		return x.Port
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetPhysicalPort() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_PhysicalPort); ok {
+		return x.PhysicalPort
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetTableMetadata() uint64 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_TableMetadata); ok {
+		return x.TableMetadata
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetEthDst() []byte {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_EthDst); ok {
+		return x.EthDst
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetEthSrc() []byte {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_EthSrc); ok {
+		return x.EthSrc
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetEthType() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_EthType); ok {
+		return x.EthType
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetVlanVid() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_VlanVid); ok {
+		return x.VlanVid
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetVlanPcp() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_VlanPcp); ok {
+		return x.VlanPcp
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpDscp() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_IpDscp); ok {
+		return x.IpDscp
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpEcn() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_IpEcn); ok {
+		return x.IpEcn
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpProto() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_IpProto); ok {
+		return x.IpProto
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpv4Src() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Ipv4Src); ok {
+		return x.Ipv4Src
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpv4Dst() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Ipv4Dst); ok {
+		return x.Ipv4Dst
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetTcpSrc() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_TcpSrc); ok {
+		return x.TcpSrc
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetTcpDst() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_TcpDst); ok {
+		return x.TcpDst
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetUdpSrc() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_UdpSrc); ok {
+		return x.UdpSrc
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetUdpDst() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_UdpDst); ok {
+		return x.UdpDst
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetSctpSrc() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_SctpSrc); ok {
+		return x.SctpSrc
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetSctpDst() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_SctpDst); ok {
+		return x.SctpDst
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIcmpv4Type() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Icmpv4Type); ok {
+		return x.Icmpv4Type
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIcmpv4Code() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Icmpv4Code); ok {
+		return x.Icmpv4Code
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetArpOp() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_ArpOp); ok {
+		return x.ArpOp
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetArpSpa() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_ArpSpa); ok {
+		return x.ArpSpa
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetArpTpa() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_ArpTpa); ok {
+		return x.ArpTpa
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetArpSha() []byte {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_ArpSha); ok {
+		return x.ArpSha
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetArpTha() []byte {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_ArpTha); ok {
+		return x.ArpTha
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetIpv6Src() []byte {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Ipv6Src); ok {
+		return x.Ipv6Src
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetIpv6Dst() []byte {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Ipv6Dst); ok {
+		return x.Ipv6Dst
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetIpv6Flabel() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Ipv6Flabel); ok {
+		return x.Ipv6Flabel
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIcmpv6Type() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Icmpv6Type); ok {
+		return x.Icmpv6Type
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIcmpv6Code() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Icmpv6Code); ok {
+		return x.Icmpv6Code
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpv6NdTarget() []byte {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Ipv6NdTarget); ok {
+		return x.Ipv6NdTarget
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetIpv6NdSsl() []byte {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Ipv6NdSsl); ok {
+		return x.Ipv6NdSsl
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetIpv6NdTll() []byte {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Ipv6NdTll); ok {
+		return x.Ipv6NdTll
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetMplsLabel() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_MplsLabel); ok {
+		return x.MplsLabel
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetMplsTc() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_MplsTc); ok {
+		return x.MplsTc
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetMplsBos() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_MplsBos); ok {
+		return x.MplsBos
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetPbbIsid() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_PbbIsid); ok {
+		return x.PbbIsid
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetTunnelId() uint64 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_TunnelId); ok {
+		return x.TunnelId
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpv6Exthdr() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Ipv6Exthdr); ok {
+		return x.Ipv6Exthdr
+	}
+	return 0
+}
+
+type isOfpOxmOfbField_Mask interface {
+	isOfpOxmOfbField_Mask()
+}
+
+type OfpOxmOfbField_TableMetadataMask struct {
+	TableMetadataMask uint64 `protobuf:"varint,105,opt,name=table_metadata_mask,json=tableMetadataMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_EthDstMask struct {
+	EthDstMask []byte `protobuf:"bytes,106,opt,name=eth_dst_mask,json=ethDstMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_EthSrcMask struct {
+	EthSrcMask []byte `protobuf:"bytes,107,opt,name=eth_src_mask,json=ethSrcMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_VlanVidMask struct {
+	VlanVidMask uint32 `protobuf:"varint,109,opt,name=vlan_vid_mask,json=vlanVidMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv4SrcMask struct {
+	Ipv4SrcMask uint32 `protobuf:"varint,114,opt,name=ipv4_src_mask,json=ipv4SrcMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv4DstMask struct {
+	Ipv4DstMask uint32 `protobuf:"varint,115,opt,name=ipv4_dst_mask,json=ipv4DstMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_ArpSpaMask struct {
+	ArpSpaMask uint32 `protobuf:"varint,125,opt,name=arp_spa_mask,json=arpSpaMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_ArpTpaMask struct {
+	ArpTpaMask uint32 `protobuf:"varint,126,opt,name=arp_tpa_mask,json=arpTpaMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6SrcMask struct {
+	Ipv6SrcMask []byte `protobuf:"bytes,129,opt,name=ipv6_src_mask,json=ipv6SrcMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6DstMask struct {
+	Ipv6DstMask []byte `protobuf:"bytes,130,opt,name=ipv6_dst_mask,json=ipv6DstMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6FlabelMask struct {
+	Ipv6FlabelMask uint32 `protobuf:"varint,131,opt,name=ipv6_flabel_mask,json=ipv6FlabelMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_PbbIsidMask struct {
+	PbbIsidMask uint32 `protobuf:"varint,140,opt,name=pbb_isid_mask,json=pbbIsidMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_TunnelIdMask struct {
+	TunnelIdMask uint64 `protobuf:"varint,141,opt,name=tunnel_id_mask,json=tunnelIdMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6ExthdrMask struct {
+	Ipv6ExthdrMask uint32 `protobuf:"varint,142,opt,name=ipv6_exthdr_mask,json=ipv6ExthdrMask,proto3,oneof"`
+}
+
+func (*OfpOxmOfbField_TableMetadataMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_EthDstMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_EthSrcMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_VlanVidMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_Ipv4SrcMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_Ipv4DstMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_ArpSpaMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_ArpTpaMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_Ipv6SrcMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_Ipv6DstMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_Ipv6FlabelMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_PbbIsidMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_TunnelIdMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_Ipv6ExthdrMask) isOfpOxmOfbField_Mask() {}
+
+func (m *OfpOxmOfbField) GetMask() isOfpOxmOfbField_Mask {
+	if m != nil {
+		return m.Mask
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetTableMetadataMask() uint64 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_TableMetadataMask); ok {
+		return x.TableMetadataMask
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetEthDstMask() []byte {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_EthDstMask); ok {
+		return x.EthDstMask
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetEthSrcMask() []byte {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_EthSrcMask); ok {
+		return x.EthSrcMask
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetVlanVidMask() uint32 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_VlanVidMask); ok {
+		return x.VlanVidMask
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpv4SrcMask() uint32 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_Ipv4SrcMask); ok {
+		return x.Ipv4SrcMask
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpv4DstMask() uint32 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_Ipv4DstMask); ok {
+		return x.Ipv4DstMask
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetArpSpaMask() uint32 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_ArpSpaMask); ok {
+		return x.ArpSpaMask
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetArpTpaMask() uint32 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_ArpTpaMask); ok {
+		return x.ArpTpaMask
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpv6SrcMask() []byte {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_Ipv6SrcMask); ok {
+		return x.Ipv6SrcMask
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetIpv6DstMask() []byte {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_Ipv6DstMask); ok {
+		return x.Ipv6DstMask
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetIpv6FlabelMask() uint32 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_Ipv6FlabelMask); ok {
+		return x.Ipv6FlabelMask
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetPbbIsidMask() uint32 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_PbbIsidMask); ok {
+		return x.PbbIsidMask
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetTunnelIdMask() uint64 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_TunnelIdMask); ok {
+		return x.TunnelIdMask
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpv6ExthdrMask() uint32 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_Ipv6ExthdrMask); ok {
+		return x.Ipv6ExthdrMask
+	}
+	return 0
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*OfpOxmOfbField) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*OfpOxmOfbField_Port)(nil),
+		(*OfpOxmOfbField_PhysicalPort)(nil),
+		(*OfpOxmOfbField_TableMetadata)(nil),
+		(*OfpOxmOfbField_EthDst)(nil),
+		(*OfpOxmOfbField_EthSrc)(nil),
+		(*OfpOxmOfbField_EthType)(nil),
+		(*OfpOxmOfbField_VlanVid)(nil),
+		(*OfpOxmOfbField_VlanPcp)(nil),
+		(*OfpOxmOfbField_IpDscp)(nil),
+		(*OfpOxmOfbField_IpEcn)(nil),
+		(*OfpOxmOfbField_IpProto)(nil),
+		(*OfpOxmOfbField_Ipv4Src)(nil),
+		(*OfpOxmOfbField_Ipv4Dst)(nil),
+		(*OfpOxmOfbField_TcpSrc)(nil),
+		(*OfpOxmOfbField_TcpDst)(nil),
+		(*OfpOxmOfbField_UdpSrc)(nil),
+		(*OfpOxmOfbField_UdpDst)(nil),
+		(*OfpOxmOfbField_SctpSrc)(nil),
+		(*OfpOxmOfbField_SctpDst)(nil),
+		(*OfpOxmOfbField_Icmpv4Type)(nil),
+		(*OfpOxmOfbField_Icmpv4Code)(nil),
+		(*OfpOxmOfbField_ArpOp)(nil),
+		(*OfpOxmOfbField_ArpSpa)(nil),
+		(*OfpOxmOfbField_ArpTpa)(nil),
+		(*OfpOxmOfbField_ArpSha)(nil),
+		(*OfpOxmOfbField_ArpTha)(nil),
+		(*OfpOxmOfbField_Ipv6Src)(nil),
+		(*OfpOxmOfbField_Ipv6Dst)(nil),
+		(*OfpOxmOfbField_Ipv6Flabel)(nil),
+		(*OfpOxmOfbField_Icmpv6Type)(nil),
+		(*OfpOxmOfbField_Icmpv6Code)(nil),
+		(*OfpOxmOfbField_Ipv6NdTarget)(nil),
+		(*OfpOxmOfbField_Ipv6NdSsl)(nil),
+		(*OfpOxmOfbField_Ipv6NdTll)(nil),
+		(*OfpOxmOfbField_MplsLabel)(nil),
+		(*OfpOxmOfbField_MplsTc)(nil),
+		(*OfpOxmOfbField_MplsBos)(nil),
+		(*OfpOxmOfbField_PbbIsid)(nil),
+		(*OfpOxmOfbField_TunnelId)(nil),
+		(*OfpOxmOfbField_Ipv6Exthdr)(nil),
+		(*OfpOxmOfbField_TableMetadataMask)(nil),
+		(*OfpOxmOfbField_EthDstMask)(nil),
+		(*OfpOxmOfbField_EthSrcMask)(nil),
+		(*OfpOxmOfbField_VlanVidMask)(nil),
+		(*OfpOxmOfbField_Ipv4SrcMask)(nil),
+		(*OfpOxmOfbField_Ipv4DstMask)(nil),
+		(*OfpOxmOfbField_ArpSpaMask)(nil),
+		(*OfpOxmOfbField_ArpTpaMask)(nil),
+		(*OfpOxmOfbField_Ipv6SrcMask)(nil),
+		(*OfpOxmOfbField_Ipv6DstMask)(nil),
+		(*OfpOxmOfbField_Ipv6FlabelMask)(nil),
+		(*OfpOxmOfbField_PbbIsidMask)(nil),
+		(*OfpOxmOfbField_TunnelIdMask)(nil),
+		(*OfpOxmOfbField_Ipv6ExthdrMask)(nil),
+	}
+}
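+
+// Illustrative sketch (not part of the generated code): a masked VLAN VID
+// match. The Value and Mask oneofs are set with paired wrapper types, and
+// HasMask signals that the mask is meaningful.
+// OxmOfbFieldTypes_OFPXMT_OFB_VLAN_VID is assumed from the enum generated
+// earlier in this file.
+func exampleVlanVidWithMask(vid, mask uint32) *OfpOxmOfbField {
+	return &OfpOxmOfbField{
+		Type:    OxmOfbFieldTypes_OFPXMT_OFB_VLAN_VID,
+		HasMask: true,
+		Value:   &OfpOxmOfbField_VlanVid{VlanVid: vid},
+		Mask:    &OfpOxmOfbField_VlanVidMask{VlanVidMask: mask},
+	}
+}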
+
+// Header for OXM experimenter match fields.
+// The experimenter class should not use OXM_HEADER() macros for defining
+// fields due to this extra header.
+type OfpOxmExperimenterField struct {
+	OxmHeader            uint32   `protobuf:"varint,1,opt,name=oxm_header,json=oxmHeader,proto3" json:"oxm_header,omitempty"`
+	Experimenter         uint32   `protobuf:"varint,2,opt,name=experimenter,proto3" json:"experimenter,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpOxmExperimenterField) Reset()         { *m = OfpOxmExperimenterField{} }
+func (m *OfpOxmExperimenterField) String() string { return proto.CompactTextString(m) }
+func (*OfpOxmExperimenterField) ProtoMessage()    {}
+func (*OfpOxmExperimenterField) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{13}
+}
+
+func (m *OfpOxmExperimenterField) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpOxmExperimenterField.Unmarshal(m, b)
+}
+func (m *OfpOxmExperimenterField) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpOxmExperimenterField.Marshal(b, m, deterministic)
+}
+func (m *OfpOxmExperimenterField) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpOxmExperimenterField.Merge(m, src)
+}
+func (m *OfpOxmExperimenterField) XXX_Size() int {
+	return xxx_messageInfo_OfpOxmExperimenterField.Size(m)
+}
+func (m *OfpOxmExperimenterField) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpOxmExperimenterField.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpOxmExperimenterField proto.InternalMessageInfo
+
+func (m *OfpOxmExperimenterField) GetOxmHeader() uint32 {
+	if m != nil {
+		return m.OxmHeader
+	}
+	return 0
+}
+
+func (m *OfpOxmExperimenterField) GetExperimenter() uint32 {
+	if m != nil {
+		return m.Experimenter
+	}
+	return 0
+}
+
+// Action header that is common to all actions.  The length includes the
+// header and any padding used to make the action 64-bit aligned.
+// NB: The length of an action *must* always be a multiple of eight.
+type OfpAction struct {
+	Type OfpActionType `protobuf:"varint,1,opt,name=type,proto3,enum=openflow_13.OfpActionType" json:"type,omitempty"`
+	// Types that are valid to be assigned to Action:
+	//	*OfpAction_Output
+	//	*OfpAction_MplsTtl
+	//	*OfpAction_Push
+	//	*OfpAction_PopMpls
+	//	*OfpAction_Group
+	//	*OfpAction_NwTtl
+	//	*OfpAction_SetField
+	//	*OfpAction_Experimenter
+	Action               isOfpAction_Action `protobuf_oneof:"action"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *OfpAction) Reset()         { *m = OfpAction{} }
+func (m *OfpAction) String() string { return proto.CompactTextString(m) }
+func (*OfpAction) ProtoMessage()    {}
+func (*OfpAction) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{14}
+}
+
+func (m *OfpAction) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpAction.Unmarshal(m, b)
+}
+func (m *OfpAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpAction.Marshal(b, m, deterministic)
+}
+func (m *OfpAction) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpAction.Merge(m, src)
+}
+func (m *OfpAction) XXX_Size() int {
+	return xxx_messageInfo_OfpAction.Size(m)
+}
+func (m *OfpAction) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpAction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpAction proto.InternalMessageInfo
+
+func (m *OfpAction) GetType() OfpActionType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpActionType_OFPAT_OUTPUT
+}
+
+type isOfpAction_Action interface {
+	isOfpAction_Action()
+}
+
+type OfpAction_Output struct {
+	Output *OfpActionOutput `protobuf:"bytes,2,opt,name=output,proto3,oneof"`
+}
+
+type OfpAction_MplsTtl struct {
+	MplsTtl *OfpActionMplsTtl `protobuf:"bytes,3,opt,name=mpls_ttl,json=mplsTtl,proto3,oneof"`
+}
+
+type OfpAction_Push struct {
+	Push *OfpActionPush `protobuf:"bytes,4,opt,name=push,proto3,oneof"`
+}
+
+type OfpAction_PopMpls struct {
+	PopMpls *OfpActionPopMpls `protobuf:"bytes,5,opt,name=pop_mpls,json=popMpls,proto3,oneof"`
+}
+
+type OfpAction_Group struct {
+	Group *OfpActionGroup `protobuf:"bytes,6,opt,name=group,proto3,oneof"`
+}
+
+type OfpAction_NwTtl struct {
+	NwTtl *OfpActionNwTtl `protobuf:"bytes,7,opt,name=nw_ttl,json=nwTtl,proto3,oneof"`
+}
+
+type OfpAction_SetField struct {
+	SetField *OfpActionSetField `protobuf:"bytes,8,opt,name=set_field,json=setField,proto3,oneof"`
+}
+
+type OfpAction_Experimenter struct {
+	Experimenter *OfpActionExperimenter `protobuf:"bytes,9,opt,name=experimenter,proto3,oneof"`
+}
+
+func (*OfpAction_Output) isOfpAction_Action() {}
+
+func (*OfpAction_MplsTtl) isOfpAction_Action() {}
+
+func (*OfpAction_Push) isOfpAction_Action() {}
+
+func (*OfpAction_PopMpls) isOfpAction_Action() {}
+
+func (*OfpAction_Group) isOfpAction_Action() {}
+
+func (*OfpAction_NwTtl) isOfpAction_Action() {}
+
+func (*OfpAction_SetField) isOfpAction_Action() {}
+
+func (*OfpAction_Experimenter) isOfpAction_Action() {}
+
+func (m *OfpAction) GetAction() isOfpAction_Action {
+	if m != nil {
+		return m.Action
+	}
+	return nil
+}
+
+func (m *OfpAction) GetOutput() *OfpActionOutput {
+	if x, ok := m.GetAction().(*OfpAction_Output); ok {
+		return x.Output
+	}
+	return nil
+}
+
+func (m *OfpAction) GetMplsTtl() *OfpActionMplsTtl {
+	if x, ok := m.GetAction().(*OfpAction_MplsTtl); ok {
+		return x.MplsTtl
+	}
+	return nil
+}
+
+func (m *OfpAction) GetPush() *OfpActionPush {
+	if x, ok := m.GetAction().(*OfpAction_Push); ok {
+		return x.Push
+	}
+	return nil
+}
+
+func (m *OfpAction) GetPopMpls() *OfpActionPopMpls {
+	if x, ok := m.GetAction().(*OfpAction_PopMpls); ok {
+		return x.PopMpls
+	}
+	return nil
+}
+
+func (m *OfpAction) GetGroup() *OfpActionGroup {
+	if x, ok := m.GetAction().(*OfpAction_Group); ok {
+		return x.Group
+	}
+	return nil
+}
+
+func (m *OfpAction) GetNwTtl() *OfpActionNwTtl {
+	if x, ok := m.GetAction().(*OfpAction_NwTtl); ok {
+		return x.NwTtl
+	}
+	return nil
+}
+
+func (m *OfpAction) GetSetField() *OfpActionSetField {
+	if x, ok := m.GetAction().(*OfpAction_SetField); ok {
+		return x.SetField
+	}
+	return nil
+}
+
+func (m *OfpAction) GetExperimenter() *OfpActionExperimenter {
+	if x, ok := m.GetAction().(*OfpAction_Experimenter); ok {
+		return x.Experimenter
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*OfpAction) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*OfpAction_Output)(nil),
+		(*OfpAction_MplsTtl)(nil),
+		(*OfpAction_Push)(nil),
+		(*OfpAction_PopMpls)(nil),
+		(*OfpAction_Group)(nil),
+		(*OfpAction_NwTtl)(nil),
+		(*OfpAction_SetField)(nil),
+		(*OfpAction_Experimenter)(nil),
+	}
+}
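+
+// Illustrative sketch (not part of the generated code): wrapping an
+// OFPAT_OUTPUT action in the Action oneof.
+func exampleOutputAction(port uint32) *OfpAction {
+	return &OfpAction{
+		Type: OfpActionType_OFPAT_OUTPUT,
+		Action: &OfpAction_Output{
+			Output: &OfpActionOutput{Port: port, MaxLen: 0},
+		},
+	}
+}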
+
+// Action structure for OFPAT_OUTPUT, which sends packets out 'port'.
+// When the 'port' is the OFPP_CONTROLLER, 'max_len' indicates the max
+// number of bytes to send.  A 'max_len' of zero means no bytes of the
+// packet should be sent. A 'max_len' of OFPCML_NO_BUFFER means that
+// the packet is not buffered and the complete packet is to be sent to
+// the controller.
+type OfpActionOutput struct {
+	Port                 uint32   `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"`
+	MaxLen               uint32   `protobuf:"varint,2,opt,name=max_len,json=maxLen,proto3" json:"max_len,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpActionOutput) Reset()         { *m = OfpActionOutput{} }
+func (m *OfpActionOutput) String() string { return proto.CompactTextString(m) }
+func (*OfpActionOutput) ProtoMessage()    {}
+func (*OfpActionOutput) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{15}
+}
+
+func (m *OfpActionOutput) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpActionOutput.Unmarshal(m, b)
+}
+func (m *OfpActionOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpActionOutput.Marshal(b, m, deterministic)
+}
+func (m *OfpActionOutput) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpActionOutput.Merge(m, src)
+}
+func (m *OfpActionOutput) XXX_Size() int {
+	return xxx_messageInfo_OfpActionOutput.Size(m)
+}
+func (m *OfpActionOutput) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpActionOutput.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpActionOutput proto.InternalMessageInfo
+
+func (m *OfpActionOutput) GetPort() uint32 {
+	if m != nil {
+		return m.Port
+	}
+	return 0
+}
+
+func (m *OfpActionOutput) GetMaxLen() uint32 {
+	if m != nil {
+		return m.MaxLen
+	}
+	return 0
+}
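+
+// Illustrative sketch (not part of the generated code): per the comment
+// above, an output action that sends the complete, unbuffered packet to
+// the controller. OfpPortNo_OFPP_CONTROLLER and
+// OfpControllerMaxLen_OFPCML_NO_BUFFER are assumed from the enums
+// generated earlier in this file.
+func exampleOutputToController() *OfpActionOutput {
+	return &OfpActionOutput{
+		Port:   uint32(OfpPortNo_OFPP_CONTROLLER),
+		MaxLen: uint32(OfpControllerMaxLen_OFPCML_NO_BUFFER),
+	}
+}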
+
+// Action structure for OFPAT_SET_MPLS_TTL.
+type OfpActionMplsTtl struct {
+	MplsTtl              uint32   `protobuf:"varint,1,opt,name=mpls_ttl,json=mplsTtl,proto3" json:"mpls_ttl,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpActionMplsTtl) Reset()         { *m = OfpActionMplsTtl{} }
+func (m *OfpActionMplsTtl) String() string { return proto.CompactTextString(m) }
+func (*OfpActionMplsTtl) ProtoMessage()    {}
+func (*OfpActionMplsTtl) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{16}
+}
+
+func (m *OfpActionMplsTtl) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpActionMplsTtl.Unmarshal(m, b)
+}
+func (m *OfpActionMplsTtl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpActionMplsTtl.Marshal(b, m, deterministic)
+}
+func (m *OfpActionMplsTtl) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpActionMplsTtl.Merge(m, src)
+}
+func (m *OfpActionMplsTtl) XXX_Size() int {
+	return xxx_messageInfo_OfpActionMplsTtl.Size(m)
+}
+func (m *OfpActionMplsTtl) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpActionMplsTtl.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpActionMplsTtl proto.InternalMessageInfo
+
+func (m *OfpActionMplsTtl) GetMplsTtl() uint32 {
+	if m != nil {
+		return m.MplsTtl
+	}
+	return 0
+}
+
+// Action structure for OFPAT_PUSH_VLAN/MPLS/PBB.
+type OfpActionPush struct {
+	Ethertype            uint32   `protobuf:"varint,1,opt,name=ethertype,proto3" json:"ethertype,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpActionPush) Reset()         { *m = OfpActionPush{} }
+func (m *OfpActionPush) String() string { return proto.CompactTextString(m) }
+func (*OfpActionPush) ProtoMessage()    {}
+func (*OfpActionPush) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{17}
+}
+
+func (m *OfpActionPush) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpActionPush.Unmarshal(m, b)
+}
+func (m *OfpActionPush) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpActionPush.Marshal(b, m, deterministic)
+}
+func (m *OfpActionPush) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpActionPush.Merge(m, src)
+}
+func (m *OfpActionPush) XXX_Size() int {
+	return xxx_messageInfo_OfpActionPush.Size(m)
+}
+func (m *OfpActionPush) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpActionPush.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpActionPush proto.InternalMessageInfo
+
+func (m *OfpActionPush) GetEthertype() uint32 {
+	if m != nil {
+		return m.Ethertype
+	}
+	return 0
+}
+
+// Action structure for OFPAT_POP_MPLS.
+type OfpActionPopMpls struct {
+	Ethertype            uint32   `protobuf:"varint,1,opt,name=ethertype,proto3" json:"ethertype,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpActionPopMpls) Reset()         { *m = OfpActionPopMpls{} }
+func (m *OfpActionPopMpls) String() string { return proto.CompactTextString(m) }
+func (*OfpActionPopMpls) ProtoMessage()    {}
+func (*OfpActionPopMpls) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{18}
+}
+
+func (m *OfpActionPopMpls) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpActionPopMpls.Unmarshal(m, b)
+}
+func (m *OfpActionPopMpls) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpActionPopMpls.Marshal(b, m, deterministic)
+}
+func (m *OfpActionPopMpls) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpActionPopMpls.Merge(m, src)
+}
+func (m *OfpActionPopMpls) XXX_Size() int {
+	return xxx_messageInfo_OfpActionPopMpls.Size(m)
+}
+func (m *OfpActionPopMpls) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpActionPopMpls.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpActionPopMpls proto.InternalMessageInfo
+
+func (m *OfpActionPopMpls) GetEthertype() uint32 {
+	if m != nil {
+		return m.Ethertype
+	}
+	return 0
+}
+
+// Action structure for OFPAT_GROUP.
+type OfpActionGroup struct {
+	GroupId              uint32   `protobuf:"varint,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpActionGroup) Reset()         { *m = OfpActionGroup{} }
+func (m *OfpActionGroup) String() string { return proto.CompactTextString(m) }
+func (*OfpActionGroup) ProtoMessage()    {}
+func (*OfpActionGroup) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{19}
+}
+
+func (m *OfpActionGroup) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpActionGroup.Unmarshal(m, b)
+}
+func (m *OfpActionGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpActionGroup.Marshal(b, m, deterministic)
+}
+func (m *OfpActionGroup) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpActionGroup.Merge(m, src)
+}
+func (m *OfpActionGroup) XXX_Size() int {
+	return xxx_messageInfo_OfpActionGroup.Size(m)
+}
+func (m *OfpActionGroup) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpActionGroup.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpActionGroup proto.InternalMessageInfo
+
+func (m *OfpActionGroup) GetGroupId() uint32 {
+	if m != nil {
+		return m.GroupId
+	}
+	return 0
+}
+
+// Action structure for OFPAT_SET_NW_TTL.
+type OfpActionNwTtl struct {
+	NwTtl                uint32   `protobuf:"varint,1,opt,name=nw_ttl,json=nwTtl,proto3" json:"nw_ttl,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpActionNwTtl) Reset()         { *m = OfpActionNwTtl{} }
+func (m *OfpActionNwTtl) String() string { return proto.CompactTextString(m) }
+func (*OfpActionNwTtl) ProtoMessage()    {}
+func (*OfpActionNwTtl) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{20}
+}
+
+func (m *OfpActionNwTtl) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpActionNwTtl.Unmarshal(m, b)
+}
+func (m *OfpActionNwTtl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpActionNwTtl.Marshal(b, m, deterministic)
+}
+func (m *OfpActionNwTtl) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpActionNwTtl.Merge(m, src)
+}
+func (m *OfpActionNwTtl) XXX_Size() int {
+	return xxx_messageInfo_OfpActionNwTtl.Size(m)
+}
+func (m *OfpActionNwTtl) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpActionNwTtl.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpActionNwTtl proto.InternalMessageInfo
+
+func (m *OfpActionNwTtl) GetNwTtl() uint32 {
+	if m != nil {
+		return m.NwTtl
+	}
+	return 0
+}
+
+// Action structure for OFPAT_SET_FIELD.
+type OfpActionSetField struct {
+	Field                *OfpOxmField `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *OfpActionSetField) Reset()         { *m = OfpActionSetField{} }
+func (m *OfpActionSetField) String() string { return proto.CompactTextString(m) }
+func (*OfpActionSetField) ProtoMessage()    {}
+func (*OfpActionSetField) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{21}
+}
+
+func (m *OfpActionSetField) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpActionSetField.Unmarshal(m, b)
+}
+func (m *OfpActionSetField) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpActionSetField.Marshal(b, m, deterministic)
+}
+func (m *OfpActionSetField) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpActionSetField.Merge(m, src)
+}
+func (m *OfpActionSetField) XXX_Size() int {
+	return xxx_messageInfo_OfpActionSetField.Size(m)
+}
+func (m *OfpActionSetField) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpActionSetField.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpActionSetField proto.InternalMessageInfo
+
+func (m *OfpActionSetField) GetField() *OfpOxmField {
+	if m != nil {
+		return m.Field
+	}
+	return nil
+}
+
+// Action header for OFPAT_EXPERIMENTER.
+// The rest of the body is experimenter-defined.
+type OfpActionExperimenter struct {
+	Experimenter         uint32   `protobuf:"varint,1,opt,name=experimenter,proto3" json:"experimenter,omitempty"`
+	Data                 []byte   `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpActionExperimenter) Reset()         { *m = OfpActionExperimenter{} }
+func (m *OfpActionExperimenter) String() string { return proto.CompactTextString(m) }
+func (*OfpActionExperimenter) ProtoMessage()    {}
+func (*OfpActionExperimenter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{22}
+}
+
+func (m *OfpActionExperimenter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpActionExperimenter.Unmarshal(m, b)
+}
+func (m *OfpActionExperimenter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpActionExperimenter.Marshal(b, m, deterministic)
+}
+func (m *OfpActionExperimenter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpActionExperimenter.Merge(m, src)
+}
+func (m *OfpActionExperimenter) XXX_Size() int {
+	return xxx_messageInfo_OfpActionExperimenter.Size(m)
+}
+func (m *OfpActionExperimenter) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpActionExperimenter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpActionExperimenter proto.InternalMessageInfo
+
+func (m *OfpActionExperimenter) GetExperimenter() uint32 {
+	if m != nil {
+		return m.Experimenter
+	}
+	return 0
+}
+
+func (m *OfpActionExperimenter) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+// Instruction header that is common to all instructions.  The length includes
+// the header and any padding used to make the instruction 64-bit aligned.
+// NB: The length of an instruction *must* always be a multiple of eight.
+type OfpInstruction struct {
+	Type uint32 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
+	// Types that are valid to be assigned to Data:
+	//	*OfpInstruction_GotoTable
+	//	*OfpInstruction_WriteMetadata
+	//	*OfpInstruction_Actions
+	//	*OfpInstruction_Meter
+	//	*OfpInstruction_Experimenter
+	Data                 isOfpInstruction_Data `protobuf_oneof:"data"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *OfpInstruction) Reset()         { *m = OfpInstruction{} }
+func (m *OfpInstruction) String() string { return proto.CompactTextString(m) }
+func (*OfpInstruction) ProtoMessage()    {}
+func (*OfpInstruction) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{23}
+}
+
+func (m *OfpInstruction) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpInstruction.Unmarshal(m, b)
+}
+func (m *OfpInstruction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpInstruction.Marshal(b, m, deterministic)
+}
+func (m *OfpInstruction) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpInstruction.Merge(m, src)
+}
+func (m *OfpInstruction) XXX_Size() int {
+	return xxx_messageInfo_OfpInstruction.Size(m)
+}
+func (m *OfpInstruction) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpInstruction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpInstruction proto.InternalMessageInfo
+
+func (m *OfpInstruction) GetType() uint32 {
+	if m != nil {
+		return m.Type
+	}
+	return 0
+}
+
+type isOfpInstruction_Data interface {
+	isOfpInstruction_Data()
+}
+
+type OfpInstruction_GotoTable struct {
+	GotoTable *OfpInstructionGotoTable `protobuf:"bytes,2,opt,name=goto_table,json=gotoTable,proto3,oneof"`
+}
+
+type OfpInstruction_WriteMetadata struct {
+	WriteMetadata *OfpInstructionWriteMetadata `protobuf:"bytes,3,opt,name=write_metadata,json=writeMetadata,proto3,oneof"`
+}
+
+type OfpInstruction_Actions struct {
+	Actions *OfpInstructionActions `protobuf:"bytes,4,opt,name=actions,proto3,oneof"`
+}
+
+type OfpInstruction_Meter struct {
+	Meter *OfpInstructionMeter `protobuf:"bytes,5,opt,name=meter,proto3,oneof"`
+}
+
+type OfpInstruction_Experimenter struct {
+	Experimenter *OfpInstructionExperimenter `protobuf:"bytes,6,opt,name=experimenter,proto3,oneof"`
+}
+
+func (*OfpInstruction_GotoTable) isOfpInstruction_Data() {}
+
+func (*OfpInstruction_WriteMetadata) isOfpInstruction_Data() {}
+
+func (*OfpInstruction_Actions) isOfpInstruction_Data() {}
+
+func (*OfpInstruction_Meter) isOfpInstruction_Data() {}
+
+func (*OfpInstruction_Experimenter) isOfpInstruction_Data() {}
+
+func (m *OfpInstruction) GetData() isOfpInstruction_Data {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func (m *OfpInstruction) GetGotoTable() *OfpInstructionGotoTable {
+	if x, ok := m.GetData().(*OfpInstruction_GotoTable); ok {
+		return x.GotoTable
+	}
+	return nil
+}
+
+func (m *OfpInstruction) GetWriteMetadata() *OfpInstructionWriteMetadata {
+	if x, ok := m.GetData().(*OfpInstruction_WriteMetadata); ok {
+		return x.WriteMetadata
+	}
+	return nil
+}
+
+func (m *OfpInstruction) GetActions() *OfpInstructionActions {
+	if x, ok := m.GetData().(*OfpInstruction_Actions); ok {
+		return x.Actions
+	}
+	return nil
+}
+
+func (m *OfpInstruction) GetMeter() *OfpInstructionMeter {
+	if x, ok := m.GetData().(*OfpInstruction_Meter); ok {
+		return x.Meter
+	}
+	return nil
+}
+
+func (m *OfpInstruction) GetExperimenter() *OfpInstructionExperimenter {
+	if x, ok := m.GetData().(*OfpInstruction_Experimenter); ok {
+		return x.Experimenter
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*OfpInstruction) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*OfpInstruction_GotoTable)(nil),
+		(*OfpInstruction_WriteMetadata)(nil),
+		(*OfpInstruction_Actions)(nil),
+		(*OfpInstruction_Meter)(nil),
+		(*OfpInstruction_Experimenter)(nil),
+	}
+}
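+
+// A minimal usage sketch for the Data oneof above: exactly one wrapper type
+// (OfpInstruction_GotoTable, OfpInstruction_WriteMetadata,
+// OfpInstruction_Actions, OfpInstruction_Meter, or
+// OfpInstruction_Experimenter) is assigned to Data, and Type carries the
+// matching ofp_instruction_type constant (OFPIT_APPLY_ACTIONS = 4 in
+// OpenFlow 1.3; the literal is an assumption here, not a constant defined in
+// this file):
+//
+//	inst := &OfpInstruction{
+//		Type: 4, // OFPIT_APPLY_ACTIONS (assumed value)
+//		Data: &OfpInstruction_Actions{
+//			Actions: &OfpInstructionActions{Actions: []*OfpAction{}},
+//		},
+//	}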
+
+// Instruction structure for OFPIT_GOTO_TABLE
+type OfpInstructionGotoTable struct {
+	TableId              uint32   `protobuf:"varint,1,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpInstructionGotoTable) Reset()         { *m = OfpInstructionGotoTable{} }
+func (m *OfpInstructionGotoTable) String() string { return proto.CompactTextString(m) }
+func (*OfpInstructionGotoTable) ProtoMessage()    {}
+func (*OfpInstructionGotoTable) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{24}
+}
+
+func (m *OfpInstructionGotoTable) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpInstructionGotoTable.Unmarshal(m, b)
+}
+func (m *OfpInstructionGotoTable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpInstructionGotoTable.Marshal(b, m, deterministic)
+}
+func (m *OfpInstructionGotoTable) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpInstructionGotoTable.Merge(m, src)
+}
+func (m *OfpInstructionGotoTable) XXX_Size() int {
+	return xxx_messageInfo_OfpInstructionGotoTable.Size(m)
+}
+func (m *OfpInstructionGotoTable) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpInstructionGotoTable.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpInstructionGotoTable proto.InternalMessageInfo
+
+func (m *OfpInstructionGotoTable) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+// Instruction structure for OFPIT_WRITE_METADATA
+type OfpInstructionWriteMetadata struct {
+	Metadata             uint64   `protobuf:"varint,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+	MetadataMask         uint64   `protobuf:"varint,2,opt,name=metadata_mask,json=metadataMask,proto3" json:"metadata_mask,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpInstructionWriteMetadata) Reset()         { *m = OfpInstructionWriteMetadata{} }
+func (m *OfpInstructionWriteMetadata) String() string { return proto.CompactTextString(m) }
+func (*OfpInstructionWriteMetadata) ProtoMessage()    {}
+func (*OfpInstructionWriteMetadata) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{25}
+}
+
+func (m *OfpInstructionWriteMetadata) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpInstructionWriteMetadata.Unmarshal(m, b)
+}
+func (m *OfpInstructionWriteMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpInstructionWriteMetadata.Marshal(b, m, deterministic)
+}
+func (m *OfpInstructionWriteMetadata) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpInstructionWriteMetadata.Merge(m, src)
+}
+func (m *OfpInstructionWriteMetadata) XXX_Size() int {
+	return xxx_messageInfo_OfpInstructionWriteMetadata.Size(m)
+}
+func (m *OfpInstructionWriteMetadata) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpInstructionWriteMetadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpInstructionWriteMetadata proto.InternalMessageInfo
+
+func (m *OfpInstructionWriteMetadata) GetMetadata() uint64 {
+	if m != nil {
+		return m.Metadata
+	}
+	return 0
+}
+
+func (m *OfpInstructionWriteMetadata) GetMetadataMask() uint64 {
+	if m != nil {
+		return m.MetadataMask
+	}
+	return 0
+}
+
+// Instruction structure for OFPIT_WRITE/APPLY/CLEAR_ACTIONS
+type OfpInstructionActions struct {
+	Actions              []*OfpAction `protobuf:"bytes,1,rep,name=actions,proto3" json:"actions,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *OfpInstructionActions) Reset()         { *m = OfpInstructionActions{} }
+func (m *OfpInstructionActions) String() string { return proto.CompactTextString(m) }
+func (*OfpInstructionActions) ProtoMessage()    {}
+func (*OfpInstructionActions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{26}
+}
+
+func (m *OfpInstructionActions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpInstructionActions.Unmarshal(m, b)
+}
+func (m *OfpInstructionActions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpInstructionActions.Marshal(b, m, deterministic)
+}
+func (m *OfpInstructionActions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpInstructionActions.Merge(m, src)
+}
+func (m *OfpInstructionActions) XXX_Size() int {
+	return xxx_messageInfo_OfpInstructionActions.Size(m)
+}
+func (m *OfpInstructionActions) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpInstructionActions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpInstructionActions proto.InternalMessageInfo
+
+func (m *OfpInstructionActions) GetActions() []*OfpAction {
+	if m != nil {
+		return m.Actions
+	}
+	return nil
+}
+
+// Instruction structure for OFPIT_METER
+type OfpInstructionMeter struct {
+	MeterId              uint32   `protobuf:"varint,1,opt,name=meter_id,json=meterId,proto3" json:"meter_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpInstructionMeter) Reset()         { *m = OfpInstructionMeter{} }
+func (m *OfpInstructionMeter) String() string { return proto.CompactTextString(m) }
+func (*OfpInstructionMeter) ProtoMessage()    {}
+func (*OfpInstructionMeter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{27}
+}
+
+func (m *OfpInstructionMeter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpInstructionMeter.Unmarshal(m, b)
+}
+func (m *OfpInstructionMeter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpInstructionMeter.Marshal(b, m, deterministic)
+}
+func (m *OfpInstructionMeter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpInstructionMeter.Merge(m, src)
+}
+func (m *OfpInstructionMeter) XXX_Size() int {
+	return xxx_messageInfo_OfpInstructionMeter.Size(m)
+}
+func (m *OfpInstructionMeter) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpInstructionMeter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpInstructionMeter proto.InternalMessageInfo
+
+func (m *OfpInstructionMeter) GetMeterId() uint32 {
+	if m != nil {
+		return m.MeterId
+	}
+	return 0
+}
+
+// Instruction structure for experimental instructions
+type OfpInstructionExperimenter struct {
+	Experimenter uint32 `protobuf:"varint,1,opt,name=experimenter,proto3" json:"experimenter,omitempty"`
+	// Experimenter-defined arbitrary additional data.
+	Data                 []byte   `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpInstructionExperimenter) Reset()         { *m = OfpInstructionExperimenter{} }
+func (m *OfpInstructionExperimenter) String() string { return proto.CompactTextString(m) }
+func (*OfpInstructionExperimenter) ProtoMessage()    {}
+func (*OfpInstructionExperimenter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{28}
+}
+
+func (m *OfpInstructionExperimenter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpInstructionExperimenter.Unmarshal(m, b)
+}
+func (m *OfpInstructionExperimenter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpInstructionExperimenter.Marshal(b, m, deterministic)
+}
+func (m *OfpInstructionExperimenter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpInstructionExperimenter.Merge(m, src)
+}
+func (m *OfpInstructionExperimenter) XXX_Size() int {
+	return xxx_messageInfo_OfpInstructionExperimenter.Size(m)
+}
+func (m *OfpInstructionExperimenter) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpInstructionExperimenter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpInstructionExperimenter proto.InternalMessageInfo
+
+func (m *OfpInstructionExperimenter) GetExperimenter() uint32 {
+	if m != nil {
+		return m.Experimenter
+	}
+	return 0
+}
+
+func (m *OfpInstructionExperimenter) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+// Flow setup and teardown (controller -> datapath).
+type OfpFlowMod struct {
+	//ofp_header header;
+	Cookie               uint64            `protobuf:"varint,1,opt,name=cookie,proto3" json:"cookie,omitempty"`
+	CookieMask           uint64            `protobuf:"varint,2,opt,name=cookie_mask,json=cookieMask,proto3" json:"cookie_mask,omitempty"`
+	TableId              uint32            `protobuf:"varint,3,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	Command              OfpFlowModCommand `protobuf:"varint,4,opt,name=command,proto3,enum=openflow_13.OfpFlowModCommand" json:"command,omitempty"`
+	IdleTimeout          uint32            `protobuf:"varint,5,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"`
+	HardTimeout          uint32            `protobuf:"varint,6,opt,name=hard_timeout,json=hardTimeout,proto3" json:"hard_timeout,omitempty"`
+	Priority             uint32            `protobuf:"varint,7,opt,name=priority,proto3" json:"priority,omitempty"`
+	BufferId             uint32            `protobuf:"varint,8,opt,name=buffer_id,json=bufferId,proto3" json:"buffer_id,omitempty"`
+	OutPort              uint32            `protobuf:"varint,9,opt,name=out_port,json=outPort,proto3" json:"out_port,omitempty"`
+	OutGroup             uint32            `protobuf:"varint,10,opt,name=out_group,json=outGroup,proto3" json:"out_group,omitempty"`
+	Flags                uint32            `protobuf:"varint,11,opt,name=flags,proto3" json:"flags,omitempty"`
+	Match                *OfpMatch         `protobuf:"bytes,12,opt,name=match,proto3" json:"match,omitempty"`
+	Instructions         []*OfpInstruction `protobuf:"bytes,13,rep,name=instructions,proto3" json:"instructions,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *OfpFlowMod) Reset()         { *m = OfpFlowMod{} }
+func (m *OfpFlowMod) String() string { return proto.CompactTextString(m) }
+func (*OfpFlowMod) ProtoMessage()    {}
+func (*OfpFlowMod) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{29}
+}
+
+func (m *OfpFlowMod) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpFlowMod.Unmarshal(m, b)
+}
+func (m *OfpFlowMod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpFlowMod.Marshal(b, m, deterministic)
+}
+func (m *OfpFlowMod) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpFlowMod.Merge(m, src)
+}
+func (m *OfpFlowMod) XXX_Size() int {
+	return xxx_messageInfo_OfpFlowMod.Size(m)
+}
+func (m *OfpFlowMod) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpFlowMod.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpFlowMod proto.InternalMessageInfo
+
+func (m *OfpFlowMod) GetCookie() uint64 {
+	if m != nil {
+		return m.Cookie
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetCookieMask() uint64 {
+	if m != nil {
+		return m.CookieMask
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetCommand() OfpFlowModCommand {
+	if m != nil {
+		return m.Command
+	}
+	return OfpFlowModCommand_OFPFC_ADD
+}
+
+func (m *OfpFlowMod) GetIdleTimeout() uint32 {
+	if m != nil {
+		return m.IdleTimeout
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetHardTimeout() uint32 {
+	if m != nil {
+		return m.HardTimeout
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetPriority() uint32 {
+	if m != nil {
+		return m.Priority
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetBufferId() uint32 {
+	if m != nil {
+		return m.BufferId
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetOutPort() uint32 {
+	if m != nil {
+		return m.OutPort
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetOutGroup() uint32 {
+	if m != nil {
+		return m.OutGroup
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetFlags() uint32 {
+	if m != nil {
+		return m.Flags
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetMatch() *OfpMatch {
+	if m != nil {
+		return m.Match
+	}
+	return nil
+}
+
+func (m *OfpFlowMod) GetInstructions() []*OfpInstruction {
+	if m != nil {
+		return m.Instructions
+	}
+	return nil
+}
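+
+// A minimal sketch of an OFPFC_ADD flow-mod assembled from the fields above;
+// the table, priority, and timeout values are arbitrary illustrations, and
+// the instruction list would hold entries built as sketched after
+// OfpInstruction's oneof wrappers:
+//
+//	fm := &OfpFlowMod{
+//		Command:      OfpFlowModCommand_OFPFC_ADD,
+//		TableId:      0,
+//		Priority:     1000,
+//		IdleTimeout:  60,
+//		Match:        &OfpMatch{},
+//		Instructions: []*OfpInstruction{inst}, // inst as in the earlier sketch
+//	}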
+
+// Bucket for use in groups.
+type OfpBucket struct {
+	Weight               uint32       `protobuf:"varint,1,opt,name=weight,proto3" json:"weight,omitempty"`
+	WatchPort            uint32       `protobuf:"varint,2,opt,name=watch_port,json=watchPort,proto3" json:"watch_port,omitempty"`
+	WatchGroup           uint32       `protobuf:"varint,3,opt,name=watch_group,json=watchGroup,proto3" json:"watch_group,omitempty"`
+	Actions              []*OfpAction `protobuf:"bytes,4,rep,name=actions,proto3" json:"actions,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *OfpBucket) Reset()         { *m = OfpBucket{} }
+func (m *OfpBucket) String() string { return proto.CompactTextString(m) }
+func (*OfpBucket) ProtoMessage()    {}
+func (*OfpBucket) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{30}
+}
+
+func (m *OfpBucket) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpBucket.Unmarshal(m, b)
+}
+func (m *OfpBucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpBucket.Marshal(b, m, deterministic)
+}
+func (m *OfpBucket) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpBucket.Merge(m, src)
+}
+func (m *OfpBucket) XXX_Size() int {
+	return xxx_messageInfo_OfpBucket.Size(m)
+}
+func (m *OfpBucket) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpBucket.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpBucket proto.InternalMessageInfo
+
+func (m *OfpBucket) GetWeight() uint32 {
+	if m != nil {
+		return m.Weight
+	}
+	return 0
+}
+
+func (m *OfpBucket) GetWatchPort() uint32 {
+	if m != nil {
+		return m.WatchPort
+	}
+	return 0
+}
+
+func (m *OfpBucket) GetWatchGroup() uint32 {
+	if m != nil {
+		return m.WatchGroup
+	}
+	return 0
+}
+
+func (m *OfpBucket) GetActions() []*OfpAction {
+	if m != nil {
+		return m.Actions
+	}
+	return nil
+}
+
+// Group setup and teardown (controller -> datapath).
+type OfpGroupMod struct {
+	//ofp_header header;
+	Command              OfpGroupModCommand `protobuf:"varint,1,opt,name=command,proto3,enum=openflow_13.OfpGroupModCommand" json:"command,omitempty"`
+	Type                 OfpGroupType       `protobuf:"varint,2,opt,name=type,proto3,enum=openflow_13.OfpGroupType" json:"type,omitempty"`
+	GroupId              uint32             `protobuf:"varint,3,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
+	Buckets              []*OfpBucket       `protobuf:"bytes,4,rep,name=buckets,proto3" json:"buckets,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *OfpGroupMod) Reset()         { *m = OfpGroupMod{} }
+func (m *OfpGroupMod) String() string { return proto.CompactTextString(m) }
+func (*OfpGroupMod) ProtoMessage()    {}
+func (*OfpGroupMod) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{31}
+}
+
+func (m *OfpGroupMod) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpGroupMod.Unmarshal(m, b)
+}
+func (m *OfpGroupMod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpGroupMod.Marshal(b, m, deterministic)
+}
+func (m *OfpGroupMod) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpGroupMod.Merge(m, src)
+}
+func (m *OfpGroupMod) XXX_Size() int {
+	return xxx_messageInfo_OfpGroupMod.Size(m)
+}
+func (m *OfpGroupMod) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpGroupMod.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpGroupMod proto.InternalMessageInfo
+
+func (m *OfpGroupMod) GetCommand() OfpGroupModCommand {
+	if m != nil {
+		return m.Command
+	}
+	return OfpGroupModCommand_OFPGC_ADD
+}
+
+func (m *OfpGroupMod) GetType() OfpGroupType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpGroupType_OFPGT_ALL
+}
+
+func (m *OfpGroupMod) GetGroupId() uint32 {
+	if m != nil {
+		return m.GroupId
+	}
+	return 0
+}
+
+func (m *OfpGroupMod) GetBuckets() []*OfpBucket {
+	if m != nil {
+		return m.Buckets
+	}
+	return nil
+}
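+
+// A minimal sketch of an OFPGC_ADD group-mod; every identifier here is
+// defined in this file, and the single bucket carries an empty action list
+// purely for illustration:
+//
+//	gm := &OfpGroupMod{
+//		Command: OfpGroupModCommand_OFPGC_ADD,
+//		Type:    OfpGroupType_OFPGT_ALL,
+//		GroupId: 1,
+//		Buckets: []*OfpBucket{{Weight: 1, Actions: []*OfpAction{}}},
+//	}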
+
+// Send packet (controller -> datapath).
+type OfpPacketOut struct {
+	//ofp_header header;
+	BufferId uint32       `protobuf:"varint,1,opt,name=buffer_id,json=bufferId,proto3" json:"buffer_id,omitempty"`
+	InPort   uint32       `protobuf:"varint,2,opt,name=in_port,json=inPort,proto3" json:"in_port,omitempty"`
+	Actions  []*OfpAction `protobuf:"bytes,3,rep,name=actions,proto3" json:"actions,omitempty"`
+	// The variable size action list is optionally followed by packet data.
+	// This data is only present and meaningful if buffer_id == -1.
+	Data                 []byte   `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpPacketOut) Reset()         { *m = OfpPacketOut{} }
+func (m *OfpPacketOut) String() string { return proto.CompactTextString(m) }
+func (*OfpPacketOut) ProtoMessage()    {}
+func (*OfpPacketOut) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{32}
+}
+
+func (m *OfpPacketOut) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpPacketOut.Unmarshal(m, b)
+}
+func (m *OfpPacketOut) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpPacketOut.Marshal(b, m, deterministic)
+}
+func (m *OfpPacketOut) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpPacketOut.Merge(m, src)
+}
+func (m *OfpPacketOut) XXX_Size() int {
+	return xxx_messageInfo_OfpPacketOut.Size(m)
+}
+func (m *OfpPacketOut) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpPacketOut.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpPacketOut proto.InternalMessageInfo
+
+func (m *OfpPacketOut) GetBufferId() uint32 {
+	if m != nil {
+		return m.BufferId
+	}
+	return 0
+}
+
+func (m *OfpPacketOut) GetInPort() uint32 {
+	if m != nil {
+		return m.InPort
+	}
+	return 0
+}
+
+func (m *OfpPacketOut) GetActions() []*OfpAction {
+	if m != nil {
+		return m.Actions
+	}
+	return nil
+}
+
+func (m *OfpPacketOut) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
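+
+// A minimal sketch of a packet-out that carries its own payload: per the
+// comment on Data above, the bytes are only meaningful when buffer_id == -1,
+// which in this unsigned encoding is 0xffffffff (OFP_NO_BUFFER);
+// rawEthernetFrame is a hypothetical []byte holding the frame to send:
+//
+//	po := &OfpPacketOut{
+//		BufferId: 0xffffffff, // OFP_NO_BUFFER
+//		InPort:   1,
+//		Actions:  []*OfpAction{},
+//		Data:     rawEthernetFrame,
+//	}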
+
+// Packet received on port (datapath -> controller).
+type OfpPacketIn struct {
+	//ofp_header header;
+	BufferId             uint32            `protobuf:"varint,1,opt,name=buffer_id,json=bufferId,proto3" json:"buffer_id,omitempty"`
+	Reason               OfpPacketInReason `protobuf:"varint,2,opt,name=reason,proto3,enum=openflow_13.OfpPacketInReason" json:"reason,omitempty"`
+	TableId              uint32            `protobuf:"varint,3,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	Cookie               uint64            `protobuf:"varint,4,opt,name=cookie,proto3" json:"cookie,omitempty"`
+	Match                *OfpMatch         `protobuf:"bytes,5,opt,name=match,proto3" json:"match,omitempty"`
+	Data                 []byte            `protobuf:"bytes,6,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *OfpPacketIn) Reset()         { *m = OfpPacketIn{} }
+func (m *OfpPacketIn) String() string { return proto.CompactTextString(m) }
+func (*OfpPacketIn) ProtoMessage()    {}
+func (*OfpPacketIn) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{33}
+}
+
+func (m *OfpPacketIn) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpPacketIn.Unmarshal(m, b)
+}
+func (m *OfpPacketIn) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpPacketIn.Marshal(b, m, deterministic)
+}
+func (m *OfpPacketIn) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpPacketIn.Merge(m, src)
+}
+func (m *OfpPacketIn) XXX_Size() int {
+	return xxx_messageInfo_OfpPacketIn.Size(m)
+}
+func (m *OfpPacketIn) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpPacketIn.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpPacketIn proto.InternalMessageInfo
+
+func (m *OfpPacketIn) GetBufferId() uint32 {
+	if m != nil {
+		return m.BufferId
+	}
+	return 0
+}
+
+func (m *OfpPacketIn) GetReason() OfpPacketInReason {
+	if m != nil {
+		return m.Reason
+	}
+	return OfpPacketInReason_OFPR_NO_MATCH
+}
+
+func (m *OfpPacketIn) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+func (m *OfpPacketIn) GetCookie() uint64 {
+	if m != nil {
+		return m.Cookie
+	}
+	return 0
+}
+
+func (m *OfpPacketIn) GetMatch() *OfpMatch {
+	if m != nil {
+		return m.Match
+	}
+	return nil
+}
+
+func (m *OfpPacketIn) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+// Flow removed (datapath -> controller).
+type OfpFlowRemoved struct {
+	//ofp_header header;
+	Cookie               uint64               `protobuf:"varint,1,opt,name=cookie,proto3" json:"cookie,omitempty"`
+	Priority             uint32               `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"`
+	Reason               OfpFlowRemovedReason `protobuf:"varint,3,opt,name=reason,proto3,enum=openflow_13.OfpFlowRemovedReason" json:"reason,omitempty"`
+	TableId              uint32               `protobuf:"varint,4,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	DurationSec          uint32               `protobuf:"varint,5,opt,name=duration_sec,json=durationSec,proto3" json:"duration_sec,omitempty"`
+	DurationNsec         uint32               `protobuf:"varint,6,opt,name=duration_nsec,json=durationNsec,proto3" json:"duration_nsec,omitempty"`
+	IdleTimeout          uint32               `protobuf:"varint,7,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"`
+	HardTimeout          uint32               `protobuf:"varint,8,opt,name=hard_timeout,json=hardTimeout,proto3" json:"hard_timeout,omitempty"`
+	PacketCount          uint64               `protobuf:"varint,9,opt,name=packet_count,json=packetCount,proto3" json:"packet_count,omitempty"`
+	ByteCount            uint64               `protobuf:"varint,10,opt,name=byte_count,json=byteCount,proto3" json:"byte_count,omitempty"`
+	Match                *OfpMatch            `protobuf:"bytes,121,opt,name=match,proto3" json:"match,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *OfpFlowRemoved) Reset()         { *m = OfpFlowRemoved{} }
+func (m *OfpFlowRemoved) String() string { return proto.CompactTextString(m) }
+func (*OfpFlowRemoved) ProtoMessage()    {}
+func (*OfpFlowRemoved) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{34}
+}
+
+func (m *OfpFlowRemoved) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpFlowRemoved.Unmarshal(m, b)
+}
+func (m *OfpFlowRemoved) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpFlowRemoved.Marshal(b, m, deterministic)
+}
+func (m *OfpFlowRemoved) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpFlowRemoved.Merge(m, src)
+}
+func (m *OfpFlowRemoved) XXX_Size() int {
+	return xxx_messageInfo_OfpFlowRemoved.Size(m)
+}
+func (m *OfpFlowRemoved) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpFlowRemoved.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpFlowRemoved proto.InternalMessageInfo
+
+func (m *OfpFlowRemoved) GetCookie() uint64 {
+	if m != nil {
+		return m.Cookie
+	}
+	return 0
+}
+
+func (m *OfpFlowRemoved) GetPriority() uint32 {
+	if m != nil {
+		return m.Priority
+	}
+	return 0
+}
+
+func (m *OfpFlowRemoved) GetReason() OfpFlowRemovedReason {
+	if m != nil {
+		return m.Reason
+	}
+	return OfpFlowRemovedReason_OFPRR_IDLE_TIMEOUT
+}
+
+func (m *OfpFlowRemoved) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+func (m *OfpFlowRemoved) GetDurationSec() uint32 {
+	if m != nil {
+		return m.DurationSec
+	}
+	return 0
+}
+
+func (m *OfpFlowRemoved) GetDurationNsec() uint32 {
+	if m != nil {
+		return m.DurationNsec
+	}
+	return 0
+}
+
+func (m *OfpFlowRemoved) GetIdleTimeout() uint32 {
+	if m != nil {
+		return m.IdleTimeout
+	}
+	return 0
+}
+
+func (m *OfpFlowRemoved) GetHardTimeout() uint32 {
+	if m != nil {
+		return m.HardTimeout
+	}
+	return 0
+}
+
+func (m *OfpFlowRemoved) GetPacketCount() uint64 {
+	if m != nil {
+		return m.PacketCount
+	}
+	return 0
+}
+
+func (m *OfpFlowRemoved) GetByteCount() uint64 {
+	if m != nil {
+		return m.ByteCount
+	}
+	return 0
+}
+
+func (m *OfpFlowRemoved) GetMatch() *OfpMatch {
+	if m != nil {
+		return m.Match
+	}
+	return nil
+}
+
+// Common header for all meter bands
+type OfpMeterBandHeader struct {
+	Type      OfpMeterBandType `protobuf:"varint,1,opt,name=type,proto3,enum=openflow_13.OfpMeterBandType" json:"type,omitempty"`
+	Rate      uint32           `protobuf:"varint,2,opt,name=rate,proto3" json:"rate,omitempty"`
+	BurstSize uint32           `protobuf:"varint,3,opt,name=burst_size,json=burstSize,proto3" json:"burst_size,omitempty"`
+	// Types that are valid to be assigned to Data:
+	//	*OfpMeterBandHeader_Drop
+	//	*OfpMeterBandHeader_DscpRemark
+	//	*OfpMeterBandHeader_Experimenter
+	Data                 isOfpMeterBandHeader_Data `protobuf_oneof:"data"`
+	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
+	XXX_unrecognized     []byte                    `json:"-"`
+	XXX_sizecache        int32                     `json:"-"`
+}
+
+func (m *OfpMeterBandHeader) Reset()         { *m = OfpMeterBandHeader{} }
+func (m *OfpMeterBandHeader) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterBandHeader) ProtoMessage()    {}
+func (*OfpMeterBandHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{35}
+}
+
+func (m *OfpMeterBandHeader) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterBandHeader.Unmarshal(m, b)
+}
+func (m *OfpMeterBandHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterBandHeader.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterBandHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterBandHeader.Merge(m, src)
+}
+func (m *OfpMeterBandHeader) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterBandHeader.Size(m)
+}
+func (m *OfpMeterBandHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterBandHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterBandHeader proto.InternalMessageInfo
+
+func (m *OfpMeterBandHeader) GetType() OfpMeterBandType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpMeterBandType_OFPMBT_INVALID
+}
+
+func (m *OfpMeterBandHeader) GetRate() uint32 {
+	if m != nil {
+		return m.Rate
+	}
+	return 0
+}
+
+func (m *OfpMeterBandHeader) GetBurstSize() uint32 {
+	if m != nil {
+		return m.BurstSize
+	}
+	return 0
+}
+
+type isOfpMeterBandHeader_Data interface {
+	isOfpMeterBandHeader_Data()
+}
+
+type OfpMeterBandHeader_Drop struct {
+	Drop *OfpMeterBandDrop `protobuf:"bytes,4,opt,name=drop,proto3,oneof"`
+}
+
+type OfpMeterBandHeader_DscpRemark struct {
+	DscpRemark *OfpMeterBandDscpRemark `protobuf:"bytes,5,opt,name=dscp_remark,json=dscpRemark,proto3,oneof"`
+}
+
+type OfpMeterBandHeader_Experimenter struct {
+	Experimenter *OfpMeterBandExperimenter `protobuf:"bytes,6,opt,name=experimenter,proto3,oneof"`
+}
+
+func (*OfpMeterBandHeader_Drop) isOfpMeterBandHeader_Data() {}
+
+func (*OfpMeterBandHeader_DscpRemark) isOfpMeterBandHeader_Data() {}
+
+func (*OfpMeterBandHeader_Experimenter) isOfpMeterBandHeader_Data() {}
+
+func (m *OfpMeterBandHeader) GetData() isOfpMeterBandHeader_Data {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func (m *OfpMeterBandHeader) GetDrop() *OfpMeterBandDrop {
+	if x, ok := m.GetData().(*OfpMeterBandHeader_Drop); ok {
+		return x.Drop
+	}
+	return nil
+}
+
+func (m *OfpMeterBandHeader) GetDscpRemark() *OfpMeterBandDscpRemark {
+	if x, ok := m.GetData().(*OfpMeterBandHeader_DscpRemark); ok {
+		return x.DscpRemark
+	}
+	return nil
+}
+
+func (m *OfpMeterBandHeader) GetExperimenter() *OfpMeterBandExperimenter {
+	if x, ok := m.GetData().(*OfpMeterBandHeader_Experimenter); ok {
+		return x.Experimenter
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*OfpMeterBandHeader) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*OfpMeterBandHeader_Drop)(nil),
+		(*OfpMeterBandHeader_DscpRemark)(nil),
+		(*OfpMeterBandHeader_Experimenter)(nil),
+	}
+}
+
+// OFPMBT_DROP band - drop packets
+type OfpMeterBandDrop struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpMeterBandDrop) Reset()         { *m = OfpMeterBandDrop{} }
+func (m *OfpMeterBandDrop) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterBandDrop) ProtoMessage()    {}
+func (*OfpMeterBandDrop) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{36}
+}
+
+func (m *OfpMeterBandDrop) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterBandDrop.Unmarshal(m, b)
+}
+func (m *OfpMeterBandDrop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterBandDrop.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterBandDrop) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterBandDrop.Merge(m, src)
+}
+func (m *OfpMeterBandDrop) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterBandDrop.Size(m)
+}
+func (m *OfpMeterBandDrop) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterBandDrop.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterBandDrop proto.InternalMessageInfo
+
+// OFPMBT_DSCP_REMARK band - Remark DSCP in the IP header
+type OfpMeterBandDscpRemark struct {
+	PrecLevel            uint32   `protobuf:"varint,1,opt,name=prec_level,json=precLevel,proto3" json:"prec_level,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpMeterBandDscpRemark) Reset()         { *m = OfpMeterBandDscpRemark{} }
+func (m *OfpMeterBandDscpRemark) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterBandDscpRemark) ProtoMessage()    {}
+func (*OfpMeterBandDscpRemark) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{37}
+}
+
+func (m *OfpMeterBandDscpRemark) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterBandDscpRemark.Unmarshal(m, b)
+}
+func (m *OfpMeterBandDscpRemark) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterBandDscpRemark.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterBandDscpRemark) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterBandDscpRemark.Merge(m, src)
+}
+func (m *OfpMeterBandDscpRemark) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterBandDscpRemark.Size(m)
+}
+func (m *OfpMeterBandDscpRemark) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterBandDscpRemark.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterBandDscpRemark proto.InternalMessageInfo
+
+func (m *OfpMeterBandDscpRemark) GetPrecLevel() uint32 {
+	if m != nil {
+		return m.PrecLevel
+	}
+	return 0
+}
+
+// OFPMBT_EXPERIMENTER band - Experimenter type.
+// The rest of the band is experimenter-defined.
+type OfpMeterBandExperimenter struct {
+	Experimenter         uint32   `protobuf:"varint,1,opt,name=experimenter,proto3" json:"experimenter,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpMeterBandExperimenter) Reset()         { *m = OfpMeterBandExperimenter{} }
+func (m *OfpMeterBandExperimenter) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterBandExperimenter) ProtoMessage()    {}
+func (*OfpMeterBandExperimenter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{38}
+}
+
+func (m *OfpMeterBandExperimenter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterBandExperimenter.Unmarshal(m, b)
+}
+func (m *OfpMeterBandExperimenter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterBandExperimenter.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterBandExperimenter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterBandExperimenter.Merge(m, src)
+}
+func (m *OfpMeterBandExperimenter) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterBandExperimenter.Size(m)
+}
+func (m *OfpMeterBandExperimenter) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterBandExperimenter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterBandExperimenter proto.InternalMessageInfo
+
+func (m *OfpMeterBandExperimenter) GetExperimenter() uint32 {
+	if m != nil {
+		return m.Experimenter
+	}
+	return 0
+}
+
+// Meter configuration. OFPT_METER_MOD.
+type OfpMeterMod struct {
+	Command              OfpMeterModCommand    `protobuf:"varint,1,opt,name=command,proto3,enum=openflow_13.OfpMeterModCommand" json:"command,omitempty"`
+	Flags                uint32                `protobuf:"varint,2,opt,name=flags,proto3" json:"flags,omitempty"`
+	MeterId              uint32                `protobuf:"varint,3,opt,name=meter_id,json=meterId,proto3" json:"meter_id,omitempty"`
+	Bands                []*OfpMeterBandHeader `protobuf:"bytes,4,rep,name=bands,proto3" json:"bands,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *OfpMeterMod) Reset()         { *m = OfpMeterMod{} }
+func (m *OfpMeterMod) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterMod) ProtoMessage()    {}
+func (*OfpMeterMod) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{39}
+}
+
+func (m *OfpMeterMod) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterMod.Unmarshal(m, b)
+}
+func (m *OfpMeterMod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterMod.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterMod) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterMod.Merge(m, src)
+}
+func (m *OfpMeterMod) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterMod.Size(m)
+}
+func (m *OfpMeterMod) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterMod.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterMod proto.InternalMessageInfo
+
+func (m *OfpMeterMod) GetCommand() OfpMeterModCommand {
+	if m != nil {
+		return m.Command
+	}
+	return OfpMeterModCommand_OFPMC_ADD
+}
+
+func (m *OfpMeterMod) GetFlags() uint32 {
+	if m != nil {
+		return m.Flags
+	}
+	return 0
+}
+
+func (m *OfpMeterMod) GetMeterId() uint32 {
+	if m != nil {
+		return m.MeterId
+	}
+	return 0
+}
+
+func (m *OfpMeterMod) GetBands() []*OfpMeterBandHeader {
+	if m != nil {
+		return m.Bands
+	}
+	return nil
+}
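+
+// A minimal sketch of an OFPMC_ADD meter-mod with a single drop band. The
+// band's Type must agree with the oneof member set in its Data;
+// OfpMeterBandType_OFPMBT_DROP is assumed from the generated enum's naming
+// pattern:
+//
+//	mm := &OfpMeterMod{
+//		Command: OfpMeterModCommand_OFPMC_ADD,
+//		MeterId: 1,
+//		Bands: []*OfpMeterBandHeader{{
+//			Type: OfpMeterBandType_OFPMBT_DROP, // assumed constant name
+//			Rate: 1024,
+//			Data: &OfpMeterBandHeader_Drop{Drop: &OfpMeterBandDrop{}},
+//		}},
+//	}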
+
+// OFPT_ERROR: Error message (datapath -> controller).
+type OfpErrorMsg struct {
+	//ofp_header header;
+	Type                 uint32   `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
+	Code                 uint32   `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"`
+	Data                 []byte   `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpErrorMsg) Reset()         { *m = OfpErrorMsg{} }
+func (m *OfpErrorMsg) String() string { return proto.CompactTextString(m) }
+func (*OfpErrorMsg) ProtoMessage()    {}
+func (*OfpErrorMsg) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{40}
+}
+
+func (m *OfpErrorMsg) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpErrorMsg.Unmarshal(m, b)
+}
+func (m *OfpErrorMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpErrorMsg.Marshal(b, m, deterministic)
+}
+func (m *OfpErrorMsg) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpErrorMsg.Merge(m, src)
+}
+func (m *OfpErrorMsg) XXX_Size() int {
+	return xxx_messageInfo_OfpErrorMsg.Size(m)
+}
+func (m *OfpErrorMsg) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpErrorMsg.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpErrorMsg proto.InternalMessageInfo
+
+func (m *OfpErrorMsg) GetType() uint32 {
+	if m != nil {
+		return m.Type
+	}
+	return 0
+}
+
+func (m *OfpErrorMsg) GetCode() uint32 {
+	if m != nil {
+		return m.Code
+	}
+	return 0
+}
+
+func (m *OfpErrorMsg) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+// OFPET_EXPERIMENTER: Error message (datapath -> controller).
+type OfpErrorExperimenterMsg struct {
+	Type                 uint32   `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
+	ExpType              uint32   `protobuf:"varint,2,opt,name=exp_type,json=expType,proto3" json:"exp_type,omitempty"`
+	Experimenter         uint32   `protobuf:"varint,3,opt,name=experimenter,proto3" json:"experimenter,omitempty"`
+	Data                 []byte   `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpErrorExperimenterMsg) Reset()         { *m = OfpErrorExperimenterMsg{} }
+func (m *OfpErrorExperimenterMsg) String() string { return proto.CompactTextString(m) }
+func (*OfpErrorExperimenterMsg) ProtoMessage()    {}
+func (*OfpErrorExperimenterMsg) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{41}
+}
+
+func (m *OfpErrorExperimenterMsg) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpErrorExperimenterMsg.Unmarshal(m, b)
+}
+func (m *OfpErrorExperimenterMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpErrorExperimenterMsg.Marshal(b, m, deterministic)
+}
+func (m *OfpErrorExperimenterMsg) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpErrorExperimenterMsg.Merge(m, src)
+}
+func (m *OfpErrorExperimenterMsg) XXX_Size() int {
+	return xxx_messageInfo_OfpErrorExperimenterMsg.Size(m)
+}
+func (m *OfpErrorExperimenterMsg) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpErrorExperimenterMsg.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpErrorExperimenterMsg proto.InternalMessageInfo
+
+func (m *OfpErrorExperimenterMsg) GetType() uint32 {
+	if m != nil {
+		return m.Type
+	}
+	return 0
+}
+
+func (m *OfpErrorExperimenterMsg) GetExpType() uint32 {
+	if m != nil {
+		return m.ExpType
+	}
+	return 0
+}
+
+func (m *OfpErrorExperimenterMsg) GetExperimenter() uint32 {
+	if m != nil {
+		return m.Experimenter
+	}
+	return 0
+}
+
+func (m *OfpErrorExperimenterMsg) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+type OfpMultipartRequest struct {
+	//ofp_header header;
+	Type                 OfpMultipartType `protobuf:"varint,1,opt,name=type,proto3,enum=openflow_13.OfpMultipartType" json:"type,omitempty"`
+	Flags                uint32           `protobuf:"varint,2,opt,name=flags,proto3" json:"flags,omitempty"`
+	Body                 []byte           `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *OfpMultipartRequest) Reset()         { *m = OfpMultipartRequest{} }
+func (m *OfpMultipartRequest) String() string { return proto.CompactTextString(m) }
+func (*OfpMultipartRequest) ProtoMessage()    {}
+func (*OfpMultipartRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{42}
+}
+
+func (m *OfpMultipartRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMultipartRequest.Unmarshal(m, b)
+}
+func (m *OfpMultipartRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMultipartRequest.Marshal(b, m, deterministic)
+}
+func (m *OfpMultipartRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMultipartRequest.Merge(m, src)
+}
+func (m *OfpMultipartRequest) XXX_Size() int {
+	return xxx_messageInfo_OfpMultipartRequest.Size(m)
+}
+func (m *OfpMultipartRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMultipartRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMultipartRequest proto.InternalMessageInfo
+
+func (m *OfpMultipartRequest) GetType() OfpMultipartType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpMultipartType_OFPMP_DESC
+}
+
+func (m *OfpMultipartRequest) GetFlags() uint32 {
+	if m != nil {
+		return m.Flags
+	}
+	return 0
+}
+
+func (m *OfpMultipartRequest) GetBody() []byte {
+	if m != nil {
+		return m.Body
+	}
+	return nil
+}
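+
+// Body is an opaque, already-encoded request body whose layout depends on
+// Type. A plausible sketch for OFPMP_FLOW (the enum constant name is assumed
+// from the generated naming pattern) marshals the OfpFlowStatsRequest defined
+// later in this file into Body, with 0xffffffff standing in for
+// OFPP_ANY/OFPG_ANY and error handling elided:
+//
+//	body, _ := proto.Marshal(&OfpFlowStatsRequest{
+//		TableId:  0,
+//		OutPort:  0xffffffff,
+//		OutGroup: 0xffffffff,
+//	})
+//	req := &OfpMultipartRequest{Type: OfpMultipartType_OFPMP_FLOW, Body: body}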
+
+type OfpMultipartReply struct {
+	//ofp_header header;
+	Type                 OfpMultipartType `protobuf:"varint,1,opt,name=type,proto3,enum=openflow_13.OfpMultipartType" json:"type,omitempty"`
+	Flags                uint32           `protobuf:"varint,2,opt,name=flags,proto3" json:"flags,omitempty"`
+	Body                 []byte           `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *OfpMultipartReply) Reset()         { *m = OfpMultipartReply{} }
+func (m *OfpMultipartReply) String() string { return proto.CompactTextString(m) }
+func (*OfpMultipartReply) ProtoMessage()    {}
+func (*OfpMultipartReply) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{43}
+}
+
+func (m *OfpMultipartReply) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMultipartReply.Unmarshal(m, b)
+}
+func (m *OfpMultipartReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMultipartReply.Marshal(b, m, deterministic)
+}
+func (m *OfpMultipartReply) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMultipartReply.Merge(m, src)
+}
+func (m *OfpMultipartReply) XXX_Size() int {
+	return xxx_messageInfo_OfpMultipartReply.Size(m)
+}
+func (m *OfpMultipartReply) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMultipartReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMultipartReply proto.InternalMessageInfo
+
+func (m *OfpMultipartReply) GetType() OfpMultipartType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpMultipartType_OFPMP_DESC
+}
+
+func (m *OfpMultipartReply) GetFlags() uint32 {
+	if m != nil {
+		return m.Flags
+	}
+	return 0
+}
+
+func (m *OfpMultipartReply) GetBody() []byte {
+	if m != nil {
+		return m.Body
+	}
+	return nil
+}
+
+// Body of reply to OFPMP_DESC request.  Each entry is a NULL-terminated
+// ASCII string.
+type OfpDesc struct {
+	MfrDesc              string   `protobuf:"bytes,1,opt,name=mfr_desc,json=mfrDesc,proto3" json:"mfr_desc,omitempty"`
+	HwDesc               string   `protobuf:"bytes,2,opt,name=hw_desc,json=hwDesc,proto3" json:"hw_desc,omitempty"`
+	SwDesc               string   `protobuf:"bytes,3,opt,name=sw_desc,json=swDesc,proto3" json:"sw_desc,omitempty"`
+	SerialNum            string   `protobuf:"bytes,4,opt,name=serial_num,json=serialNum,proto3" json:"serial_num,omitempty"`
+	DpDesc               string   `protobuf:"bytes,5,opt,name=dp_desc,json=dpDesc,proto3" json:"dp_desc,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpDesc) Reset()         { *m = OfpDesc{} }
+func (m *OfpDesc) String() string { return proto.CompactTextString(m) }
+func (*OfpDesc) ProtoMessage()    {}
+func (*OfpDesc) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{44}
+}
+
+func (m *OfpDesc) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpDesc.Unmarshal(m, b)
+}
+func (m *OfpDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpDesc.Marshal(b, m, deterministic)
+}
+func (m *OfpDesc) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpDesc.Merge(m, src)
+}
+func (m *OfpDesc) XXX_Size() int {
+	return xxx_messageInfo_OfpDesc.Size(m)
+}
+func (m *OfpDesc) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpDesc.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpDesc proto.InternalMessageInfo
+
+func (m *OfpDesc) GetMfrDesc() string {
+	if m != nil {
+		return m.MfrDesc
+	}
+	return ""
+}
+
+func (m *OfpDesc) GetHwDesc() string {
+	if m != nil {
+		return m.HwDesc
+	}
+	return ""
+}
+
+func (m *OfpDesc) GetSwDesc() string {
+	if m != nil {
+		return m.SwDesc
+	}
+	return ""
+}
+
+func (m *OfpDesc) GetSerialNum() string {
+	if m != nil {
+		return m.SerialNum
+	}
+	return ""
+}
+
+func (m *OfpDesc) GetDpDesc() string {
+	if m != nil {
+		return m.DpDesc
+	}
+	return ""
+}
+
+// Body for ofp_multipart_request of type OFPMP_FLOW.
+type OfpFlowStatsRequest struct {
+	TableId              uint32    `protobuf:"varint,1,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	OutPort              uint32    `protobuf:"varint,2,opt,name=out_port,json=outPort,proto3" json:"out_port,omitempty"`
+	OutGroup             uint32    `protobuf:"varint,3,opt,name=out_group,json=outGroup,proto3" json:"out_group,omitempty"`
+	Cookie               uint64    `protobuf:"varint,4,opt,name=cookie,proto3" json:"cookie,omitempty"`
+	CookieMask           uint64    `protobuf:"varint,5,opt,name=cookie_mask,json=cookieMask,proto3" json:"cookie_mask,omitempty"`
+	Match                *OfpMatch `protobuf:"bytes,6,opt,name=match,proto3" json:"match,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *OfpFlowStatsRequest) Reset()         { *m = OfpFlowStatsRequest{} }
+func (m *OfpFlowStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*OfpFlowStatsRequest) ProtoMessage()    {}
+func (*OfpFlowStatsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{45}
+}
+
+func (m *OfpFlowStatsRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpFlowStatsRequest.Unmarshal(m, b)
+}
+func (m *OfpFlowStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpFlowStatsRequest.Marshal(b, m, deterministic)
+}
+func (m *OfpFlowStatsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpFlowStatsRequest.Merge(m, src)
+}
+func (m *OfpFlowStatsRequest) XXX_Size() int {
+	return xxx_messageInfo_OfpFlowStatsRequest.Size(m)
+}
+func (m *OfpFlowStatsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpFlowStatsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpFlowStatsRequest proto.InternalMessageInfo
+
+func (m *OfpFlowStatsRequest) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+func (m *OfpFlowStatsRequest) GetOutPort() uint32 {
+	if m != nil {
+		return m.OutPort
+	}
+	return 0
+}
+
+func (m *OfpFlowStatsRequest) GetOutGroup() uint32 {
+	if m != nil {
+		return m.OutGroup
+	}
+	return 0
+}
+
+func (m *OfpFlowStatsRequest) GetCookie() uint64 {
+	if m != nil {
+		return m.Cookie
+	}
+	return 0
+}
+
+func (m *OfpFlowStatsRequest) GetCookieMask() uint64 {
+	if m != nil {
+		return m.CookieMask
+	}
+	return 0
+}
+
+func (m *OfpFlowStatsRequest) GetMatch() *OfpMatch {
+	if m != nil {
+		return m.Match
+	}
+	return nil
+}
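+
+// Usage sketch: building an OFPMP_FLOW request body that matches every flow.
+// The wildcard values (OFPTT_ALL = 0xff, OFPP_ANY/OFPG_ANY = 0xffffffff) come
+// from the OpenFlow 1.3 specification, not from this file, so treat them as
+// assumptions here.
+//
+//	req := &OfpFlowStatsRequest{
+//		TableId:  0xff,        // OFPTT_ALL: inspect every table
+//		OutPort:  0xffffffff,  // OFPP_ANY: no output-port filter
+//		OutGroup: 0xffffffff,  // OFPG_ANY: no output-group filter
+//		Match:    &OfpMatch{}, // empty match wildcards all flows
+//	}
+//
+// Leaving Cookie and CookieMask at zero disables the cookie filter.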
+
+// Body of reply to OFPMP_FLOW request.
+type OfpFlowStats struct {
+	Id                   uint64            `protobuf:"varint,14,opt,name=id,proto3" json:"id,omitempty"`
+	TableId              uint32            `protobuf:"varint,1,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	DurationSec          uint32            `protobuf:"varint,2,opt,name=duration_sec,json=durationSec,proto3" json:"duration_sec,omitempty"`
+	DurationNsec         uint32            `protobuf:"varint,3,opt,name=duration_nsec,json=durationNsec,proto3" json:"duration_nsec,omitempty"`
+	Priority             uint32            `protobuf:"varint,4,opt,name=priority,proto3" json:"priority,omitempty"`
+	IdleTimeout          uint32            `protobuf:"varint,5,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"`
+	HardTimeout          uint32            `protobuf:"varint,6,opt,name=hard_timeout,json=hardTimeout,proto3" json:"hard_timeout,omitempty"`
+	Flags                uint32            `protobuf:"varint,7,opt,name=flags,proto3" json:"flags,omitempty"`
+	Cookie               uint64            `protobuf:"varint,8,opt,name=cookie,proto3" json:"cookie,omitempty"`
+	PacketCount          uint64            `protobuf:"varint,9,opt,name=packet_count,json=packetCount,proto3" json:"packet_count,omitempty"`
+	ByteCount            uint64            `protobuf:"varint,10,opt,name=byte_count,json=byteCount,proto3" json:"byte_count,omitempty"`
+	Match                *OfpMatch         `protobuf:"bytes,12,opt,name=match,proto3" json:"match,omitempty"`
+	Instructions         []*OfpInstruction `protobuf:"bytes,13,rep,name=instructions,proto3" json:"instructions,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *OfpFlowStats) Reset()         { *m = OfpFlowStats{} }
+func (m *OfpFlowStats) String() string { return proto.CompactTextString(m) }
+func (*OfpFlowStats) ProtoMessage()    {}
+func (*OfpFlowStats) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{46}
+}
+
+func (m *OfpFlowStats) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpFlowStats.Unmarshal(m, b)
+}
+func (m *OfpFlowStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpFlowStats.Marshal(b, m, deterministic)
+}
+func (m *OfpFlowStats) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpFlowStats.Merge(m, src)
+}
+func (m *OfpFlowStats) XXX_Size() int {
+	return xxx_messageInfo_OfpFlowStats.Size(m)
+}
+func (m *OfpFlowStats) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpFlowStats.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpFlowStats proto.InternalMessageInfo
+
+func (m *OfpFlowStats) GetId() uint64 {
+	if m != nil {
+		return m.Id
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetDurationSec() uint32 {
+	if m != nil {
+		return m.DurationSec
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetDurationNsec() uint32 {
+	if m != nil {
+		return m.DurationNsec
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetPriority() uint32 {
+	if m != nil {
+		return m.Priority
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetIdleTimeout() uint32 {
+	if m != nil {
+		return m.IdleTimeout
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetHardTimeout() uint32 {
+	if m != nil {
+		return m.HardTimeout
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetFlags() uint32 {
+	if m != nil {
+		return m.Flags
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetCookie() uint64 {
+	if m != nil {
+		return m.Cookie
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetPacketCount() uint64 {
+	if m != nil {
+		return m.PacketCount
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetByteCount() uint64 {
+	if m != nil {
+		return m.ByteCount
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetMatch() *OfpMatch {
+	if m != nil {
+		return m.Match
+	}
+	return nil
+}
+
+func (m *OfpFlowStats) GetInstructions() []*OfpInstruction {
+	if m != nil {
+		return m.Instructions
+	}
+	return nil
+}
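+
+// Usage sketch: summarising one decoded OFPMP_FLOW reply entry. Only the
+// accessors generated above are used; the output format is illustrative.
+//
+//	func summarize(fs *OfpFlowStats) string {
+//		return fmt.Sprintf("table=%d prio=%d pkts=%d bytes=%d instrs=%d",
+//			fs.GetTableId(), fs.GetPriority(), fs.GetPacketCount(),
+//			fs.GetByteCount(), len(fs.GetInstructions()))
+//	}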
+
+// Body for ofp_multipart_request of type OFPMP_AGGREGATE.
+type OfpAggregateStatsRequest struct {
+	TableId              uint32    `protobuf:"varint,1,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	OutPort              uint32    `protobuf:"varint,2,opt,name=out_port,json=outPort,proto3" json:"out_port,omitempty"`
+	OutGroup             uint32    `protobuf:"varint,3,opt,name=out_group,json=outGroup,proto3" json:"out_group,omitempty"`
+	Cookie               uint64    `protobuf:"varint,4,opt,name=cookie,proto3" json:"cookie,omitempty"`
+	CookieMask           uint64    `protobuf:"varint,5,opt,name=cookie_mask,json=cookieMask,proto3" json:"cookie_mask,omitempty"`
+	Match                *OfpMatch `protobuf:"bytes,6,opt,name=match,proto3" json:"match,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *OfpAggregateStatsRequest) Reset()         { *m = OfpAggregateStatsRequest{} }
+func (m *OfpAggregateStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*OfpAggregateStatsRequest) ProtoMessage()    {}
+func (*OfpAggregateStatsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{47}
+}
+
+func (m *OfpAggregateStatsRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpAggregateStatsRequest.Unmarshal(m, b)
+}
+func (m *OfpAggregateStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpAggregateStatsRequest.Marshal(b, m, deterministic)
+}
+func (m *OfpAggregateStatsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpAggregateStatsRequest.Merge(m, src)
+}
+func (m *OfpAggregateStatsRequest) XXX_Size() int {
+	return xxx_messageInfo_OfpAggregateStatsRequest.Size(m)
+}
+func (m *OfpAggregateStatsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpAggregateStatsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpAggregateStatsRequest proto.InternalMessageInfo
+
+func (m *OfpAggregateStatsRequest) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+func (m *OfpAggregateStatsRequest) GetOutPort() uint32 {
+	if m != nil {
+		return m.OutPort
+	}
+	return 0
+}
+
+func (m *OfpAggregateStatsRequest) GetOutGroup() uint32 {
+	if m != nil {
+		return m.OutGroup
+	}
+	return 0
+}
+
+func (m *OfpAggregateStatsRequest) GetCookie() uint64 {
+	if m != nil {
+		return m.Cookie
+	}
+	return 0
+}
+
+func (m *OfpAggregateStatsRequest) GetCookieMask() uint64 {
+	if m != nil {
+		return m.CookieMask
+	}
+	return 0
+}
+
+func (m *OfpAggregateStatsRequest) GetMatch() *OfpMatch {
+	if m != nil {
+		return m.Match
+	}
+	return nil
+}
+
+// Body of reply to OFPMP_AGGREGATE request.
+type OfpAggregateStatsReply struct {
+	PacketCount          uint64   `protobuf:"varint,1,opt,name=packet_count,json=packetCount,proto3" json:"packet_count,omitempty"`
+	ByteCount            uint64   `protobuf:"varint,2,opt,name=byte_count,json=byteCount,proto3" json:"byte_count,omitempty"`
+	FlowCount            uint32   `protobuf:"varint,3,opt,name=flow_count,json=flowCount,proto3" json:"flow_count,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpAggregateStatsReply) Reset()         { *m = OfpAggregateStatsReply{} }
+func (m *OfpAggregateStatsReply) String() string { return proto.CompactTextString(m) }
+func (*OfpAggregateStatsReply) ProtoMessage()    {}
+func (*OfpAggregateStatsReply) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{48}
+}
+
+func (m *OfpAggregateStatsReply) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpAggregateStatsReply.Unmarshal(m, b)
+}
+func (m *OfpAggregateStatsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpAggregateStatsReply.Marshal(b, m, deterministic)
+}
+func (m *OfpAggregateStatsReply) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpAggregateStatsReply.Merge(m, src)
+}
+func (m *OfpAggregateStatsReply) XXX_Size() int {
+	return xxx_messageInfo_OfpAggregateStatsReply.Size(m)
+}
+func (m *OfpAggregateStatsReply) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpAggregateStatsReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpAggregateStatsReply proto.InternalMessageInfo
+
+func (m *OfpAggregateStatsReply) GetPacketCount() uint64 {
+	if m != nil {
+		return m.PacketCount
+	}
+	return 0
+}
+
+func (m *OfpAggregateStatsReply) GetByteCount() uint64 {
+	if m != nil {
+		return m.ByteCount
+	}
+	return 0
+}
+
+func (m *OfpAggregateStatsReply) GetFlowCount() uint32 {
+	if m != nil {
+		return m.FlowCount
+	}
+	return 0
+}
+
+// Common header for all Table Feature Properties
+type OfpTableFeatureProperty struct {
+	Type OfpTableFeaturePropType `protobuf:"varint,1,opt,name=type,proto3,enum=openflow_13.OfpTableFeaturePropType" json:"type,omitempty"`
+	// Types that are valid to be assigned to Value:
+	//	*OfpTableFeatureProperty_Instructions
+	//	*OfpTableFeatureProperty_NextTables
+	//	*OfpTableFeatureProperty_Actions
+	//	*OfpTableFeatureProperty_Oxm
+	//	*OfpTableFeatureProperty_Experimenter
+	Value                isOfpTableFeatureProperty_Value `protobuf_oneof:"value"`
+	XXX_NoUnkeyedLiteral struct{}                        `json:"-"`
+	XXX_unrecognized     []byte                          `json:"-"`
+	XXX_sizecache        int32                           `json:"-"`
+}
+
+func (m *OfpTableFeatureProperty) Reset()         { *m = OfpTableFeatureProperty{} }
+func (m *OfpTableFeatureProperty) String() string { return proto.CompactTextString(m) }
+func (*OfpTableFeatureProperty) ProtoMessage()    {}
+func (*OfpTableFeatureProperty) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{49}
+}
+
+func (m *OfpTableFeatureProperty) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpTableFeatureProperty.Unmarshal(m, b)
+}
+func (m *OfpTableFeatureProperty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpTableFeatureProperty.Marshal(b, m, deterministic)
+}
+func (m *OfpTableFeatureProperty) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpTableFeatureProperty.Merge(m, src)
+}
+func (m *OfpTableFeatureProperty) XXX_Size() int {
+	return xxx_messageInfo_OfpTableFeatureProperty.Size(m)
+}
+func (m *OfpTableFeatureProperty) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpTableFeatureProperty.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpTableFeatureProperty proto.InternalMessageInfo
+
+func (m *OfpTableFeatureProperty) GetType() OfpTableFeaturePropType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpTableFeaturePropType_OFPTFPT_INSTRUCTIONS
+}
+
+type isOfpTableFeatureProperty_Value interface {
+	isOfpTableFeatureProperty_Value()
+}
+
+type OfpTableFeatureProperty_Instructions struct {
+	Instructions *OfpTableFeaturePropInstructions `protobuf:"bytes,2,opt,name=instructions,proto3,oneof"`
+}
+
+type OfpTableFeatureProperty_NextTables struct {
+	NextTables *OfpTableFeaturePropNextTables `protobuf:"bytes,3,opt,name=next_tables,json=nextTables,proto3,oneof"`
+}
+
+type OfpTableFeatureProperty_Actions struct {
+	Actions *OfpTableFeaturePropActions `protobuf:"bytes,4,opt,name=actions,proto3,oneof"`
+}
+
+type OfpTableFeatureProperty_Oxm struct {
+	Oxm *OfpTableFeaturePropOxm `protobuf:"bytes,5,opt,name=oxm,proto3,oneof"`
+}
+
+type OfpTableFeatureProperty_Experimenter struct {
+	Experimenter *OfpTableFeaturePropExperimenter `protobuf:"bytes,6,opt,name=experimenter,proto3,oneof"`
+}
+
+func (*OfpTableFeatureProperty_Instructions) isOfpTableFeatureProperty_Value() {}
+
+func (*OfpTableFeatureProperty_NextTables) isOfpTableFeatureProperty_Value() {}
+
+func (*OfpTableFeatureProperty_Actions) isOfpTableFeatureProperty_Value() {}
+
+func (*OfpTableFeatureProperty_Oxm) isOfpTableFeatureProperty_Value() {}
+
+func (*OfpTableFeatureProperty_Experimenter) isOfpTableFeatureProperty_Value() {}
+
+func (m *OfpTableFeatureProperty) GetValue() isOfpTableFeatureProperty_Value {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *OfpTableFeatureProperty) GetInstructions() *OfpTableFeaturePropInstructions {
+	if x, ok := m.GetValue().(*OfpTableFeatureProperty_Instructions); ok {
+		return x.Instructions
+	}
+	return nil
+}
+
+func (m *OfpTableFeatureProperty) GetNextTables() *OfpTableFeaturePropNextTables {
+	if x, ok := m.GetValue().(*OfpTableFeatureProperty_NextTables); ok {
+		return x.NextTables
+	}
+	return nil
+}
+
+func (m *OfpTableFeatureProperty) GetActions() *OfpTableFeaturePropActions {
+	if x, ok := m.GetValue().(*OfpTableFeatureProperty_Actions); ok {
+		return x.Actions
+	}
+	return nil
+}
+
+func (m *OfpTableFeatureProperty) GetOxm() *OfpTableFeaturePropOxm {
+	if x, ok := m.GetValue().(*OfpTableFeatureProperty_Oxm); ok {
+		return x.Oxm
+	}
+	return nil
+}
+
+func (m *OfpTableFeatureProperty) GetExperimenter() *OfpTableFeaturePropExperimenter {
+	if x, ok := m.GetValue().(*OfpTableFeatureProperty_Experimenter); ok {
+		return x.Experimenter
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*OfpTableFeatureProperty) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*OfpTableFeatureProperty_Instructions)(nil),
+		(*OfpTableFeatureProperty_NextTables)(nil),
+		(*OfpTableFeatureProperty_Actions)(nil),
+		(*OfpTableFeatureProperty_Oxm)(nil),
+		(*OfpTableFeatureProperty_Experimenter)(nil),
+	}
+}
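+
+// Usage sketch: the Value oneof can be read either through the typed getters
+// above (which return nil when another variant is set) or with a type switch
+// over the wrapper types:
+//
+//	switch v := prop.GetValue().(type) {
+//	case *OfpTableFeatureProperty_Instructions:
+//		_ = v.Instructions.GetInstructions()
+//	case *OfpTableFeatureProperty_NextTables:
+//		_ = v.NextTables.GetNextTableIds()
+//	case *OfpTableFeatureProperty_Actions:
+//		_ = v.Actions.GetActions()
+//	case nil:
+//		// no variant set
+//	}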
+
+// Instructions property
+type OfpTableFeaturePropInstructions struct {
+	// One of OFPTFPT_INSTRUCTIONS,
+	// OFPTFPT_INSTRUCTIONS_MISS.
+	Instructions         []*OfpInstruction `protobuf:"bytes,1,rep,name=instructions,proto3" json:"instructions,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *OfpTableFeaturePropInstructions) Reset()         { *m = OfpTableFeaturePropInstructions{} }
+func (m *OfpTableFeaturePropInstructions) String() string { return proto.CompactTextString(m) }
+func (*OfpTableFeaturePropInstructions) ProtoMessage()    {}
+func (*OfpTableFeaturePropInstructions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{50}
+}
+
+func (m *OfpTableFeaturePropInstructions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpTableFeaturePropInstructions.Unmarshal(m, b)
+}
+func (m *OfpTableFeaturePropInstructions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpTableFeaturePropInstructions.Marshal(b, m, deterministic)
+}
+func (m *OfpTableFeaturePropInstructions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpTableFeaturePropInstructions.Merge(m, src)
+}
+func (m *OfpTableFeaturePropInstructions) XXX_Size() int {
+	return xxx_messageInfo_OfpTableFeaturePropInstructions.Size(m)
+}
+func (m *OfpTableFeaturePropInstructions) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpTableFeaturePropInstructions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpTableFeaturePropInstructions proto.InternalMessageInfo
+
+func (m *OfpTableFeaturePropInstructions) GetInstructions() []*OfpInstruction {
+	if m != nil {
+		return m.Instructions
+	}
+	return nil
+}
+
+// Next Tables property
+type OfpTableFeaturePropNextTables struct {
+	// One of OFPTFPT_NEXT_TABLES,
+	// OFPTFPT_NEXT_TABLES_MISS.
+	NextTableIds         []uint32 `protobuf:"varint,1,rep,packed,name=next_table_ids,json=nextTableIds,proto3" json:"next_table_ids,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpTableFeaturePropNextTables) Reset()         { *m = OfpTableFeaturePropNextTables{} }
+func (m *OfpTableFeaturePropNextTables) String() string { return proto.CompactTextString(m) }
+func (*OfpTableFeaturePropNextTables) ProtoMessage()    {}
+func (*OfpTableFeaturePropNextTables) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{51}
+}
+
+func (m *OfpTableFeaturePropNextTables) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpTableFeaturePropNextTables.Unmarshal(m, b)
+}
+func (m *OfpTableFeaturePropNextTables) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpTableFeaturePropNextTables.Marshal(b, m, deterministic)
+}
+func (m *OfpTableFeaturePropNextTables) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpTableFeaturePropNextTables.Merge(m, src)
+}
+func (m *OfpTableFeaturePropNextTables) XXX_Size() int {
+	return xxx_messageInfo_OfpTableFeaturePropNextTables.Size(m)
+}
+func (m *OfpTableFeaturePropNextTables) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpTableFeaturePropNextTables.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpTableFeaturePropNextTables proto.InternalMessageInfo
+
+func (m *OfpTableFeaturePropNextTables) GetNextTableIds() []uint32 {
+	if m != nil {
+		return m.NextTableIds
+	}
+	return nil
+}
+
+// Actions property
+type OfpTableFeaturePropActions struct {
+	// One of OFPTFPT_WRITE_ACTIONS,
+	// OFPTFPT_WRITE_ACTIONS_MISS,
+	// OFPTFPT_APPLY_ACTIONS,
+	// OFPTFPT_APPLY_ACTIONS_MISS.
+	Actions              []*OfpAction `protobuf:"bytes,1,rep,name=actions,proto3" json:"actions,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *OfpTableFeaturePropActions) Reset()         { *m = OfpTableFeaturePropActions{} }
+func (m *OfpTableFeaturePropActions) String() string { return proto.CompactTextString(m) }
+func (*OfpTableFeaturePropActions) ProtoMessage()    {}
+func (*OfpTableFeaturePropActions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{52}
+}
+
+func (m *OfpTableFeaturePropActions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpTableFeaturePropActions.Unmarshal(m, b)
+}
+func (m *OfpTableFeaturePropActions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpTableFeaturePropActions.Marshal(b, m, deterministic)
+}
+func (m *OfpTableFeaturePropActions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpTableFeaturePropActions.Merge(m, src)
+}
+func (m *OfpTableFeaturePropActions) XXX_Size() int {
+	return xxx_messageInfo_OfpTableFeaturePropActions.Size(m)
+}
+func (m *OfpTableFeaturePropActions) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpTableFeaturePropActions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpTableFeaturePropActions proto.InternalMessageInfo
+
+func (m *OfpTableFeaturePropActions) GetActions() []*OfpAction {
+	if m != nil {
+		return m.Actions
+	}
+	return nil
+}
+
+// Match, Wildcard or Set-Field property
+type OfpTableFeaturePropOxm struct {
+	// TODO: confirm that OXM IDs are modelled correctly as uint32.
+	OxmIds               []uint32 `protobuf:"varint,3,rep,packed,name=oxm_ids,json=oxmIds,proto3" json:"oxm_ids,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpTableFeaturePropOxm) Reset()         { *m = OfpTableFeaturePropOxm{} }
+func (m *OfpTableFeaturePropOxm) String() string { return proto.CompactTextString(m) }
+func (*OfpTableFeaturePropOxm) ProtoMessage()    {}
+func (*OfpTableFeaturePropOxm) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{53}
+}
+
+func (m *OfpTableFeaturePropOxm) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpTableFeaturePropOxm.Unmarshal(m, b)
+}
+func (m *OfpTableFeaturePropOxm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpTableFeaturePropOxm.Marshal(b, m, deterministic)
+}
+func (m *OfpTableFeaturePropOxm) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpTableFeaturePropOxm.Merge(m, src)
+}
+func (m *OfpTableFeaturePropOxm) XXX_Size() int {
+	return xxx_messageInfo_OfpTableFeaturePropOxm.Size(m)
+}
+func (m *OfpTableFeaturePropOxm) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpTableFeaturePropOxm.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpTableFeaturePropOxm proto.InternalMessageInfo
+
+func (m *OfpTableFeaturePropOxm) GetOxmIds() []uint32 {
+	if m != nil {
+		return m.OxmIds
+	}
+	return nil
+}
+
+// Experimenter table feature property
+type OfpTableFeaturePropExperimenter struct {
+	// One of OFPTFPT_EXPERIMENTER,
+	// OFPTFPT_EXPERIMENTER_MISS.
+	Experimenter         uint32   `protobuf:"varint,2,opt,name=experimenter,proto3" json:"experimenter,omitempty"`
+	ExpType              uint32   `protobuf:"varint,3,opt,name=exp_type,json=expType,proto3" json:"exp_type,omitempty"`
+	ExperimenterData     []uint32 `protobuf:"varint,4,rep,packed,name=experimenter_data,json=experimenterData,proto3" json:"experimenter_data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpTableFeaturePropExperimenter) Reset()         { *m = OfpTableFeaturePropExperimenter{} }
+func (m *OfpTableFeaturePropExperimenter) String() string { return proto.CompactTextString(m) }
+func (*OfpTableFeaturePropExperimenter) ProtoMessage()    {}
+func (*OfpTableFeaturePropExperimenter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{54}
+}
+
+func (m *OfpTableFeaturePropExperimenter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpTableFeaturePropExperimenter.Unmarshal(m, b)
+}
+func (m *OfpTableFeaturePropExperimenter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpTableFeaturePropExperimenter.Marshal(b, m, deterministic)
+}
+func (m *OfpTableFeaturePropExperimenter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpTableFeaturePropExperimenter.Merge(m, src)
+}
+func (m *OfpTableFeaturePropExperimenter) XXX_Size() int {
+	return xxx_messageInfo_OfpTableFeaturePropExperimenter.Size(m)
+}
+func (m *OfpTableFeaturePropExperimenter) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpTableFeaturePropExperimenter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpTableFeaturePropExperimenter proto.InternalMessageInfo
+
+func (m *OfpTableFeaturePropExperimenter) GetExperimenter() uint32 {
+	if m != nil {
+		return m.Experimenter
+	}
+	return 0
+}
+
+func (m *OfpTableFeaturePropExperimenter) GetExpType() uint32 {
+	if m != nil {
+		return m.ExpType
+	}
+	return 0
+}
+
+func (m *OfpTableFeaturePropExperimenter) GetExperimenterData() []uint32 {
+	if m != nil {
+		return m.ExperimenterData
+	}
+	return nil
+}
+
+// Body for ofp_multipart_request of type OFPMP_TABLE_FEATURES.
+// Body of reply to OFPMP_TABLE_FEATURES request.
+type OfpTableFeatures struct {
+	TableId       uint32 `protobuf:"varint,1,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	Name          string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	MetadataMatch uint64 `protobuf:"varint,3,opt,name=metadata_match,json=metadataMatch,proto3" json:"metadata_match,omitempty"`
+	MetadataWrite uint64 `protobuf:"varint,4,opt,name=metadata_write,json=metadataWrite,proto3" json:"metadata_write,omitempty"`
+	Config        uint32 `protobuf:"varint,5,opt,name=config,proto3" json:"config,omitempty"`
+	MaxEntries    uint32 `protobuf:"varint,6,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"`
+	// Table Feature Property list
+	Properties           []*OfpTableFeatureProperty `protobuf:"bytes,7,rep,name=properties,proto3" json:"properties,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
+	XXX_unrecognized     []byte                     `json:"-"`
+	XXX_sizecache        int32                      `json:"-"`
+}
+
+func (m *OfpTableFeatures) Reset()         { *m = OfpTableFeatures{} }
+func (m *OfpTableFeatures) String() string { return proto.CompactTextString(m) }
+func (*OfpTableFeatures) ProtoMessage()    {}
+func (*OfpTableFeatures) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{55}
+}
+
+func (m *OfpTableFeatures) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpTableFeatures.Unmarshal(m, b)
+}
+func (m *OfpTableFeatures) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpTableFeatures.Marshal(b, m, deterministic)
+}
+func (m *OfpTableFeatures) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpTableFeatures.Merge(m, src)
+}
+func (m *OfpTableFeatures) XXX_Size() int {
+	return xxx_messageInfo_OfpTableFeatures.Size(m)
+}
+func (m *OfpTableFeatures) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpTableFeatures.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpTableFeatures proto.InternalMessageInfo
+
+func (m *OfpTableFeatures) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+func (m *OfpTableFeatures) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *OfpTableFeatures) GetMetadataMatch() uint64 {
+	if m != nil {
+		return m.MetadataMatch
+	}
+	return 0
+}
+
+func (m *OfpTableFeatures) GetMetadataWrite() uint64 {
+	if m != nil {
+		return m.MetadataWrite
+	}
+	return 0
+}
+
+func (m *OfpTableFeatures) GetConfig() uint32 {
+	if m != nil {
+		return m.Config
+	}
+	return 0
+}
+
+func (m *OfpTableFeatures) GetMaxEntries() uint32 {
+	if m != nil {
+		return m.MaxEntries
+	}
+	return 0
+}
+
+func (m *OfpTableFeatures) GetProperties() []*OfpTableFeatureProperty {
+	if m != nil {
+		return m.Properties
+	}
+	return nil
+}
+
+// Body of reply to OFPMP_TABLE request.
+type OfpTableStats struct {
+	TableId              uint32   `protobuf:"varint,1,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	ActiveCount          uint32   `protobuf:"varint,2,opt,name=active_count,json=activeCount,proto3" json:"active_count,omitempty"`
+	LookupCount          uint64   `protobuf:"varint,3,opt,name=lookup_count,json=lookupCount,proto3" json:"lookup_count,omitempty"`
+	MatchedCount         uint64   `protobuf:"varint,4,opt,name=matched_count,json=matchedCount,proto3" json:"matched_count,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpTableStats) Reset()         { *m = OfpTableStats{} }
+func (m *OfpTableStats) String() string { return proto.CompactTextString(m) }
+func (*OfpTableStats) ProtoMessage()    {}
+func (*OfpTableStats) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{56}
+}
+
+func (m *OfpTableStats) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpTableStats.Unmarshal(m, b)
+}
+func (m *OfpTableStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpTableStats.Marshal(b, m, deterministic)
+}
+func (m *OfpTableStats) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpTableStats.Merge(m, src)
+}
+func (m *OfpTableStats) XXX_Size() int {
+	return xxx_messageInfo_OfpTableStats.Size(m)
+}
+func (m *OfpTableStats) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpTableStats.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpTableStats proto.InternalMessageInfo
+
+func (m *OfpTableStats) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+func (m *OfpTableStats) GetActiveCount() uint32 {
+	if m != nil {
+		return m.ActiveCount
+	}
+	return 0
+}
+
+func (m *OfpTableStats) GetLookupCount() uint64 {
+	if m != nil {
+		return m.LookupCount
+	}
+	return 0
+}
+
+func (m *OfpTableStats) GetMatchedCount() uint64 {
+	if m != nil {
+		return m.MatchedCount
+	}
+	return 0
+}
+
+// Body for ofp_multipart_request of type OFPMP_PORT.
+type OfpPortStatsRequest struct {
+	PortNo               uint32   `protobuf:"varint,1,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpPortStatsRequest) Reset()         { *m = OfpPortStatsRequest{} }
+func (m *OfpPortStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*OfpPortStatsRequest) ProtoMessage()    {}
+func (*OfpPortStatsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{57}
+}
+
+func (m *OfpPortStatsRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpPortStatsRequest.Unmarshal(m, b)
+}
+func (m *OfpPortStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpPortStatsRequest.Marshal(b, m, deterministic)
+}
+func (m *OfpPortStatsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpPortStatsRequest.Merge(m, src)
+}
+func (m *OfpPortStatsRequest) XXX_Size() int {
+	return xxx_messageInfo_OfpPortStatsRequest.Size(m)
+}
+func (m *OfpPortStatsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpPortStatsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpPortStatsRequest proto.InternalMessageInfo
+
+func (m *OfpPortStatsRequest) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+// Body of reply to OFPMP_PORT request. If a counter is unsupported, set
+// the field to all ones.
+type OfpPortStats struct {
+	PortNo               uint32   `protobuf:"varint,1,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	RxPackets            uint64   `protobuf:"varint,2,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"`
+	TxPackets            uint64   `protobuf:"varint,3,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"`
+	RxBytes              uint64   `protobuf:"varint,4,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"`
+	TxBytes              uint64   `protobuf:"varint,5,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"`
+	RxDropped            uint64   `protobuf:"varint,6,opt,name=rx_dropped,json=rxDropped,proto3" json:"rx_dropped,omitempty"`
+	TxDropped            uint64   `protobuf:"varint,7,opt,name=tx_dropped,json=txDropped,proto3" json:"tx_dropped,omitempty"`
+	RxErrors             uint64   `protobuf:"varint,8,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"`
+	TxErrors             uint64   `protobuf:"varint,9,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"`
+	RxFrameErr           uint64   `protobuf:"varint,10,opt,name=rx_frame_err,json=rxFrameErr,proto3" json:"rx_frame_err,omitempty"`
+	RxOverErr            uint64   `protobuf:"varint,11,opt,name=rx_over_err,json=rxOverErr,proto3" json:"rx_over_err,omitempty"`
+	RxCrcErr             uint64   `protobuf:"varint,12,opt,name=rx_crc_err,json=rxCrcErr,proto3" json:"rx_crc_err,omitempty"`
+	Collisions           uint64   `protobuf:"varint,13,opt,name=collisions,proto3" json:"collisions,omitempty"`
+	DurationSec          uint32   `protobuf:"varint,14,opt,name=duration_sec,json=durationSec,proto3" json:"duration_sec,omitempty"`
+	DurationNsec         uint32   `protobuf:"varint,15,opt,name=duration_nsec,json=durationNsec,proto3" json:"duration_nsec,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpPortStats) Reset()         { *m = OfpPortStats{} }
+func (m *OfpPortStats) String() string { return proto.CompactTextString(m) }
+func (*OfpPortStats) ProtoMessage()    {}
+func (*OfpPortStats) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{58}
+}
+
+func (m *OfpPortStats) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpPortStats.Unmarshal(m, b)
+}
+func (m *OfpPortStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpPortStats.Marshal(b, m, deterministic)
+}
+func (m *OfpPortStats) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpPortStats.Merge(m, src)
+}
+func (m *OfpPortStats) XXX_Size() int {
+	return xxx_messageInfo_OfpPortStats.Size(m)
+}
+func (m *OfpPortStats) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpPortStats.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpPortStats proto.InternalMessageInfo
+
+func (m *OfpPortStats) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetRxPackets() uint64 {
+	if m != nil {
+		return m.RxPackets
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetTxPackets() uint64 {
+	if m != nil {
+		return m.TxPackets
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetRxBytes() uint64 {
+	if m != nil {
+		return m.RxBytes
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetTxBytes() uint64 {
+	if m != nil {
+		return m.TxBytes
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetRxDropped() uint64 {
+	if m != nil {
+		return m.RxDropped
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetTxDropped() uint64 {
+	if m != nil {
+		return m.TxDropped
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetRxErrors() uint64 {
+	if m != nil {
+		return m.RxErrors
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetTxErrors() uint64 {
+	if m != nil {
+		return m.TxErrors
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetRxFrameErr() uint64 {
+	if m != nil {
+		return m.RxFrameErr
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetRxOverErr() uint64 {
+	if m != nil {
+		return m.RxOverErr
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetRxCrcErr() uint64 {
+	if m != nil {
+		return m.RxCrcErr
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetCollisions() uint64 {
+	if m != nil {
+		return m.Collisions
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetDurationSec() uint32 {
+	if m != nil {
+		return m.DurationSec
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetDurationNsec() uint32 {
+	if m != nil {
+		return m.DurationNsec
+	}
+	return 0
+}
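+
+// Usage sketch: per the comment on OfpPortStats, a counter the switch does
+// not support is reported as all ones, so callers should treat ^uint64(0) as
+// "unavailable" rather than as a real count:
+//
+//	const counterUnsupported = ^uint64(0)
+//	if ps.GetRxErrors() != counterUnsupported {
+//		rxErrTotal += ps.GetRxErrors()
+//	}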
+
+// Body of OFPMP_GROUP request.
+type OfpGroupStatsRequest struct {
+	GroupId              uint32   `protobuf:"varint,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpGroupStatsRequest) Reset()         { *m = OfpGroupStatsRequest{} }
+func (m *OfpGroupStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*OfpGroupStatsRequest) ProtoMessage()    {}
+func (*OfpGroupStatsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{59}
+}
+
+func (m *OfpGroupStatsRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpGroupStatsRequest.Unmarshal(m, b)
+}
+func (m *OfpGroupStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpGroupStatsRequest.Marshal(b, m, deterministic)
+}
+func (m *OfpGroupStatsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpGroupStatsRequest.Merge(m, src)
+}
+func (m *OfpGroupStatsRequest) XXX_Size() int {
+	return xxx_messageInfo_OfpGroupStatsRequest.Size(m)
+}
+func (m *OfpGroupStatsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpGroupStatsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpGroupStatsRequest proto.InternalMessageInfo
+
+func (m *OfpGroupStatsRequest) GetGroupId() uint32 {
+	if m != nil {
+		return m.GroupId
+	}
+	return 0
+}
+
+// Used in group stats replies.
+type OfpBucketCounter struct {
+	PacketCount          uint64   `protobuf:"varint,1,opt,name=packet_count,json=packetCount,proto3" json:"packet_count,omitempty"`
+	ByteCount            uint64   `protobuf:"varint,2,opt,name=byte_count,json=byteCount,proto3" json:"byte_count,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpBucketCounter) Reset()         { *m = OfpBucketCounter{} }
+func (m *OfpBucketCounter) String() string { return proto.CompactTextString(m) }
+func (*OfpBucketCounter) ProtoMessage()    {}
+func (*OfpBucketCounter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{60}
+}
+
+func (m *OfpBucketCounter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpBucketCounter.Unmarshal(m, b)
+}
+func (m *OfpBucketCounter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpBucketCounter.Marshal(b, m, deterministic)
+}
+func (m *OfpBucketCounter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpBucketCounter.Merge(m, src)
+}
+func (m *OfpBucketCounter) XXX_Size() int {
+	return xxx_messageInfo_OfpBucketCounter.Size(m)
+}
+func (m *OfpBucketCounter) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpBucketCounter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpBucketCounter proto.InternalMessageInfo
+
+func (m *OfpBucketCounter) GetPacketCount() uint64 {
+	if m != nil {
+		return m.PacketCount
+	}
+	return 0
+}
+
+func (m *OfpBucketCounter) GetByteCount() uint64 {
+	if m != nil {
+		return m.ByteCount
+	}
+	return 0
+}
+
+// Body of reply to OFPMP_GROUP request.
+type OfpGroupStats struct {
+	GroupId              uint32              `protobuf:"varint,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
+	RefCount             uint32              `protobuf:"varint,2,opt,name=ref_count,json=refCount,proto3" json:"ref_count,omitempty"`
+	PacketCount          uint64              `protobuf:"varint,3,opt,name=packet_count,json=packetCount,proto3" json:"packet_count,omitempty"`
+	ByteCount            uint64              `protobuf:"varint,4,opt,name=byte_count,json=byteCount,proto3" json:"byte_count,omitempty"`
+	DurationSec          uint32              `protobuf:"varint,5,opt,name=duration_sec,json=durationSec,proto3" json:"duration_sec,omitempty"`
+	DurationNsec         uint32              `protobuf:"varint,6,opt,name=duration_nsec,json=durationNsec,proto3" json:"duration_nsec,omitempty"`
+	BucketStats          []*OfpBucketCounter `protobuf:"bytes,7,rep,name=bucket_stats,json=bucketStats,proto3" json:"bucket_stats,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *OfpGroupStats) Reset()         { *m = OfpGroupStats{} }
+func (m *OfpGroupStats) String() string { return proto.CompactTextString(m) }
+func (*OfpGroupStats) ProtoMessage()    {}
+func (*OfpGroupStats) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{61}
+}
+
+func (m *OfpGroupStats) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpGroupStats.Unmarshal(m, b)
+}
+func (m *OfpGroupStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpGroupStats.Marshal(b, m, deterministic)
+}
+func (m *OfpGroupStats) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpGroupStats.Merge(m, src)
+}
+func (m *OfpGroupStats) XXX_Size() int {
+	return xxx_messageInfo_OfpGroupStats.Size(m)
+}
+func (m *OfpGroupStats) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpGroupStats.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpGroupStats proto.InternalMessageInfo
+
+func (m *OfpGroupStats) GetGroupId() uint32 {
+	if m != nil {
+		return m.GroupId
+	}
+	return 0
+}
+
+func (m *OfpGroupStats) GetRefCount() uint32 {
+	if m != nil {
+		return m.RefCount
+	}
+	return 0
+}
+
+func (m *OfpGroupStats) GetPacketCount() uint64 {
+	if m != nil {
+		return m.PacketCount
+	}
+	return 0
+}
+
+func (m *OfpGroupStats) GetByteCount() uint64 {
+	if m != nil {
+		return m.ByteCount
+	}
+	return 0
+}
+
+func (m *OfpGroupStats) GetDurationSec() uint32 {
+	if m != nil {
+		return m.DurationSec
+	}
+	return 0
+}
+
+func (m *OfpGroupStats) GetDurationNsec() uint32 {
+	if m != nil {
+		return m.DurationNsec
+	}
+	return 0
+}
+
+func (m *OfpGroupStats) GetBucketStats() []*OfpBucketCounter {
+	if m != nil {
+		return m.BucketStats
+	}
+	return nil
+}
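+
+// Usage sketch: BucketStats carries one OfpBucketCounter per bucket of the
+// group, in bucket order. Summing them shows how traffic spreads across
+// buckets; for replicating group types (e.g. OFPGT_ALL) the sum can exceed
+// the group-level PacketCount.
+//
+//	var pkts, bytes uint64
+//	for _, bc := range gs.GetBucketStats() {
+//		pkts += bc.GetPacketCount()
+//		bytes += bc.GetByteCount()
+//	}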
+
+// Body of reply to OFPMP_GROUP_DESC request.
+type OfpGroupDesc struct {
+	Type                 OfpGroupType `protobuf:"varint,1,opt,name=type,proto3,enum=openflow_13.OfpGroupType" json:"type,omitempty"`
+	GroupId              uint32       `protobuf:"varint,2,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
+	Buckets              []*OfpBucket `protobuf:"bytes,3,rep,name=buckets,proto3" json:"buckets,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *OfpGroupDesc) Reset()         { *m = OfpGroupDesc{} }
+func (m *OfpGroupDesc) String() string { return proto.CompactTextString(m) }
+func (*OfpGroupDesc) ProtoMessage()    {}
+func (*OfpGroupDesc) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{62}
+}
+
+func (m *OfpGroupDesc) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpGroupDesc.Unmarshal(m, b)
+}
+func (m *OfpGroupDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpGroupDesc.Marshal(b, m, deterministic)
+}
+func (m *OfpGroupDesc) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpGroupDesc.Merge(m, src)
+}
+func (m *OfpGroupDesc) XXX_Size() int {
+	return xxx_messageInfo_OfpGroupDesc.Size(m)
+}
+func (m *OfpGroupDesc) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpGroupDesc.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpGroupDesc proto.InternalMessageInfo
+
+func (m *OfpGroupDesc) GetType() OfpGroupType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpGroupType_OFPGT_ALL
+}
+
+func (m *OfpGroupDesc) GetGroupId() uint32 {
+	if m != nil {
+		return m.GroupId
+	}
+	return 0
+}
+
+func (m *OfpGroupDesc) GetBuckets() []*OfpBucket {
+	if m != nil {
+		return m.Buckets
+	}
+	return nil
+}
+
+type OfpGroupEntry struct {
+	Desc                 *OfpGroupDesc  `protobuf:"bytes,1,opt,name=desc,proto3" json:"desc,omitempty"`
+	Stats                *OfpGroupStats `protobuf:"bytes,2,opt,name=stats,proto3" json:"stats,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
+}
+
+func (m *OfpGroupEntry) Reset()         { *m = OfpGroupEntry{} }
+func (m *OfpGroupEntry) String() string { return proto.CompactTextString(m) }
+func (*OfpGroupEntry) ProtoMessage()    {}
+func (*OfpGroupEntry) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{63}
+}
+
+func (m *OfpGroupEntry) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpGroupEntry.Unmarshal(m, b)
+}
+func (m *OfpGroupEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpGroupEntry.Marshal(b, m, deterministic)
+}
+func (m *OfpGroupEntry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpGroupEntry.Merge(m, src)
+}
+func (m *OfpGroupEntry) XXX_Size() int {
+	return xxx_messageInfo_OfpGroupEntry.Size(m)
+}
+func (m *OfpGroupEntry) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpGroupEntry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpGroupEntry proto.InternalMessageInfo
+
+func (m *OfpGroupEntry) GetDesc() *OfpGroupDesc {
+	if m != nil {
+		return m.Desc
+	}
+	return nil
+}
+
+func (m *OfpGroupEntry) GetStats() *OfpGroupStats {
+	if m != nil {
+		return m.Stats
+	}
+	return nil
+}
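+
+// Usage sketch: OfpGroupEntry pairs a group's static definition (Desc) with
+// its runtime counters (Stats), so one message can carry a full group-table
+// snapshot. The entries slice below stands in for a decoded reply.
+//
+//	for _, ge := range entries {
+//		fmt.Printf("group %d: refs=%d pkts=%d\n",
+//			ge.GetDesc().GetGroupId(),
+//			ge.GetStats().GetRefCount(),
+//			ge.GetStats().GetPacketCount())
+//	}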
+
+// Body of reply to OFPMP_GROUP_FEATURES request. Group features.
+type OfpGroupFeatures struct {
+	Types                uint32   `protobuf:"varint,1,opt,name=types,proto3" json:"types,omitempty"`
+	Capabilities         uint32   `protobuf:"varint,2,opt,name=capabilities,proto3" json:"capabilities,omitempty"`
+	MaxGroups            []uint32 `protobuf:"varint,3,rep,packed,name=max_groups,json=maxGroups,proto3" json:"max_groups,omitempty"`
+	Actions              []uint32 `protobuf:"varint,4,rep,packed,name=actions,proto3" json:"actions,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpGroupFeatures) Reset()         { *m = OfpGroupFeatures{} }
+func (m *OfpGroupFeatures) String() string { return proto.CompactTextString(m) }
+func (*OfpGroupFeatures) ProtoMessage()    {}
+func (*OfpGroupFeatures) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{64}
+}
+
+func (m *OfpGroupFeatures) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpGroupFeatures.Unmarshal(m, b)
+}
+func (m *OfpGroupFeatures) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpGroupFeatures.Marshal(b, m, deterministic)
+}
+func (m *OfpGroupFeatures) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpGroupFeatures.Merge(m, src)
+}
+func (m *OfpGroupFeatures) XXX_Size() int {
+	return xxx_messageInfo_OfpGroupFeatures.Size(m)
+}
+func (m *OfpGroupFeatures) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpGroupFeatures.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpGroupFeatures proto.InternalMessageInfo
+
+func (m *OfpGroupFeatures) GetTypes() uint32 {
+	if m != nil {
+		return m.Types
+	}
+	return 0
+}
+
+func (m *OfpGroupFeatures) GetCapabilities() uint32 {
+	if m != nil {
+		return m.Capabilities
+	}
+	return 0
+}
+
+func (m *OfpGroupFeatures) GetMaxGroups() []uint32 {
+	if m != nil {
+		return m.MaxGroups
+	}
+	return nil
+}
+
+func (m *OfpGroupFeatures) GetActions() []uint32 {
+	if m != nil {
+		return m.Actions
+	}
+	return nil
+}
+
+// Body of OFPMP_METER and OFPMP_METER_CONFIG requests.
+type OfpMeterMultipartRequest struct {
+	MeterId              uint32   `protobuf:"varint,1,opt,name=meter_id,json=meterId,proto3" json:"meter_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpMeterMultipartRequest) Reset()         { *m = OfpMeterMultipartRequest{} }
+func (m *OfpMeterMultipartRequest) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterMultipartRequest) ProtoMessage()    {}
+func (*OfpMeterMultipartRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{65}
+}
+
+func (m *OfpMeterMultipartRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterMultipartRequest.Unmarshal(m, b)
+}
+func (m *OfpMeterMultipartRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterMultipartRequest.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterMultipartRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterMultipartRequest.Merge(m, src)
+}
+func (m *OfpMeterMultipartRequest) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterMultipartRequest.Size(m)
+}
+func (m *OfpMeterMultipartRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterMultipartRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterMultipartRequest proto.InternalMessageInfo
+
+func (m *OfpMeterMultipartRequest) GetMeterId() uint32 {
+	if m != nil {
+		return m.MeterId
+	}
+	return 0
+}
+
+// Statistics for each meter band
+type OfpMeterBandStats struct {
+	PacketBandCount      uint64   `protobuf:"varint,1,opt,name=packet_band_count,json=packetBandCount,proto3" json:"packet_band_count,omitempty"`
+	ByteBandCount        uint64   `protobuf:"varint,2,opt,name=byte_band_count,json=byteBandCount,proto3" json:"byte_band_count,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpMeterBandStats) Reset()         { *m = OfpMeterBandStats{} }
+func (m *OfpMeterBandStats) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterBandStats) ProtoMessage()    {}
+func (*OfpMeterBandStats) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{66}
+}
+
+func (m *OfpMeterBandStats) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterBandStats.Unmarshal(m, b)
+}
+func (m *OfpMeterBandStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterBandStats.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterBandStats) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterBandStats.Merge(m, src)
+}
+func (m *OfpMeterBandStats) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterBandStats.Size(m)
+}
+func (m *OfpMeterBandStats) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterBandStats.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterBandStats proto.InternalMessageInfo
+
+func (m *OfpMeterBandStats) GetPacketBandCount() uint64 {
+	if m != nil {
+		return m.PacketBandCount
+	}
+	return 0
+}
+
+func (m *OfpMeterBandStats) GetByteBandCount() uint64 {
+	if m != nil {
+		return m.ByteBandCount
+	}
+	return 0
+}
+
+// Body of reply to OFPMP_METER request. Meter statistics.
+type OfpMeterStats struct {
+	MeterId              uint32               `protobuf:"varint,1,opt,name=meter_id,json=meterId,proto3" json:"meter_id,omitempty"`
+	FlowCount            uint32               `protobuf:"varint,2,opt,name=flow_count,json=flowCount,proto3" json:"flow_count,omitempty"`
+	PacketInCount        uint64               `protobuf:"varint,3,opt,name=packet_in_count,json=packetInCount,proto3" json:"packet_in_count,omitempty"`
+	ByteInCount          uint64               `protobuf:"varint,4,opt,name=byte_in_count,json=byteInCount,proto3" json:"byte_in_count,omitempty"`
+	DurationSec          uint32               `protobuf:"varint,5,opt,name=duration_sec,json=durationSec,proto3" json:"duration_sec,omitempty"`
+	DurationNsec         uint32               `protobuf:"varint,6,opt,name=duration_nsec,json=durationNsec,proto3" json:"duration_nsec,omitempty"`
+	BandStats            []*OfpMeterBandStats `protobuf:"bytes,7,rep,name=band_stats,json=bandStats,proto3" json:"band_stats,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *OfpMeterStats) Reset()         { *m = OfpMeterStats{} }
+func (m *OfpMeterStats) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterStats) ProtoMessage()    {}
+func (*OfpMeterStats) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{67}
+}
+
+func (m *OfpMeterStats) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterStats.Unmarshal(m, b)
+}
+func (m *OfpMeterStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterStats.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterStats) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterStats.Merge(m, src)
+}
+func (m *OfpMeterStats) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterStats.Size(m)
+}
+func (m *OfpMeterStats) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterStats.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterStats proto.InternalMessageInfo
+
+func (m *OfpMeterStats) GetMeterId() uint32 {
+	if m != nil {
+		return m.MeterId
+	}
+	return 0
+}
+
+func (m *OfpMeterStats) GetFlowCount() uint32 {
+	if m != nil {
+		return m.FlowCount
+	}
+	return 0
+}
+
+func (m *OfpMeterStats) GetPacketInCount() uint64 {
+	if m != nil {
+		return m.PacketInCount
+	}
+	return 0
+}
+
+func (m *OfpMeterStats) GetByteInCount() uint64 {
+	if m != nil {
+		return m.ByteInCount
+	}
+	return 0
+}
+
+func (m *OfpMeterStats) GetDurationSec() uint32 {
+	if m != nil {
+		return m.DurationSec
+	}
+	return 0
+}
+
+func (m *OfpMeterStats) GetDurationNsec() uint32 {
+	if m != nil {
+		return m.DurationNsec
+	}
+	return 0
+}
+
+func (m *OfpMeterStats) GetBandStats() []*OfpMeterBandStats {
+	if m != nil {
+		return m.BandStats
+	}
+	return nil
+}
+
+// Body of reply to OFPMP_METER_CONFIG request. Meter configuration.
+type OfpMeterConfig struct {
+	Flags                uint32                `protobuf:"varint,1,opt,name=flags,proto3" json:"flags,omitempty"`
+	MeterId              uint32                `protobuf:"varint,2,opt,name=meter_id,json=meterId,proto3" json:"meter_id,omitempty"`
+	Bands                []*OfpMeterBandHeader `protobuf:"bytes,3,rep,name=bands,proto3" json:"bands,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *OfpMeterConfig) Reset()         { *m = OfpMeterConfig{} }
+func (m *OfpMeterConfig) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterConfig) ProtoMessage()    {}
+func (*OfpMeterConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{68}
+}
+
+func (m *OfpMeterConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterConfig.Unmarshal(m, b)
+}
+func (m *OfpMeterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterConfig.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterConfig.Merge(m, src)
+}
+func (m *OfpMeterConfig) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterConfig.Size(m)
+}
+func (m *OfpMeterConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterConfig proto.InternalMessageInfo
+
+func (m *OfpMeterConfig) GetFlags() uint32 {
+	if m != nil {
+		return m.Flags
+	}
+	return 0
+}
+
+func (m *OfpMeterConfig) GetMeterId() uint32 {
+	if m != nil {
+		return m.MeterId
+	}
+	return 0
+}
+
+func (m *OfpMeterConfig) GetBands() []*OfpMeterBandHeader {
+	if m != nil {
+		return m.Bands
+	}
+	return nil
+}
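+
+// Usage sketch: reading a decoded OFPMP_METER_CONFIG entry using only the
+// accessors generated in this file. Flags holds OFPMF_* bits as defined by
+// the OpenFlow 1.3 specification.
+//
+//	for _, band := range mc.GetBands() {
+//		_ = band // one OfpMeterBandHeader per configured band
+//	}
+//	_ = mc.GetFlags()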
+
+// Body of reply to OFPMP_METER_FEATURES request. Meter features.
+type OfpMeterFeatures struct {
+	MaxMeter             uint32   `protobuf:"varint,1,opt,name=max_meter,json=maxMeter,proto3" json:"max_meter,omitempty"`
+	BandTypes            uint32   `protobuf:"varint,2,opt,name=band_types,json=bandTypes,proto3" json:"band_types,omitempty"`
+	Capabilities         uint32   `protobuf:"varint,3,opt,name=capabilities,proto3" json:"capabilities,omitempty"`
+	MaxBands             uint32   `protobuf:"varint,4,opt,name=max_bands,json=maxBands,proto3" json:"max_bands,omitempty"`
+	MaxColor             uint32   `protobuf:"varint,5,opt,name=max_color,json=maxColor,proto3" json:"max_color,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpMeterFeatures) Reset()         { *m = OfpMeterFeatures{} }
+func (m *OfpMeterFeatures) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterFeatures) ProtoMessage()    {}
+func (*OfpMeterFeatures) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{69}
+}
+
+func (m *OfpMeterFeatures) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterFeatures.Unmarshal(m, b)
+}
+func (m *OfpMeterFeatures) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterFeatures.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterFeatures) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterFeatures.Merge(m, src)
+}
+func (m *OfpMeterFeatures) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterFeatures.Size(m)
+}
+func (m *OfpMeterFeatures) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterFeatures.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterFeatures proto.InternalMessageInfo
+
+func (m *OfpMeterFeatures) GetMaxMeter() uint32 {
+	if m != nil {
+		return m.MaxMeter
+	}
+	return 0
+}
+
+func (m *OfpMeterFeatures) GetBandTypes() uint32 {
+	if m != nil {
+		return m.BandTypes
+	}
+	return 0
+}
+
+func (m *OfpMeterFeatures) GetCapabilities() uint32 {
+	if m != nil {
+		return m.Capabilities
+	}
+	return 0
+}
+
+func (m *OfpMeterFeatures) GetMaxBands() uint32 {
+	if m != nil {
+		return m.MaxBands
+	}
+	return 0
+}
+
+func (m *OfpMeterFeatures) GetMaxColor() uint32 {
+	if m != nil {
+		return m.MaxColor
+	}
+	return 0
+}
+
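+// Meter configuration together with the corresponding meter statistics.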
+type OfpMeterEntry struct {
+	Config               *OfpMeterConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
+	Stats                *OfpMeterStats  `protobuf:"bytes,2,opt,name=stats,proto3" json:"stats,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *OfpMeterEntry) Reset()         { *m = OfpMeterEntry{} }
+func (m *OfpMeterEntry) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterEntry) ProtoMessage()    {}
+func (*OfpMeterEntry) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{70}
+}
+
+func (m *OfpMeterEntry) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterEntry.Unmarshal(m, b)
+}
+func (m *OfpMeterEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterEntry.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterEntry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterEntry.Merge(m, src)
+}
+func (m *OfpMeterEntry) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterEntry.Size(m)
+}
+func (m *OfpMeterEntry) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterEntry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterEntry proto.InternalMessageInfo
+
+func (m *OfpMeterEntry) GetConfig() *OfpMeterConfig {
+	if m != nil {
+		return m.Config
+	}
+	return nil
+}
+
+func (m *OfpMeterEntry) GetStats() *OfpMeterStats {
+	if m != nil {
+		return m.Stats
+	}
+	return nil
+}
+
+// Body for ofp_multipart_request/reply of type OFPMP_EXPERIMENTER.
+type OfpExperimenterMultipartHeader struct {
+	Experimenter         uint32   `protobuf:"varint,1,opt,name=experimenter,proto3" json:"experimenter,omitempty"`
+	ExpType              uint32   `protobuf:"varint,2,opt,name=exp_type,json=expType,proto3" json:"exp_type,omitempty"`
+	Data                 []byte   `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpExperimenterMultipartHeader) Reset()         { *m = OfpExperimenterMultipartHeader{} }
+func (m *OfpExperimenterMultipartHeader) String() string { return proto.CompactTextString(m) }
+func (*OfpExperimenterMultipartHeader) ProtoMessage()    {}
+func (*OfpExperimenterMultipartHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{71}
+}
+
+func (m *OfpExperimenterMultipartHeader) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpExperimenterMultipartHeader.Unmarshal(m, b)
+}
+func (m *OfpExperimenterMultipartHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpExperimenterMultipartHeader.Marshal(b, m, deterministic)
+}
+func (m *OfpExperimenterMultipartHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpExperimenterMultipartHeader.Merge(m, src)
+}
+func (m *OfpExperimenterMultipartHeader) XXX_Size() int {
+	return xxx_messageInfo_OfpExperimenterMultipartHeader.Size(m)
+}
+func (m *OfpExperimenterMultipartHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpExperimenterMultipartHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpExperimenterMultipartHeader proto.InternalMessageInfo
+
+func (m *OfpExperimenterMultipartHeader) GetExperimenter() uint32 {
+	if m != nil {
+		return m.Experimenter
+	}
+	return 0
+}
+
+func (m *OfpExperimenterMultipartHeader) GetExpType() uint32 {
+	if m != nil {
+		return m.ExpType
+	}
+	return 0
+}
+
+func (m *OfpExperimenterMultipartHeader) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+// Experimenter extension.
+type OfpExperimenterHeader struct {
+	//ofp_header header;  /* Type OFPT_EXPERIMENTER. */
+	Experimenter         uint32   `protobuf:"varint,1,opt,name=experimenter,proto3" json:"experimenter,omitempty"`
+	ExpType              uint32   `protobuf:"varint,2,opt,name=exp_type,json=expType,proto3" json:"exp_type,omitempty"`
+	Data                 []byte   `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpExperimenterHeader) Reset()         { *m = OfpExperimenterHeader{} }
+func (m *OfpExperimenterHeader) String() string { return proto.CompactTextString(m) }
+func (*OfpExperimenterHeader) ProtoMessage()    {}
+func (*OfpExperimenterHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{72}
+}
+
+func (m *OfpExperimenterHeader) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpExperimenterHeader.Unmarshal(m, b)
+}
+func (m *OfpExperimenterHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpExperimenterHeader.Marshal(b, m, deterministic)
+}
+func (m *OfpExperimenterHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpExperimenterHeader.Merge(m, src)
+}
+func (m *OfpExperimenterHeader) XXX_Size() int {
+	return xxx_messageInfo_OfpExperimenterHeader.Size(m)
+}
+func (m *OfpExperimenterHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpExperimenterHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpExperimenterHeader proto.InternalMessageInfo
+
+func (m *OfpExperimenterHeader) GetExperimenter() uint32 {
+	if m != nil {
+		return m.Experimenter
+	}
+	return 0
+}
+
+func (m *OfpExperimenterHeader) GetExpType() uint32 {
+	if m != nil {
+		return m.ExpType
+	}
+	return 0
+}
+
+func (m *OfpExperimenterHeader) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+// Common description for a queue.
+type OfpQueuePropHeader struct {
+	Property             uint32   `protobuf:"varint,1,opt,name=property,proto3" json:"property,omitempty"`
+	Len                  uint32   `protobuf:"varint,2,opt,name=len,proto3" json:"len,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpQueuePropHeader) Reset()         { *m = OfpQueuePropHeader{} }
+func (m *OfpQueuePropHeader) String() string { return proto.CompactTextString(m) }
+func (*OfpQueuePropHeader) ProtoMessage()    {}
+func (*OfpQueuePropHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{73}
+}
+
+func (m *OfpQueuePropHeader) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpQueuePropHeader.Unmarshal(m, b)
+}
+func (m *OfpQueuePropHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpQueuePropHeader.Marshal(b, m, deterministic)
+}
+func (m *OfpQueuePropHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpQueuePropHeader.Merge(m, src)
+}
+func (m *OfpQueuePropHeader) XXX_Size() int {
+	return xxx_messageInfo_OfpQueuePropHeader.Size(m)
+}
+func (m *OfpQueuePropHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpQueuePropHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpQueuePropHeader proto.InternalMessageInfo
+
+func (m *OfpQueuePropHeader) GetProperty() uint32 {
+	if m != nil {
+		return m.Property
+	}
+	return 0
+}
+
+func (m *OfpQueuePropHeader) GetLen() uint32 {
+	if m != nil {
+		return m.Len
+	}
+	return 0
+}
+
+// Min-Rate queue property description.
+type OfpQueuePropMinRate struct {
+	PropHeader           *OfpQueuePropHeader `protobuf:"bytes,1,opt,name=prop_header,json=propHeader,proto3" json:"prop_header,omitempty"`
+	Rate                 uint32              `protobuf:"varint,2,opt,name=rate,proto3" json:"rate,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *OfpQueuePropMinRate) Reset()         { *m = OfpQueuePropMinRate{} }
+func (m *OfpQueuePropMinRate) String() string { return proto.CompactTextString(m) }
+func (*OfpQueuePropMinRate) ProtoMessage()    {}
+func (*OfpQueuePropMinRate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{74}
+}
+
+func (m *OfpQueuePropMinRate) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpQueuePropMinRate.Unmarshal(m, b)
+}
+func (m *OfpQueuePropMinRate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpQueuePropMinRate.Marshal(b, m, deterministic)
+}
+func (m *OfpQueuePropMinRate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpQueuePropMinRate.Merge(m, src)
+}
+func (m *OfpQueuePropMinRate) XXX_Size() int {
+	return xxx_messageInfo_OfpQueuePropMinRate.Size(m)
+}
+func (m *OfpQueuePropMinRate) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpQueuePropMinRate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpQueuePropMinRate proto.InternalMessageInfo
+
+func (m *OfpQueuePropMinRate) GetPropHeader() *OfpQueuePropHeader {
+	if m != nil {
+		return m.PropHeader
+	}
+	return nil
+}
+
+func (m *OfpQueuePropMinRate) GetRate() uint32 {
+	if m != nil {
+		return m.Rate
+	}
+	return 0
+}
+
+// Max-Rate queue property description.
+type OfpQueuePropMaxRate struct {
+	PropHeader           *OfpQueuePropHeader `protobuf:"bytes,1,opt,name=prop_header,json=propHeader,proto3" json:"prop_header,omitempty"`
+	Rate                 uint32              `protobuf:"varint,2,opt,name=rate,proto3" json:"rate,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *OfpQueuePropMaxRate) Reset()         { *m = OfpQueuePropMaxRate{} }
+func (m *OfpQueuePropMaxRate) String() string { return proto.CompactTextString(m) }
+func (*OfpQueuePropMaxRate) ProtoMessage()    {}
+func (*OfpQueuePropMaxRate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{75}
+}
+
+func (m *OfpQueuePropMaxRate) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpQueuePropMaxRate.Unmarshal(m, b)
+}
+func (m *OfpQueuePropMaxRate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpQueuePropMaxRate.Marshal(b, m, deterministic)
+}
+func (m *OfpQueuePropMaxRate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpQueuePropMaxRate.Merge(m, src)
+}
+func (m *OfpQueuePropMaxRate) XXX_Size() int {
+	return xxx_messageInfo_OfpQueuePropMaxRate.Size(m)
+}
+func (m *OfpQueuePropMaxRate) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpQueuePropMaxRate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpQueuePropMaxRate proto.InternalMessageInfo
+
+func (m *OfpQueuePropMaxRate) GetPropHeader() *OfpQueuePropHeader {
+	if m != nil {
+		return m.PropHeader
+	}
+	return nil
+}
+
+func (m *OfpQueuePropMaxRate) GetRate() uint32 {
+	if m != nil {
+		return m.Rate
+	}
+	return 0
+}
+
+// Experimenter queue property description.
+type OfpQueuePropExperimenter struct {
+	PropHeader           *OfpQueuePropHeader `protobuf:"bytes,1,opt,name=prop_header,json=propHeader,proto3" json:"prop_header,omitempty"`
+	Experimenter         uint32              `protobuf:"varint,2,opt,name=experimenter,proto3" json:"experimenter,omitempty"`
+	Data                 []byte              `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *OfpQueuePropExperimenter) Reset()         { *m = OfpQueuePropExperimenter{} }
+func (m *OfpQueuePropExperimenter) String() string { return proto.CompactTextString(m) }
+func (*OfpQueuePropExperimenter) ProtoMessage()    {}
+func (*OfpQueuePropExperimenter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{76}
+}
+
+func (m *OfpQueuePropExperimenter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpQueuePropExperimenter.Unmarshal(m, b)
+}
+func (m *OfpQueuePropExperimenter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpQueuePropExperimenter.Marshal(b, m, deterministic)
+}
+func (m *OfpQueuePropExperimenter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpQueuePropExperimenter.Merge(m, src)
+}
+func (m *OfpQueuePropExperimenter) XXX_Size() int {
+	return xxx_messageInfo_OfpQueuePropExperimenter.Size(m)
+}
+func (m *OfpQueuePropExperimenter) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpQueuePropExperimenter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpQueuePropExperimenter proto.InternalMessageInfo
+
+func (m *OfpQueuePropExperimenter) GetPropHeader() *OfpQueuePropHeader {
+	if m != nil {
+		return m.PropHeader
+	}
+	return nil
+}
+
+func (m *OfpQueuePropExperimenter) GetExperimenter() uint32 {
+	if m != nil {
+		return m.Experimenter
+	}
+	return 0
+}
+
+func (m *OfpQueuePropExperimenter) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+// Full description for a queue.
+type OfpPacketQueue struct {
+	QueueId              uint32                `protobuf:"varint,1,opt,name=queue_id,json=queueId,proto3" json:"queue_id,omitempty"`
+	Port                 uint32                `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
+	Properties           []*OfpQueuePropHeader `protobuf:"bytes,4,rep,name=properties,proto3" json:"properties,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *OfpPacketQueue) Reset()         { *m = OfpPacketQueue{} }
+func (m *OfpPacketQueue) String() string { return proto.CompactTextString(m) }
+func (*OfpPacketQueue) ProtoMessage()    {}
+func (*OfpPacketQueue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{77}
+}
+
+func (m *OfpPacketQueue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpPacketQueue.Unmarshal(m, b)
+}
+func (m *OfpPacketQueue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpPacketQueue.Marshal(b, m, deterministic)
+}
+func (m *OfpPacketQueue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpPacketQueue.Merge(m, src)
+}
+func (m *OfpPacketQueue) XXX_Size() int {
+	return xxx_messageInfo_OfpPacketQueue.Size(m)
+}
+func (m *OfpPacketQueue) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpPacketQueue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpPacketQueue proto.InternalMessageInfo
+
+func (m *OfpPacketQueue) GetQueueId() uint32 {
+	if m != nil {
+		return m.QueueId
+	}
+	return 0
+}
+
+func (m *OfpPacketQueue) GetPort() uint32 {
+	if m != nil {
+		return m.Port
+	}
+	return 0
+}
+
+func (m *OfpPacketQueue) GetProperties() []*OfpQueuePropHeader {
+	if m != nil {
+		return m.Properties
+	}
+	return nil
+}
+
+// Query for port queue configuration.
+type OfpQueueGetConfigRequest struct {
+	//ofp_header header;
+	Port                 uint32   `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpQueueGetConfigRequest) Reset()         { *m = OfpQueueGetConfigRequest{} }
+func (m *OfpQueueGetConfigRequest) String() string { return proto.CompactTextString(m) }
+func (*OfpQueueGetConfigRequest) ProtoMessage()    {}
+func (*OfpQueueGetConfigRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{78}
+}
+
+func (m *OfpQueueGetConfigRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpQueueGetConfigRequest.Unmarshal(m, b)
+}
+func (m *OfpQueueGetConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpQueueGetConfigRequest.Marshal(b, m, deterministic)
+}
+func (m *OfpQueueGetConfigRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpQueueGetConfigRequest.Merge(m, src)
+}
+func (m *OfpQueueGetConfigRequest) XXX_Size() int {
+	return xxx_messageInfo_OfpQueueGetConfigRequest.Size(m)
+}
+func (m *OfpQueueGetConfigRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpQueueGetConfigRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpQueueGetConfigRequest proto.InternalMessageInfo
+
+func (m *OfpQueueGetConfigRequest) GetPort() uint32 {
+	if m != nil {
+		return m.Port
+	}
+	return 0
+}
+
+// Queue configuration for a given port.
+type OfpQueueGetConfigReply struct {
+	//ofp_header header;
+	Port                 uint32            `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"`
+	Queues               []*OfpPacketQueue `protobuf:"bytes,2,rep,name=queues,proto3" json:"queues,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *OfpQueueGetConfigReply) Reset()         { *m = OfpQueueGetConfigReply{} }
+func (m *OfpQueueGetConfigReply) String() string { return proto.CompactTextString(m) }
+func (*OfpQueueGetConfigReply) ProtoMessage()    {}
+func (*OfpQueueGetConfigReply) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{79}
+}
+
+func (m *OfpQueueGetConfigReply) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpQueueGetConfigReply.Unmarshal(m, b)
+}
+func (m *OfpQueueGetConfigReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpQueueGetConfigReply.Marshal(b, m, deterministic)
+}
+func (m *OfpQueueGetConfigReply) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpQueueGetConfigReply.Merge(m, src)
+}
+func (m *OfpQueueGetConfigReply) XXX_Size() int {
+	return xxx_messageInfo_OfpQueueGetConfigReply.Size(m)
+}
+func (m *OfpQueueGetConfigReply) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpQueueGetConfigReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpQueueGetConfigReply proto.InternalMessageInfo
+
+func (m *OfpQueueGetConfigReply) GetPort() uint32 {
+	if m != nil {
+		return m.Port
+	}
+	return 0
+}
+
+func (m *OfpQueueGetConfigReply) GetQueues() []*OfpPacketQueue {
+	if m != nil {
+		return m.Queues
+	}
+	return nil
+}
+
+// OFPAT_SET_QUEUE action struct: send packets to given queue on port.
+type OfpActionSetQueue struct {
+	Type                 uint32   `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
+	QueueId              uint32   `protobuf:"varint,3,opt,name=queue_id,json=queueId,proto3" json:"queue_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpActionSetQueue) Reset()         { *m = OfpActionSetQueue{} }
+func (m *OfpActionSetQueue) String() string { return proto.CompactTextString(m) }
+func (*OfpActionSetQueue) ProtoMessage()    {}
+func (*OfpActionSetQueue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{80}
+}
+
+func (m *OfpActionSetQueue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpActionSetQueue.Unmarshal(m, b)
+}
+func (m *OfpActionSetQueue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpActionSetQueue.Marshal(b, m, deterministic)
+}
+func (m *OfpActionSetQueue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpActionSetQueue.Merge(m, src)
+}
+func (m *OfpActionSetQueue) XXX_Size() int {
+	return xxx_messageInfo_OfpActionSetQueue.Size(m)
+}
+func (m *OfpActionSetQueue) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpActionSetQueue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpActionSetQueue proto.InternalMessageInfo
+
+func (m *OfpActionSetQueue) GetType() uint32 {
+	if m != nil {
+		return m.Type
+	}
+	return 0
+}
+
+func (m *OfpActionSetQueue) GetQueueId() uint32 {
+	if m != nil {
+		return m.QueueId
+	}
+	return 0
+}
+
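+// Body for ofp_multipart_request of type OFPMP_QUEUE.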
+type OfpQueueStatsRequest struct {
+	PortNo               uint32   `protobuf:"varint,1,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	QueueId              uint32   `protobuf:"varint,2,opt,name=queue_id,json=queueId,proto3" json:"queue_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpQueueStatsRequest) Reset()         { *m = OfpQueueStatsRequest{} }
+func (m *OfpQueueStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*OfpQueueStatsRequest) ProtoMessage()    {}
+func (*OfpQueueStatsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{81}
+}
+
+func (m *OfpQueueStatsRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpQueueStatsRequest.Unmarshal(m, b)
+}
+func (m *OfpQueueStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpQueueStatsRequest.Marshal(b, m, deterministic)
+}
+func (m *OfpQueueStatsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpQueueStatsRequest.Merge(m, src)
+}
+func (m *OfpQueueStatsRequest) XXX_Size() int {
+	return xxx_messageInfo_OfpQueueStatsRequest.Size(m)
+}
+func (m *OfpQueueStatsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpQueueStatsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpQueueStatsRequest proto.InternalMessageInfo
+
+func (m *OfpQueueStatsRequest) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *OfpQueueStatsRequest) GetQueueId() uint32 {
+	if m != nil {
+		return m.QueueId
+	}
+	return 0
+}
+
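+// Body of reply to OFPMP_QUEUE request.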
+type OfpQueueStats struct {
+	PortNo               uint32   `protobuf:"varint,1,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	QueueId              uint32   `protobuf:"varint,2,opt,name=queue_id,json=queueId,proto3" json:"queue_id,omitempty"`
+	TxBytes              uint64   `protobuf:"varint,3,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"`
+	TxPackets            uint64   `protobuf:"varint,4,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"`
+	TxErrors             uint64   `protobuf:"varint,5,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"`
+	DurationSec          uint32   `protobuf:"varint,6,opt,name=duration_sec,json=durationSec,proto3" json:"duration_sec,omitempty"`
+	DurationNsec         uint32   `protobuf:"varint,7,opt,name=duration_nsec,json=durationNsec,proto3" json:"duration_nsec,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpQueueStats) Reset()         { *m = OfpQueueStats{} }
+func (m *OfpQueueStats) String() string { return proto.CompactTextString(m) }
+func (*OfpQueueStats) ProtoMessage()    {}
+func (*OfpQueueStats) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{82}
+}
+
+func (m *OfpQueueStats) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpQueueStats.Unmarshal(m, b)
+}
+func (m *OfpQueueStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpQueueStats.Marshal(b, m, deterministic)
+}
+func (m *OfpQueueStats) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpQueueStats.Merge(m, src)
+}
+func (m *OfpQueueStats) XXX_Size() int {
+	return xxx_messageInfo_OfpQueueStats.Size(m)
+}
+func (m *OfpQueueStats) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpQueueStats.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpQueueStats proto.InternalMessageInfo
+
+func (m *OfpQueueStats) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *OfpQueueStats) GetQueueId() uint32 {
+	if m != nil {
+		return m.QueueId
+	}
+	return 0
+}
+
+func (m *OfpQueueStats) GetTxBytes() uint64 {
+	if m != nil {
+		return m.TxBytes
+	}
+	return 0
+}
+
+func (m *OfpQueueStats) GetTxPackets() uint64 {
+	if m != nil {
+		return m.TxPackets
+	}
+	return 0
+}
+
+func (m *OfpQueueStats) GetTxErrors() uint64 {
+	if m != nil {
+		return m.TxErrors
+	}
+	return 0
+}
+
+func (m *OfpQueueStats) GetDurationSec() uint32 {
+	if m != nil {
+		return m.DurationSec
+	}
+	return 0
+}
+
+func (m *OfpQueueStats) GetDurationNsec() uint32 {
+	if m != nil {
+		return m.DurationNsec
+	}
+	return 0
+}
+
+// Role request and reply message.
+type OfpRoleRequest struct {
+	//ofp_header header;        /* Type OFPT_ROLE_REQUEST/OFPT_ROLE_REPLY. */
+	Role                 OfpControllerRole `protobuf:"varint,1,opt,name=role,proto3,enum=openflow_13.OfpControllerRole" json:"role,omitempty"`
+	GenerationId         uint64            `protobuf:"varint,2,opt,name=generation_id,json=generationId,proto3" json:"generation_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *OfpRoleRequest) Reset()         { *m = OfpRoleRequest{} }
+func (m *OfpRoleRequest) String() string { return proto.CompactTextString(m) }
+func (*OfpRoleRequest) ProtoMessage()    {}
+func (*OfpRoleRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{83}
+}
+
+func (m *OfpRoleRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpRoleRequest.Unmarshal(m, b)
+}
+func (m *OfpRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpRoleRequest.Marshal(b, m, deterministic)
+}
+func (m *OfpRoleRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpRoleRequest.Merge(m, src)
+}
+func (m *OfpRoleRequest) XXX_Size() int {
+	return xxx_messageInfo_OfpRoleRequest.Size(m)
+}
+func (m *OfpRoleRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpRoleRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpRoleRequest proto.InternalMessageInfo
+
+func (m *OfpRoleRequest) GetRole() OfpControllerRole {
+	if m != nil {
+		return m.Role
+	}
+	return OfpControllerRole_OFPCR_ROLE_NOCHANGE
+}
+
+func (m *OfpRoleRequest) GetGenerationId() uint64 {
+	if m != nil {
+		return m.GenerationId
+	}
+	return 0
+}
+
+// Asynchronous message configuration.
+type OfpAsyncConfig struct {
+	//ofp_header header;    /* OFPT_GET_ASYNC_REPLY or OFPT_SET_ASYNC. */
+	PacketInMask         []uint32 `protobuf:"varint,1,rep,packed,name=packet_in_mask,json=packetInMask,proto3" json:"packet_in_mask,omitempty"`
+	PortStatusMask       []uint32 `protobuf:"varint,2,rep,packed,name=port_status_mask,json=portStatusMask,proto3" json:"port_status_mask,omitempty"`
+	FlowRemovedMask      []uint32 `protobuf:"varint,3,rep,packed,name=flow_removed_mask,json=flowRemovedMask,proto3" json:"flow_removed_mask,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpAsyncConfig) Reset()         { *m = OfpAsyncConfig{} }
+func (m *OfpAsyncConfig) String() string { return proto.CompactTextString(m) }
+func (*OfpAsyncConfig) ProtoMessage()    {}
+func (*OfpAsyncConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{84}
+}
+
+func (m *OfpAsyncConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpAsyncConfig.Unmarshal(m, b)
+}
+func (m *OfpAsyncConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpAsyncConfig.Marshal(b, m, deterministic)
+}
+func (m *OfpAsyncConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpAsyncConfig.Merge(m, src)
+}
+func (m *OfpAsyncConfig) XXX_Size() int {
+	return xxx_messageInfo_OfpAsyncConfig.Size(m)
+}
+func (m *OfpAsyncConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpAsyncConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpAsyncConfig proto.InternalMessageInfo
+
+func (m *OfpAsyncConfig) GetPacketInMask() []uint32 {
+	if m != nil {
+		return m.PacketInMask
+	}
+	return nil
+}
+
+func (m *OfpAsyncConfig) GetPortStatusMask() []uint32 {
+	if m != nil {
+		return m.PortStatusMask
+	}
+	return nil
+}
+
+func (m *OfpAsyncConfig) GetFlowRemovedMask() []uint32 {
+	if m != nil {
+		return m.FlowRemovedMask
+	}
+	return nil
+}
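+
+// Note: per the OpenFlow 1.3 spec, each mask slice above carries two bitmaps;
+// element 0 applies while the controller role is OFPCR_ROLE_EQUAL or
+// OFPCR_ROLE_MASTER, element 1 while it is OFPCR_ROLE_SLAVE. A minimal,
+// illustrative sketch (not generated code) that enables OFPR_NO_MATCH
+// packet-ins in both roles:
+//
+//	cfg := &OfpAsyncConfig{
+//		PacketInMask: []uint32{
+//			1 << uint(OfpPacketInReason_OFPR_NO_MATCH),
+//			1 << uint(OfpPacketInReason_OFPR_NO_MATCH),
+//		},
+//	}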
+
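+// A meter_mod update for the logical device identified by id.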
+type MeterModUpdate struct {
+	Id                   string       `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	MeterMod             *OfpMeterMod `protobuf:"bytes,2,opt,name=meter_mod,json=meterMod,proto3" json:"meter_mod,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *MeterModUpdate) Reset()         { *m = MeterModUpdate{} }
+func (m *MeterModUpdate) String() string { return proto.CompactTextString(m) }
+func (*MeterModUpdate) ProtoMessage()    {}
+func (*MeterModUpdate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{85}
+}
+
+func (m *MeterModUpdate) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MeterModUpdate.Unmarshal(m, b)
+}
+func (m *MeterModUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MeterModUpdate.Marshal(b, m, deterministic)
+}
+func (m *MeterModUpdate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MeterModUpdate.Merge(m, src)
+}
+func (m *MeterModUpdate) XXX_Size() int {
+	return xxx_messageInfo_MeterModUpdate.Size(m)
+}
+func (m *MeterModUpdate) XXX_DiscardUnknown() {
+	xxx_messageInfo_MeterModUpdate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MeterModUpdate proto.InternalMessageInfo
+
+func (m *MeterModUpdate) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *MeterModUpdate) GetMeterMod() *OfpMeterMod {
+	if m != nil {
+		return m.MeterMod
+	}
+	return nil
+}
+
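+// A set of meter statistics carried in a stats reply.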
+type MeterStatsReply struct {
+	MeterStats           []*OfpMeterStats `protobuf:"bytes,1,rep,name=meter_stats,json=meterStats,proto3" json:"meter_stats,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *MeterStatsReply) Reset()         { *m = MeterStatsReply{} }
+func (m *MeterStatsReply) String() string { return proto.CompactTextString(m) }
+func (*MeterStatsReply) ProtoMessage()    {}
+func (*MeterStatsReply) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{86}
+}
+
+func (m *MeterStatsReply) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MeterStatsReply.Unmarshal(m, b)
+}
+func (m *MeterStatsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MeterStatsReply.Marshal(b, m, deterministic)
+}
+func (m *MeterStatsReply) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MeterStatsReply.Merge(m, src)
+}
+func (m *MeterStatsReply) XXX_Size() int {
+	return xxx_messageInfo_MeterStatsReply.Size(m)
+}
+func (m *MeterStatsReply) XXX_DiscardUnknown() {
+	xxx_messageInfo_MeterStatsReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MeterStatsReply proto.InternalMessageInfo
+
+func (m *MeterStatsReply) GetMeterStats() []*OfpMeterStats {
+	if m != nil {
+		return m.MeterStats
+	}
+	return nil
+}
+
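+// A flow_mod update for the logical device identified by id.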
+type FlowTableUpdate struct {
+	Id                   string      `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	FlowMod              *OfpFlowMod `protobuf:"bytes,2,opt,name=flow_mod,json=flowMod,proto3" json:"flow_mod,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
+}
+
+func (m *FlowTableUpdate) Reset()         { *m = FlowTableUpdate{} }
+func (m *FlowTableUpdate) String() string { return proto.CompactTextString(m) }
+func (*FlowTableUpdate) ProtoMessage()    {}
+func (*FlowTableUpdate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{87}
+}
+
+func (m *FlowTableUpdate) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FlowTableUpdate.Unmarshal(m, b)
+}
+func (m *FlowTableUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FlowTableUpdate.Marshal(b, m, deterministic)
+}
+func (m *FlowTableUpdate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FlowTableUpdate.Merge(m, src)
+}
+func (m *FlowTableUpdate) XXX_Size() int {
+	return xxx_messageInfo_FlowTableUpdate.Size(m)
+}
+func (m *FlowTableUpdate) XXX_DiscardUnknown() {
+	xxx_messageInfo_FlowTableUpdate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlowTableUpdate proto.InternalMessageInfo
+
+func (m *FlowTableUpdate) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *FlowTableUpdate) GetFlowMod() *OfpFlowMod {
+	if m != nil {
+		return m.FlowMod
+	}
+	return nil
+}
+
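+// A group_mod update for the logical device identified by id.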
+type FlowGroupTableUpdate struct {
+	Id                   string       `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	GroupMod             *OfpGroupMod `protobuf:"bytes,2,opt,name=group_mod,json=groupMod,proto3" json:"group_mod,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *FlowGroupTableUpdate) Reset()         { *m = FlowGroupTableUpdate{} }
+func (m *FlowGroupTableUpdate) String() string { return proto.CompactTextString(m) }
+func (*FlowGroupTableUpdate) ProtoMessage()    {}
+func (*FlowGroupTableUpdate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{88}
+}
+
+func (m *FlowGroupTableUpdate) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FlowGroupTableUpdate.Unmarshal(m, b)
+}
+func (m *FlowGroupTableUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FlowGroupTableUpdate.Marshal(b, m, deterministic)
+}
+func (m *FlowGroupTableUpdate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FlowGroupTableUpdate.Merge(m, src)
+}
+func (m *FlowGroupTableUpdate) XXX_Size() int {
+	return xxx_messageInfo_FlowGroupTableUpdate.Size(m)
+}
+func (m *FlowGroupTableUpdate) XXX_DiscardUnknown() {
+	xxx_messageInfo_FlowGroupTableUpdate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlowGroupTableUpdate proto.InternalMessageInfo
+
+func (m *FlowGroupTableUpdate) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *FlowGroupTableUpdate) GetGroupMod() *OfpGroupMod {
+	if m != nil {
+		return m.GroupMod
+	}
+	return nil
+}
+
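+// A list of flow stats entries.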
+type Flows struct {
+	Items                []*OfpFlowStats `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *Flows) Reset()         { *m = Flows{} }
+func (m *Flows) String() string { return proto.CompactTextString(m) }
+func (*Flows) ProtoMessage()    {}
+func (*Flows) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{89}
+}
+
+func (m *Flows) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Flows.Unmarshal(m, b)
+}
+func (m *Flows) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Flows.Marshal(b, m, deterministic)
+}
+func (m *Flows) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Flows.Merge(m, src)
+}
+func (m *Flows) XXX_Size() int {
+	return xxx_messageInfo_Flows.Size(m)
+}
+func (m *Flows) XXX_DiscardUnknown() {
+	xxx_messageInfo_Flows.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Flows proto.InternalMessageInfo
+
+func (m *Flows) GetItems() []*OfpFlowStats {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
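+// A list of meter entries.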
+type Meters struct {
+	Items                []*OfpMeterEntry `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *Meters) Reset()         { *m = Meters{} }
+func (m *Meters) String() string { return proto.CompactTextString(m) }
+func (*Meters) ProtoMessage()    {}
+func (*Meters) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{90}
+}
+
+func (m *Meters) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Meters.Unmarshal(m, b)
+}
+func (m *Meters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Meters.Marshal(b, m, deterministic)
+}
+func (m *Meters) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Meters.Merge(m, src)
+}
+func (m *Meters) XXX_Size() int {
+	return xxx_messageInfo_Meters.Size(m)
+}
+func (m *Meters) XXX_DiscardUnknown() {
+	xxx_messageInfo_Meters.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Meters proto.InternalMessageInfo
+
+func (m *Meters) GetItems() []*OfpMeterEntry {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
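+// A list of group entries.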
+type FlowGroups struct {
+	Items                []*OfpGroupEntry `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *FlowGroups) Reset()         { *m = FlowGroups{} }
+func (m *FlowGroups) String() string { return proto.CompactTextString(m) }
+func (*FlowGroups) ProtoMessage()    {}
+func (*FlowGroups) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{91}
+}
+
+func (m *FlowGroups) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FlowGroups.Unmarshal(m, b)
+}
+func (m *FlowGroups) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FlowGroups.Marshal(b, m, deterministic)
+}
+func (m *FlowGroups) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FlowGroups.Merge(m, src)
+}
+func (m *FlowGroups) XXX_Size() int {
+	return xxx_messageInfo_FlowGroups.Size(m)
+}
+func (m *FlowGroups) XXX_DiscardUnknown() {
+	xxx_messageInfo_FlowGroups.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlowGroups proto.InternalMessageInfo
+
+func (m *FlowGroups) GetItems() []*OfpGroupEntry {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
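+// Flow delta: the flows to add and the flows to remove.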
+type FlowChanges struct {
+	ToAdd                *Flows   `protobuf:"bytes,1,opt,name=to_add,json=toAdd,proto3" json:"to_add,omitempty"`
+	ToRemove             *Flows   `protobuf:"bytes,2,opt,name=to_remove,json=toRemove,proto3" json:"to_remove,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *FlowChanges) Reset()         { *m = FlowChanges{} }
+func (m *FlowChanges) String() string { return proto.CompactTextString(m) }
+func (*FlowChanges) ProtoMessage()    {}
+func (*FlowChanges) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{92}
+}
+
+func (m *FlowChanges) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FlowChanges.Unmarshal(m, b)
+}
+func (m *FlowChanges) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FlowChanges.Marshal(b, m, deterministic)
+}
+func (m *FlowChanges) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FlowChanges.Merge(m, src)
+}
+func (m *FlowChanges) XXX_Size() int {
+	return xxx_messageInfo_FlowChanges.Size(m)
+}
+func (m *FlowChanges) XXX_DiscardUnknown() {
+	xxx_messageInfo_FlowChanges.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlowChanges proto.InternalMessageInfo
+
+func (m *FlowChanges) GetToAdd() *Flows {
+	if m != nil {
+		return m.ToAdd
+	}
+	return nil
+}
+
+func (m *FlowChanges) GetToRemove() *Flows {
+	if m != nil {
+		return m.ToRemove
+	}
+	return nil
+}
+
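+// Group delta: the flow groups to add, to remove, and to update.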
+type FlowGroupChanges struct {
+	ToAdd                *FlowGroups `protobuf:"bytes,1,opt,name=to_add,json=toAdd,proto3" json:"to_add,omitempty"`
+	ToRemove             *FlowGroups `protobuf:"bytes,2,opt,name=to_remove,json=toRemove,proto3" json:"to_remove,omitempty"`
+	ToUpdate             *FlowGroups `protobuf:"bytes,3,opt,name=to_update,json=toUpdate,proto3" json:"to_update,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
+}
+
+func (m *FlowGroupChanges) Reset()         { *m = FlowGroupChanges{} }
+func (m *FlowGroupChanges) String() string { return proto.CompactTextString(m) }
+func (*FlowGroupChanges) ProtoMessage()    {}
+func (*FlowGroupChanges) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{93}
+}
+
+func (m *FlowGroupChanges) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FlowGroupChanges.Unmarshal(m, b)
+}
+func (m *FlowGroupChanges) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FlowGroupChanges.Marshal(b, m, deterministic)
+}
+func (m *FlowGroupChanges) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FlowGroupChanges.Merge(m, src)
+}
+func (m *FlowGroupChanges) XXX_Size() int {
+	return xxx_messageInfo_FlowGroupChanges.Size(m)
+}
+func (m *FlowGroupChanges) XXX_DiscardUnknown() {
+	xxx_messageInfo_FlowGroupChanges.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlowGroupChanges proto.InternalMessageInfo
+
+func (m *FlowGroupChanges) GetToAdd() *FlowGroups {
+	if m != nil {
+		return m.ToAdd
+	}
+	return nil
+}
+
+func (m *FlowGroupChanges) GetToRemove() *FlowGroups {
+	if m != nil {
+		return m.ToRemove
+	}
+	return nil
+}
+
+func (m *FlowGroupChanges) GetToUpdate() *FlowGroups {
+	if m != nil {
+		return m.ToUpdate
+	}
+	return nil
+}
+
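+// A packet_in received from the logical device identified by id.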
+type PacketIn struct {
+	Id                   string       `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	PacketIn             *OfpPacketIn `protobuf:"bytes,2,opt,name=packet_in,json=packetIn,proto3" json:"packet_in,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *PacketIn) Reset()         { *m = PacketIn{} }
+func (m *PacketIn) String() string { return proto.CompactTextString(m) }
+func (*PacketIn) ProtoMessage()    {}
+func (*PacketIn) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{94}
+}
+
+func (m *PacketIn) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PacketIn.Unmarshal(m, b)
+}
+func (m *PacketIn) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PacketIn.Marshal(b, m, deterministic)
+}
+func (m *PacketIn) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PacketIn.Merge(m, src)
+}
+func (m *PacketIn) XXX_Size() int {
+	return xxx_messageInfo_PacketIn.Size(m)
+}
+func (m *PacketIn) XXX_DiscardUnknown() {
+	xxx_messageInfo_PacketIn.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PacketIn proto.InternalMessageInfo
+
+func (m *PacketIn) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *PacketIn) GetPacketIn() *OfpPacketIn {
+	if m != nil {
+		return m.PacketIn
+	}
+	return nil
+}
+
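+// A packet_out to be sent via the logical device identified by id.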
+type PacketOut struct {
+	Id                   string        `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	PacketOut            *OfpPacketOut `protobuf:"bytes,2,opt,name=packet_out,json=packetOut,proto3" json:"packet_out,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *PacketOut) Reset()         { *m = PacketOut{} }
+func (m *PacketOut) String() string { return proto.CompactTextString(m) }
+func (*PacketOut) ProtoMessage()    {}
+func (*PacketOut) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{95}
+}
+
+func (m *PacketOut) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PacketOut.Unmarshal(m, b)
+}
+func (m *PacketOut) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PacketOut.Marshal(b, m, deterministic)
+}
+func (m *PacketOut) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PacketOut.Merge(m, src)
+}
+func (m *PacketOut) XXX_Size() int {
+	return xxx_messageInfo_PacketOut.Size(m)
+}
+func (m *PacketOut) XXX_DiscardUnknown() {
+	xxx_messageInfo_PacketOut.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PacketOut proto.InternalMessageInfo
+
+func (m *PacketOut) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *PacketOut) GetPacketOut() *OfpPacketOut {
+	if m != nil {
+		return m.PacketOut
+	}
+	return nil
+}
+
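+// An asynchronous change event (currently only a port status change) for the
+// logical device identified by id.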
+type ChangeEvent struct {
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// Types that are valid to be assigned to Event:
+	//	*ChangeEvent_PortStatus
+	Event                isChangeEvent_Event `protobuf_oneof:"event"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *ChangeEvent) Reset()         { *m = ChangeEvent{} }
+func (m *ChangeEvent) String() string { return proto.CompactTextString(m) }
+func (*ChangeEvent) ProtoMessage()    {}
+func (*ChangeEvent) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{96}
+}
+
+func (m *ChangeEvent) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ChangeEvent.Unmarshal(m, b)
+}
+func (m *ChangeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ChangeEvent.Marshal(b, m, deterministic)
+}
+func (m *ChangeEvent) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ChangeEvent.Merge(m, src)
+}
+func (m *ChangeEvent) XXX_Size() int {
+	return xxx_messageInfo_ChangeEvent.Size(m)
+}
+func (m *ChangeEvent) XXX_DiscardUnknown() {
+	xxx_messageInfo_ChangeEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ChangeEvent proto.InternalMessageInfo
+
+func (m *ChangeEvent) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+type isChangeEvent_Event interface {
+	isChangeEvent_Event()
+}
+
+type ChangeEvent_PortStatus struct {
+	PortStatus *OfpPortStatus `protobuf:"bytes,2,opt,name=port_status,json=portStatus,proto3,oneof"`
+}
+
+func (*ChangeEvent_PortStatus) isChangeEvent_Event() {}
+
+func (m *ChangeEvent) GetEvent() isChangeEvent_Event {
+	if m != nil {
+		return m.Event
+	}
+	return nil
+}
+
+func (m *ChangeEvent) GetPortStatus() *OfpPortStatus {
+	if x, ok := m.GetEvent().(*ChangeEvent_PortStatus); ok {
+		return x.PortStatus
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*ChangeEvent) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*ChangeEvent_PortStatus)(nil),
+	}
+}
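+
+// Illustrative sketch (not generated code): consumers can either call the
+// GetPortStatus accessor above or type-switch on the oneof directly; the
+// handlePortStatus helper named here is hypothetical.
+//
+//	switch ev := event.Event.(type) {
+//	case *ChangeEvent_PortStatus:
+//		handlePortStatus(ev.PortStatus)
+//	default:
+//		// no event variant set
+//	}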
+
+func init() {
+	proto.RegisterEnum("openflow_13.OfpPortNo", OfpPortNo_name, OfpPortNo_value)
+	proto.RegisterEnum("openflow_13.OfpType", OfpType_name, OfpType_value)
+	proto.RegisterEnum("openflow_13.OfpHelloElemType", OfpHelloElemType_name, OfpHelloElemType_value)
+	proto.RegisterEnum("openflow_13.OfpConfigFlags", OfpConfigFlags_name, OfpConfigFlags_value)
+	proto.RegisterEnum("openflow_13.OfpTableConfig", OfpTableConfig_name, OfpTableConfig_value)
+	proto.RegisterEnum("openflow_13.OfpTable", OfpTable_name, OfpTable_value)
+	proto.RegisterEnum("openflow_13.OfpCapabilities", OfpCapabilities_name, OfpCapabilities_value)
+	proto.RegisterEnum("openflow_13.OfpPortConfig", OfpPortConfig_name, OfpPortConfig_value)
+	proto.RegisterEnum("openflow_13.OfpPortState", OfpPortState_name, OfpPortState_value)
+	proto.RegisterEnum("openflow_13.OfpPortFeatures", OfpPortFeatures_name, OfpPortFeatures_value)
+	proto.RegisterEnum("openflow_13.OfpPortReason", OfpPortReason_name, OfpPortReason_value)
+	proto.RegisterEnum("openflow_13.OfpMatchType", OfpMatchType_name, OfpMatchType_value)
+	proto.RegisterEnum("openflow_13.OfpOxmClass", OfpOxmClass_name, OfpOxmClass_value)
+	proto.RegisterEnum("openflow_13.OxmOfbFieldTypes", OxmOfbFieldTypes_name, OxmOfbFieldTypes_value)
+	proto.RegisterEnum("openflow_13.OfpVlanId", OfpVlanId_name, OfpVlanId_value)
+	proto.RegisterEnum("openflow_13.OfpIpv6ExthdrFlags", OfpIpv6ExthdrFlags_name, OfpIpv6ExthdrFlags_value)
+	proto.RegisterEnum("openflow_13.OfpActionType", OfpActionType_name, OfpActionType_value)
+	proto.RegisterEnum("openflow_13.OfpControllerMaxLen", OfpControllerMaxLen_name, OfpControllerMaxLen_value)
+	proto.RegisterEnum("openflow_13.OfpInstructionType", OfpInstructionType_name, OfpInstructionType_value)
+	proto.RegisterEnum("openflow_13.OfpFlowModCommand", OfpFlowModCommand_name, OfpFlowModCommand_value)
+	proto.RegisterEnum("openflow_13.OfpFlowModFlags", OfpFlowModFlags_name, OfpFlowModFlags_value)
+	proto.RegisterEnum("openflow_13.OfpGroup", OfpGroup_name, OfpGroup_value)
+	proto.RegisterEnum("openflow_13.OfpGroupModCommand", OfpGroupModCommand_name, OfpGroupModCommand_value)
+	proto.RegisterEnum("openflow_13.OfpGroupType", OfpGroupType_name, OfpGroupType_value)
+	proto.RegisterEnum("openflow_13.OfpPacketInReason", OfpPacketInReason_name, OfpPacketInReason_value)
+	proto.RegisterEnum("openflow_13.OfpFlowRemovedReason", OfpFlowRemovedReason_name, OfpFlowRemovedReason_value)
+	proto.RegisterEnum("openflow_13.OfpMeter", OfpMeter_name, OfpMeter_value)
+	proto.RegisterEnum("openflow_13.OfpMeterBandType", OfpMeterBandType_name, OfpMeterBandType_value)
+	proto.RegisterEnum("openflow_13.OfpMeterModCommand", OfpMeterModCommand_name, OfpMeterModCommand_value)
+	proto.RegisterEnum("openflow_13.OfpMeterFlags", OfpMeterFlags_name, OfpMeterFlags_value)
+	proto.RegisterEnum("openflow_13.OfpErrorType", OfpErrorType_name, OfpErrorType_value)
+	proto.RegisterEnum("openflow_13.OfpHelloFailedCode", OfpHelloFailedCode_name, OfpHelloFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpBadRequestCode", OfpBadRequestCode_name, OfpBadRequestCode_value)
+	proto.RegisterEnum("openflow_13.OfpBadActionCode", OfpBadActionCode_name, OfpBadActionCode_value)
+	proto.RegisterEnum("openflow_13.OfpBadInstructionCode", OfpBadInstructionCode_name, OfpBadInstructionCode_value)
+	proto.RegisterEnum("openflow_13.OfpBadMatchCode", OfpBadMatchCode_name, OfpBadMatchCode_value)
+	proto.RegisterEnum("openflow_13.OfpFlowModFailedCode", OfpFlowModFailedCode_name, OfpFlowModFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpGroupModFailedCode", OfpGroupModFailedCode_name, OfpGroupModFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpPortModFailedCode", OfpPortModFailedCode_name, OfpPortModFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpTableModFailedCode", OfpTableModFailedCode_name, OfpTableModFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpQueueOpFailedCode", OfpQueueOpFailedCode_name, OfpQueueOpFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpSwitchConfigFailedCode", OfpSwitchConfigFailedCode_name, OfpSwitchConfigFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpRoleRequestFailedCode", OfpRoleRequestFailedCode_name, OfpRoleRequestFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpMeterModFailedCode", OfpMeterModFailedCode_name, OfpMeterModFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpTableFeaturesFailedCode", OfpTableFeaturesFailedCode_name, OfpTableFeaturesFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpMultipartType", OfpMultipartType_name, OfpMultipartType_value)
+	proto.RegisterEnum("openflow_13.OfpMultipartRequestFlags", OfpMultipartRequestFlags_name, OfpMultipartRequestFlags_value)
+	proto.RegisterEnum("openflow_13.OfpMultipartReplyFlags", OfpMultipartReplyFlags_name, OfpMultipartReplyFlags_value)
+	proto.RegisterEnum("openflow_13.OfpTableFeaturePropType", OfpTableFeaturePropType_name, OfpTableFeaturePropType_value)
+	proto.RegisterEnum("openflow_13.OfpGroupCapabilities", OfpGroupCapabilities_name, OfpGroupCapabilities_value)
+	proto.RegisterEnum("openflow_13.OfpQueueProperties", OfpQueueProperties_name, OfpQueueProperties_value)
+	proto.RegisterEnum("openflow_13.OfpControllerRole", OfpControllerRole_name, OfpControllerRole_value)
+	proto.RegisterType((*OfpHeader)(nil), "openflow_13.ofp_header")
+	proto.RegisterType((*OfpHelloElemHeader)(nil), "openflow_13.ofp_hello_elem_header")
+	proto.RegisterType((*OfpHelloElemVersionbitmap)(nil), "openflow_13.ofp_hello_elem_versionbitmap")
+	proto.RegisterType((*OfpHello)(nil), "openflow_13.ofp_hello")
+	proto.RegisterType((*OfpSwitchConfig)(nil), "openflow_13.ofp_switch_config")
+	proto.RegisterType((*OfpTableMod)(nil), "openflow_13.ofp_table_mod")
+	proto.RegisterType((*OfpPort)(nil), "openflow_13.ofp_port")
+	proto.RegisterType((*OfpSwitchFeatures)(nil), "openflow_13.ofp_switch_features")
+	proto.RegisterType((*OfpPortStatus)(nil), "openflow_13.ofp_port_status")
+	proto.RegisterType((*OfpPortMod)(nil), "openflow_13.ofp_port_mod")
+	proto.RegisterType((*OfpMatch)(nil), "openflow_13.ofp_match")
+	proto.RegisterType((*OfpOxmField)(nil), "openflow_13.ofp_oxm_field")
+	proto.RegisterType((*OfpOxmOfbField)(nil), "openflow_13.ofp_oxm_ofb_field")
+	proto.RegisterType((*OfpOxmExperimenterField)(nil), "openflow_13.ofp_oxm_experimenter_field")
+	proto.RegisterType((*OfpAction)(nil), "openflow_13.ofp_action")
+	proto.RegisterType((*OfpActionOutput)(nil), "openflow_13.ofp_action_output")
+	proto.RegisterType((*OfpActionMplsTtl)(nil), "openflow_13.ofp_action_mpls_ttl")
+	proto.RegisterType((*OfpActionPush)(nil), "openflow_13.ofp_action_push")
+	proto.RegisterType((*OfpActionPopMpls)(nil), "openflow_13.ofp_action_pop_mpls")
+	proto.RegisterType((*OfpActionGroup)(nil), "openflow_13.ofp_action_group")
+	proto.RegisterType((*OfpActionNwTtl)(nil), "openflow_13.ofp_action_nw_ttl")
+	proto.RegisterType((*OfpActionSetField)(nil), "openflow_13.ofp_action_set_field")
+	proto.RegisterType((*OfpActionExperimenter)(nil), "openflow_13.ofp_action_experimenter")
+	proto.RegisterType((*OfpInstruction)(nil), "openflow_13.ofp_instruction")
+	proto.RegisterType((*OfpInstructionGotoTable)(nil), "openflow_13.ofp_instruction_goto_table")
+	proto.RegisterType((*OfpInstructionWriteMetadata)(nil), "openflow_13.ofp_instruction_write_metadata")
+	proto.RegisterType((*OfpInstructionActions)(nil), "openflow_13.ofp_instruction_actions")
+	proto.RegisterType((*OfpInstructionMeter)(nil), "openflow_13.ofp_instruction_meter")
+	proto.RegisterType((*OfpInstructionExperimenter)(nil), "openflow_13.ofp_instruction_experimenter")
+	proto.RegisterType((*OfpFlowMod)(nil), "openflow_13.ofp_flow_mod")
+	proto.RegisterType((*OfpBucket)(nil), "openflow_13.ofp_bucket")
+	proto.RegisterType((*OfpGroupMod)(nil), "openflow_13.ofp_group_mod")
+	proto.RegisterType((*OfpPacketOut)(nil), "openflow_13.ofp_packet_out")
+	proto.RegisterType((*OfpPacketIn)(nil), "openflow_13.ofp_packet_in")
+	proto.RegisterType((*OfpFlowRemoved)(nil), "openflow_13.ofp_flow_removed")
+	proto.RegisterType((*OfpMeterBandHeader)(nil), "openflow_13.ofp_meter_band_header")
+	proto.RegisterType((*OfpMeterBandDrop)(nil), "openflow_13.ofp_meter_band_drop")
+	proto.RegisterType((*OfpMeterBandDscpRemark)(nil), "openflow_13.ofp_meter_band_dscp_remark")
+	proto.RegisterType((*OfpMeterBandExperimenter)(nil), "openflow_13.ofp_meter_band_experimenter")
+	proto.RegisterType((*OfpMeterMod)(nil), "openflow_13.ofp_meter_mod")
+	proto.RegisterType((*OfpErrorMsg)(nil), "openflow_13.ofp_error_msg")
+	proto.RegisterType((*OfpErrorExperimenterMsg)(nil), "openflow_13.ofp_error_experimenter_msg")
+	proto.RegisterType((*OfpMultipartRequest)(nil), "openflow_13.ofp_multipart_request")
+	proto.RegisterType((*OfpMultipartReply)(nil), "openflow_13.ofp_multipart_reply")
+	proto.RegisterType((*OfpDesc)(nil), "openflow_13.ofp_desc")
+	proto.RegisterType((*OfpFlowStatsRequest)(nil), "openflow_13.ofp_flow_stats_request")
+	proto.RegisterType((*OfpFlowStats)(nil), "openflow_13.ofp_flow_stats")
+	proto.RegisterType((*OfpAggregateStatsRequest)(nil), "openflow_13.ofp_aggregate_stats_request")
+	proto.RegisterType((*OfpAggregateStatsReply)(nil), "openflow_13.ofp_aggregate_stats_reply")
+	proto.RegisterType((*OfpTableFeatureProperty)(nil), "openflow_13.ofp_table_feature_property")
+	proto.RegisterType((*OfpTableFeaturePropInstructions)(nil), "openflow_13.ofp_table_feature_prop_instructions")
+	proto.RegisterType((*OfpTableFeaturePropNextTables)(nil), "openflow_13.ofp_table_feature_prop_next_tables")
+	proto.RegisterType((*OfpTableFeaturePropActions)(nil), "openflow_13.ofp_table_feature_prop_actions")
+	proto.RegisterType((*OfpTableFeaturePropOxm)(nil), "openflow_13.ofp_table_feature_prop_oxm")
+	proto.RegisterType((*OfpTableFeaturePropExperimenter)(nil), "openflow_13.ofp_table_feature_prop_experimenter")
+	proto.RegisterType((*OfpTableFeatures)(nil), "openflow_13.ofp_table_features")
+	proto.RegisterType((*OfpTableStats)(nil), "openflow_13.ofp_table_stats")
+	proto.RegisterType((*OfpPortStatsRequest)(nil), "openflow_13.ofp_port_stats_request")
+	proto.RegisterType((*OfpPortStats)(nil), "openflow_13.ofp_port_stats")
+	proto.RegisterType((*OfpGroupStatsRequest)(nil), "openflow_13.ofp_group_stats_request")
+	proto.RegisterType((*OfpBucketCounter)(nil), "openflow_13.ofp_bucket_counter")
+	proto.RegisterType((*OfpGroupStats)(nil), "openflow_13.ofp_group_stats")
+	proto.RegisterType((*OfpGroupDesc)(nil), "openflow_13.ofp_group_desc")
+	proto.RegisterType((*OfpGroupEntry)(nil), "openflow_13.ofp_group_entry")
+	proto.RegisterType((*OfpGroupFeatures)(nil), "openflow_13.ofp_group_features")
+	proto.RegisterType((*OfpMeterMultipartRequest)(nil), "openflow_13.ofp_meter_multipart_request")
+	proto.RegisterType((*OfpMeterBandStats)(nil), "openflow_13.ofp_meter_band_stats")
+	proto.RegisterType((*OfpMeterStats)(nil), "openflow_13.ofp_meter_stats")
+	proto.RegisterType((*OfpMeterConfig)(nil), "openflow_13.ofp_meter_config")
+	proto.RegisterType((*OfpMeterFeatures)(nil), "openflow_13.ofp_meter_features")
+	proto.RegisterType((*OfpMeterEntry)(nil), "openflow_13.ofp_meter_entry")
+	proto.RegisterType((*OfpExperimenterMultipartHeader)(nil), "openflow_13.ofp_experimenter_multipart_header")
+	proto.RegisterType((*OfpExperimenterHeader)(nil), "openflow_13.ofp_experimenter_header")
+	proto.RegisterType((*OfpQueuePropHeader)(nil), "openflow_13.ofp_queue_prop_header")
+	proto.RegisterType((*OfpQueuePropMinRate)(nil), "openflow_13.ofp_queue_prop_min_rate")
+	proto.RegisterType((*OfpQueuePropMaxRate)(nil), "openflow_13.ofp_queue_prop_max_rate")
+	proto.RegisterType((*OfpQueuePropExperimenter)(nil), "openflow_13.ofp_queue_prop_experimenter")
+	proto.RegisterType((*OfpPacketQueue)(nil), "openflow_13.ofp_packet_queue")
+	proto.RegisterType((*OfpQueueGetConfigRequest)(nil), "openflow_13.ofp_queue_get_config_request")
+	proto.RegisterType((*OfpQueueGetConfigReply)(nil), "openflow_13.ofp_queue_get_config_reply")
+	proto.RegisterType((*OfpActionSetQueue)(nil), "openflow_13.ofp_action_set_queue")
+	proto.RegisterType((*OfpQueueStatsRequest)(nil), "openflow_13.ofp_queue_stats_request")
+	proto.RegisterType((*OfpQueueStats)(nil), "openflow_13.ofp_queue_stats")
+	proto.RegisterType((*OfpRoleRequest)(nil), "openflow_13.ofp_role_request")
+	proto.RegisterType((*OfpAsyncConfig)(nil), "openflow_13.ofp_async_config")
+	proto.RegisterType((*MeterModUpdate)(nil), "openflow_13.MeterModUpdate")
+	proto.RegisterType((*MeterStatsReply)(nil), "openflow_13.MeterStatsReply")
+	proto.RegisterType((*FlowTableUpdate)(nil), "openflow_13.FlowTableUpdate")
+	proto.RegisterType((*FlowGroupTableUpdate)(nil), "openflow_13.FlowGroupTableUpdate")
+	proto.RegisterType((*Flows)(nil), "openflow_13.Flows")
+	proto.RegisterType((*Meters)(nil), "openflow_13.Meters")
+	proto.RegisterType((*FlowGroups)(nil), "openflow_13.FlowGroups")
+	proto.RegisterType((*FlowChanges)(nil), "openflow_13.FlowChanges")
+	proto.RegisterType((*FlowGroupChanges)(nil), "openflow_13.FlowGroupChanges")
+	proto.RegisterType((*PacketIn)(nil), "openflow_13.PacketIn")
+	proto.RegisterType((*PacketOut)(nil), "openflow_13.PacketOut")
+	proto.RegisterType((*ChangeEvent)(nil), "openflow_13.ChangeEvent")
+}
+
+func init() { proto.RegisterFile("voltha_protos/openflow_13.proto", fileDescriptor_08e3a4e375aeddc7) }
+
+var fileDescriptor_08e3a4e375aeddc7 = []byte{
+	// 8423 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x7d, 0x5b, 0x8c, 0x1b, 0x59,
+	0x76, 0x98, 0xf8, 0xe8, 0x6e, 0xf2, 0xb2, 0xbb, 0x55, 0x2a, 0xbd, 0x28, 0xb5, 0x34, 0x92, 0x38,
+	0x33, 0xbb, 0xb3, 0xdc, 0x78, 0x34, 0x7a, 0xac, 0x76, 0xbd, 0x0f, 0x47, 0x45, 0xb2, 0xd8, 0xe4,
+	0x88, 0x2f, 0x55, 0x55, 0xb7, 0xa4, 0x0d, 0x92, 0x02, 0x9b, 0x2c, 0x75, 0xd3, 0x43, 0xb2, 0xb8,
+	0x55, 0xd5, 0xad, 0x96, 0x63, 0x07, 0x4a, 0x8c, 0x20, 0x40, 0x12, 0xdb, 0x09, 0xfc, 0xb1, 0x40,
+	0xe0, 0x00, 0x31, 0x92, 0x7c, 0x04, 0x01, 0xf2, 0x11, 0x20, 0x40, 0x80, 0x7c, 0x1b, 0x48, 0x80,
+	0x20, 0x01, 0x0c, 0x04, 0xfe, 0xb1, 0xff, 0x9c, 0x9f, 0x00, 0xfe, 0x4f, 0x36, 0x9b, 0x55, 0x70,
+	0xee, 0x39, 0xf7, 0xd6, 0x2d, 0x3e, 0x7a, 0x7a, 0x37, 0xb3, 0xf9, 0xf0, 0x97, 0x58, 0xe7, 0x75,
+	0xcf, 0x3d, 0xf7, 0x9c, 0x73, 0xcf, 0x3d, 0x75, 0xab, 0xc5, 0xee, 0x9c, 0xf8, 0xe3, 0xe8, 0xa8,
+	0xef, 0xce, 0x02, 0x3f, 0xf2, 0xc3, 0xfb, 0xfe, 0xcc, 0x9b, 0xbe, 0x1e, 0xfb, 0x6f, 0xdc, 0x07,
+	0x8f, 0x3e, 0xe5, 0x20, 0xbd, 0xa0, 0x80, 0x6e, 0xde, 0x3a, 0xf4, 0xfd, 0xc3, 0xb1, 0x77, 0xbf,
+	0x3f, 0x1b, 0xdd, 0xef, 0x4f, 0xa7, 0x7e, 0xd4, 0x8f, 0x46, 0xfe, 0x34, 0x44, 0xd2, 0xd2, 0x80,
+	0x31, 0xff, 0xf5, 0xcc, 0x3d, 0xf2, 0xfa, 0x43, 0x2f, 0xd0, 0x8b, 0x6c, 0xe3, 0xc4, 0x0b, 0xc2,
+	0x91, 0x3f, 0x2d, 0xa6, 0xee, 0xa6, 0x3e, 0xd9, 0xb2, 0xc4, 0xa3, 0xfe, 0x0d, 0x96, 0x8d, 0xde,
+	0xce, 0xbc, 0x62, 0xfa, 0x6e, 0xea, 0x93, 0xed, 0x87, 0x57, 0x3f, 0x55, 0x07, 0x05, 0x01, 0x80,
+	0xb4, 0x38, 0x89, 0xae, 0xb1, 0xcc, 0xe9, 0x68, 0x58, 0xcc, 0x70, 0x01, 0xf0, 0xb3, 0xf4, 0xaf,
+	0x53, 0xec, 0x2a, 0x8e, 0x32, 0x1e, 0xfb, 0xae, 0x37, 0xf6, 0x26, 0x62, 0xc0, 0xc7, 0x24, 0x36,
+	0xc5, 0xc5, 0xde, 0x5d, 0x10, 0xab, 0x70, 0x28, 0x23, 0x3c, 0x67, 0x5b, 0xa4, 0xd7, 0xc1, 0x28,
+	0x9a, 0xf4, 0x67, 0x5c, 0xab, 0xc2, 0xc3, 0x6f, 0x9c, 0xc5, 0x9e, 0x60, 0x68, 0x5c, 0xb0, 0x92,
+	0x12, 0x2a, 0x79, 0xb6, 0x01, 0x64, 0xde, 0x34, 0x2a, 0x7d, 0x87, 0xdd, 0x3a, 0x8b, 0x17, 0x8c,
+	0x84, 0xbf, 0xc2, 0x62, 0xfa, 0x6e, 0x06, 0x8c, 0x44, 0x8f, 0xa5, 0x67, 0x2c, 0x2f, 0x39, 0xf5,
+	0x5f, 0x63, 0x39, 0x92, 0x18, 0x16, 0x53, 0x77, 0x33, 0x9f, 0x14, 0x1e, 0x96, 0xce, 0xd2, 0x0f,
+	0x0d, 0x62, 0x49, 0x9e, 0x52, 0x9b, 0x5d, 0x02, 0x92, 0xf0, 0xcd, 0x28, 0x1a, 0x1c, 0xb9, 0x03,
+	0x7f, 0xfa, 0x7a, 0x74, 0xa8, 0x5f, 0x61, 0x6b, 0xaf, 0xc7, 0xfd, 0xc3, 0x90, 0x96, 0x07, 0x1f,
+	0xf4, 0x12, 0xdb, 0x9a, 0x8c, 0xc2, 0xd0, 0x0d, 0xbd, 0xe9, 0xd0, 0x1d, 0x7b, 0x53, 0x6e, 0x8f,
+	0x2d, 0xab, 0x00, 0x40, 0xdb, 0x9b, 0x0e, 0x5b, 0xde, 0xb4, 0x54, 0x61, 0x5b, 0x7c, 0x9d, 0xfa,
+	0x07, 0x63, 0xcf, 0x9d, 0xf8, 0x43, 0xfd, 0x06, 0xcb, 0xe1, 0xc3, 0x68, 0x28, 0x16, 0x9b, 0x3f,
+	0x37, 0x87, 0xfa, 0x35, 0xb6, 0x8e, 0xe3, 0x91, 0x20, 0x7a, 0x2a, 0xfd, 0xb3, 0x34, 0xcb, 0x81,
+	0x90, 0x99, 0x1f, 0x44, 0xfa, 0x75, 0xb6, 0x01, 0xff, 0xba, 0x53, 0x9f, 0xd8, 0xd7, 0xe1, 0xb1,
+	0xe3, 0x03, 0xe2, 0xe8, 0x8d, 0xdb, 0x1f, 0x0e, 0x03, 0xb2, 0xcf, 0xfa, 0xd1, 0x1b, 0x63, 0x38,
+	0x0c, 0x74, 0x9d, 0x65, 0xa7, 0xfd, 0x89, 0xc7, 0x3d, 0x23, 0x6f, 0xf1, 0xdf, 0xca, 0x50, 0x59,
+	0x75, 0x28, 0x98, 0x68, 0x18, 0xf5, 0x23, 0xaf, 0xb8, 0x86, 0x13, 0xe5, 0x0f, 0x20, 0x61, 0x70,
+	0x1c, 0x04, 0xc5, 0x75, 0x0e, 0xe4, 0xbf, 0xf5, 0x0f, 0x18, 0xeb, 0x0f, 0x4f, 0xbc, 0x20, 0x1a,
+	0x85, 0xde, 0xb0, 0xb8, 0xc1, 0x31, 0x0a, 0x44, 0xbf, 0xc5, 0xf2, 0xe1, 0xf1, 0x0c, 0x74, 0xf3,
+	0x86, 0xc5, 0x1c, 0x47, 0xc7, 0x00, 0x90, 0x38, 0xf3, 0xbc, 0xa0, 0x98, 0x47, 0x89, 0xf0, 0x5b,
+	0xbf, 0xcd, 0x18, 0x48, 0x76, 0xc3, 0x99, 0xe7, 0x0d, 0x8b, 0x0c, 0x59, 0x00, 0x62, 0x03, 0x40,
+	0xdf, 0x61, 0xf9, 0x49, 0xff, 0x94, 0xb0, 0x05, 0x8e, 0xcd, 0x4d, 0xfa, 0xa7, 0x1c, 0x59, 0xfa,
+	0x77, 0x29, 0x76, 0x59, 0x59, 0xb6, 0xd7, 0x5e, 0x3f, 0x3a, 0x0e, 0xbc, 0x50, 0xbf, 0xc3, 0x0a,
+	0xc3, 0x7e, 0xd4, 0x9f, 0xf5, 0xa3, 0x23, 0x61, 0xf0, 0xac, 0xc5, 0x04, 0xa8, 0xc9, 0xa5, 0x4e,
+	0xdd, 0x83, 0xe3, 0xd7, 0xaf, 0xbd, 0x20, 0x24, 0xb3, 0xe7, 0xa6, 0x15, 0x7c, 0x86, 0xb5, 0x9a,
+	0xe2, 0xd2, 0x85, 0x14, 0x57, 0x1b, 0x53, 0x87, 0x3f, 0xea, 0xf7, 0xd8, 0x66, 0xff, 0xf8, 0x74,
+	0x34, 0x1e, 0xf5, 0x83, 0xb7, 0x20, 0x19, 0xcd, 0x58, 0x90, 0xb0, 0xe6, 0x50, 0x2f, 0xb1, 0xcd,
+	0x41, 0x7f, 0xd6, 0x3f, 0x18, 0x8d, 0x47, 0xd1, 0xc8, 0x0b, 0xc9, 0xa4, 0x09, 0x58, 0x29, 0x60,
+	0x17, 0xc5, 0xca, 0xba, 0x60, 0xeb, 0xe3, 0x50, 0x7f, 0xcc, 0xd6, 0x03, 0xaf, 0x1f, 0x52, 0x2e,
+	0xd8, 0x7e, 0x78, 0x6b, 0xc1, 0x7d, 0x39, 0x35, 0xd2, 0x58, 0x44, 0x0b, 0x89, 0x62, 0xe8, 0x85,
+	0x03, 0x0a, 0xc9, 0xab, 0x4b, 0x79, 0x2c, 0x4e, 0x52, 0xfa, 0xfb, 0x29, 0xb6, 0x29, 0xc5, 0x80,
+	0x4b, 0xfe, 0xfc, 0x2e, 0x15, 0xbb, 0x4f, 0x26, 0xe1, 0x3e, 0x3a, 0xcb, 0x4e, 0xfa, 0xe1, 0x17,
+	0x64, 0x0d, 0xfe, 0x1b, 0x1c, 0x41, 0xba, 0x05, 0xd9, 0x20, 0x06, 0x94, 0xde, 0x60, 0xec, 0x4e,
+	0xfa, 0xd1, 0xe0, 0x48, 0xbf, 0x9f, 0x48, 0x4b, 0x3b, 0x0b, 0x93, 0xe0, 0x54, 0x6a, 0x46, 0xfa,
+	0x55, 0xc6, 0xfc, 0xd3, 0x89, 0xfb, 0x7a, 0xe4, 0x8d, 0x87, 0x98, 0x16, 0x0a, 0x0f, 0x6f, 0x2e,
+	0xb0, 0x49, 0x12, 0x2b, 0xef, 0x9f, 0x4e, 0xea, 0x9c, 0xb8, 0xf4, 0x3f, 0x52, 0x18, 0x99, 0x12,
+	0xa9, 0x7f, 0x9b, 0x01, 0xda, 0x1d, 0x8c, 0xfb, 0x61, 0x48, 0x2a, 0x2c, 0x97, 0xc5, 0x29, 0xac,
+	0x9c, 0x7f, 0x3a, 0xa9, 0xc2, 0x2f, 0xfd, 0x07, 0x30, 0x87, 0x03, 0x94, 0xc2, 0xa7, 0x5e, 0x78,
+	0xf8, 0xc1, 0x52, 0x46, 0x49, 0xd5, 0xb8, 0x60, 0xe5, 0xfc, 0xd7, 0x07, 0x5c, 0x15, 0xfd, 0x25,
+	0xd3, 0xbd, 0xd3, 0x99, 0x17, 0x8c, 0x20, 0x01, 0x79, 0x01, 0xc9, 0x59, 0xe3, 0x72, 0xbe, 0xbe,
+	0x54, 0xce, 0x22, 0x79, 0xe3, 0x82, 0x75, 0x49, 0x85, 0x72, 0xc9, 0x95, 0x0d, 0xb6, 0xc6, 0xb1,
+	0xa5, 0x3f, 0xd9, 0xc6, 0xac, 0x96, 0x50, 0xe2, 0xec, 0x5d, 0x40, 0xa5, 0xe4, 0x26, 0x0f, 0xc9,
+	0xe6, 0x37, 0x58, 0xee, 0xa8, 0x1f, 0xba, 0x7c, 0x9d, 0xc1, 0xdb, 0x72, 0xd6, 0xc6, 0x51, 0x3f,
+	0x6c, 0xc3, 0x52, 0x5f, 0x61, 0x59, 0xf0, 0x1c, 0x74, 0x8a, 0xc6, 0x05, 0x8b, 0x3f, 0xe9, 0x1f,
+	0xb3, 0xad, 0xd9, 0xd1, 0xdb, 0x70, 0x34, 0xe8, 0x8f, 0xb9, 0xcf, 0xa1, 0x77, 0x34, 0x2e, 0x58,
+	0x9b, 0x02, 0xdc, 0x03, 0xb2, 0xaf, 0xb3, 0x6d, 0xca, 0x92, 0x5e, 0xd4, 0x87, 0x08, 0xe5, 0x26,
+	0xc8, 0xc2, 0x9e, 0xc1, 0xe1, 0x6d, 0x02, 0xeb, 0x37, 0xd8, 0x86, 0x17, 0x1d, 0xb9, 0xc3, 0x30,
+	0xe2, 0x09, 0x69, 0xb3, 0x71, 0xc1, 0x5a, 0xf7, 0xa2, 0xa3, 0x5a, 0x18, 0x09, 0x54, 0x18, 0x0c,
+	0x78, 0x46, 0x12, 0x28, 0x3b, 0x18, 0xe8, 0x3b, 0x2c, 0x07, 0x28, 0x3e, 0xe1, 0x1c, 0x29, 0x00,
+	0xc4, 0x0e, 0xcc, 0x69, 0x87, 0xe5, 0x4e, 0xc6, 0xfd, 0xa9, 0x7b, 0x32, 0x1a, 0x62, 0x4a, 0x02,
+	0x24, 0x40, 0xf6, 0x47, 0x43, 0x89, 0x9c, 0x0d, 0x66, 0x98, 0x95, 0x04, 0xb2, 0x37, 0x98, 0xc1,
+	0x88, 0xa3, 0x99, 0x3b, 0x0c, 0x07, 0x33, 0xcc, 0x49, 0x30, 0xe2, 0x68, 0x56, 0x0b, 0x07, 0x33,
+	0xfd, 0x3a, 0x5b, 0x1f, 0xcd, 0x5c, 0x6f, 0x30, 0x2d, 0x6e, 0x12, 0x66, 0x6d, 0x34, 0x33, 0x07,
+	0x53, 0x10, 0x38, 0x9a, 0x61, 0x19, 0x51, 0xdc, 0x12, 0x02, 0x47, 0xb3, 0x1e, 0x2f, 0x22, 0x38,
+	0xf2, 0xe4, 0x31, 0x9f, 0xc3, 0x76, 0x8c, 0x3c, 0x79, 0x4c, 0x93, 0xe0, 0x48, 0x98, 0xfb, 0x45,
+	0x15, 0x49, 0x93, 0x8f, 0x06, 0x33, 0xce, 0xa8, 0x09, 0x55, 0xa2, 0xc1, 0x0c, 0xf8, 0x08, 0x05,
+	0x6c, 0x97, 0x14, 0x14, 0x71, 0x1d, 0x0f, 0x91, 0x4b, 0x17, 0xa8, 0xe3, 0xa1, 0xe0, 0x02, 0x14,
+	0x70, 0x5d, 0x56, 0x50, 0xc0, 0xb5, 0xc3, 0x72, 0xe1, 0x20, 0x42, 0xb6, 0x2b, 0x42, 0x11, 0x80,
+	0x90, 0x96, 0x1c, 0x09, 0x8c, 0x57, 0x55, 0x24, 0x70, 0xde, 0x63, 0x85, 0xd1, 0x60, 0x02, 0x93,
+	0xe0, 0x4b, 0x71, 0x8d, 0xf0, 0x0c, 0x81, 0x7c, 0x35, 0x62, 0x92, 0x81, 0x3f, 0xf4, 0x8a, 0xd7,
+	0x93, 0x24, 0x55, 0x7f, 0xe8, 0x81, 0x6d, 0xfb, 0xc1, 0xcc, 0xf5, 0x67, 0xc5, 0xa2, 0xb0, 0x6d,
+	0x3f, 0x98, 0x75, 0xf9, 0x7a, 0x00, 0x22, 0x9c, 0xf5, 0x8b, 0x37, 0x84, 0xce, 0xfd, 0x60, 0x66,
+	0xcf, 0xfa, 0x02, 0x15, 0xcd, 0xfa, 0xc5, 0x9b, 0x0a, 0xca, 0x89, 0x51, 0xe1, 0x51, 0xbf, 0xb8,
+	0x23, 0xfc, 0x06, 0xb8, 0x8e, 0x62, 0xae, 0xa3, 0x7e, 0xf1, 0x96, 0x82, 0x72, 0x8e, 0xfa, 0xb4,
+	0x1a, 0x4f, 0xb8, 0x11, 0x6e, 0x13, 0x0e, 0x56, 0xe3, 0x49, 0xbc, 0x54, 0x4f, 0xb8, 0x11, 0x3e,
+	0x50, 0x91, 0xc2, 0x08, 0x80, 0x7c, 0x3d, 0xee, 0x1f, 0x78, 0xe3, 0xe2, 0x1d, 0x39, 0xc3, 0xd9,
+	0xc9, 0x93, 0x3a, 0x87, 0x49, 0x23, 0x3c, 0x41, 0x3b, 0xdd, 0x4d, 0x18, 0xe1, 0x49, 0xc2, 0x4e,
+	0x4f, 0xd0, 0x4e, 0xf7, 0x92, 0x24, 0xdc, 0x4e, 0x5f, 0x63, 0xdb, 0x7c, 0xa0, 0xe9, 0xd0, 0x8d,
+	0xfa, 0xc1, 0xa1, 0x17, 0x15, 0x4b, 0xa4, 0xcb, 0x26, 0xc0, 0x3b, 0x43, 0x87, 0x43, 0xf5, 0xbb,
+	0xa4, 0xd0, 0x74, 0xe8, 0x86, 0xe1, 0xb8, 0xf8, 0x21, 0x11, 0xe5, 0x91, 0xc8, 0x0e, 0xc7, 0x2a,
+	0x45, 0x34, 0x1e, 0x17, 0x3f, 0x4a, 0x52, 0x38, 0xe3, 0xb1, 0x7e, 0x87, 0xb1, 0xc9, 0x6c, 0x1c,
+	0xba, 0x38, 0xa7, 0x8f, 0x49, 0x9b, 0x3c, 0xc0, 0x5a, 0x7c, 0x4a, 0x37, 0xd8, 0x06, 0x27, 0x88,
+	0x06, 0xc5, 0xaf, 0x89, 0x05, 0x00, 0x80, 0xc3, 0xad, 0xc5, 0x51, 0x07, 0x7e, 0x58, 0xfc, 0xba,
+	0x70, 0x19, 0x80, 0x54, 0xfc, 0x10, 0x90, 0xb3, 0x83, 0x03, 0x77, 0x14, 0x8e, 0x86, 0xc5, 0x4f,
+	0x04, 0x72, 0x76, 0x70, 0xd0, 0x0c, 0x47, 0x43, 0xfd, 0x36, 0xcb, 0x47, 0xc7, 0xd3, 0xa9, 0x37,
+	0x86, 0x5d, 0xf8, 0x1b, 0x94, 0x31, 0x72, 0x08, 0x6a, 0x0e, 0xa5, 0xa5, 0xbd, 0xd3, 0xe8, 0x68,
+	0x18, 0x14, 0xcb, 0xaa, 0xa5, 0x4d, 0x0e, 0xd3, 0x3f, 0x63, 0x97, 0x93, 0x89, 0x07, 0x73, 0xdb,
+	0x88, 0xcb, 0x4a, 0x59, 0x97, 0x12, 0xd9, 0x87, 0xe7, 0xb9, 0x12, 0xdb, 0xa4, 0x0c, 0x84, 0xa4,
+	0xbf, 0xce, 0x8d, 0x91, 0xb2, 0x18, 0xa6, 0x21, 0x95, 0x26, 0x0c, 0x06, 0x48, 0xf3, 0x85, 0x42,
+	0x63, 0x07, 0x03, 0x4e, 0xf3, 0x11, 0xdb, 0x12, 0x69, 0x07, 0x89, 0x26, 0x5c, 0xbd, 0x94, 0x55,
+	0xa0, 0xdc, 0x23, 0xa8, 0x44, 0x46, 0x40, 0xaa, 0x40, 0x50, 0x51, 0x5a, 0x48, 0x50, 0x49, 0xa5,
+	0x42, 0x95, 0x4a, 0xd1, 0x8a, 0xc2, 0x03, 0x89, 0x7e, 0x8b, 0x88, 0x18, 0xc6, 0x88, 0x4a, 0x13,
+	0x09, 0x9a, 0xbf, 0xa5, 0xd0, 0x38, 0x44, 0xf3, 0x31, 0x1f, 0xed, 0x49, 0xac, 0xd3, 0xdf, 0x4e,
+	0xd1, 0xfc, 0x0a, 0x14, 0x00, 0x09, 0x32, 0xa9, 0xd4, 0xdf, 0x49, 0x90, 0x09, 0xad, 0xbe, 0xc9,
+	0x34, 0x25, 0x1c, 0x90, 0xf2, 0xb7, 0x53, 0x34, 0xec, 0x76, 0x1c, 0x14, 0x42, 0xa6, 0xf0, 0x06,
+	0xa4, 0xfc, 0x87, 0x82, 0xb2, 0x40, 0x3e, 0xc1, 0xc9, 0x60, 0x3b, 0x11, 0x7e, 0x81, 0x74, 0xbf,
+	0x93, 0xa2, 0x15, 0xdd, 0x14, 0xde, 0x91, 0x18, 0x1c, 0x3d, 0x04, 0x49, 0x7f, 0x37, 0x31, 0x38,
+	0xfa, 0x09, 0x10, 0xc3, 0x8e, 0x7a, 0xd2, 0x1f, 0x1f, 0x7b, 0x95, 0x75, 0xac, 0x74, 0x4a, 0x2e,
+	0xbb, 0xb9, 0x7a, 0x57, 0x86, 0x92, 0x16, 0x30, 0x78, 0xc8, 0xa0, 0xe2, 0x0a, 0x8a, 0x8c, 0x06,
+	0x1e, 0xc3, 0xc0, 0x47, 0x14, 0x26, 0xaa, 0x3f, 0x13, 0xb0, 0xd2, 0xbf, 0xcd, 0xe2, 0x51, 0xb1,
+	0x3f, 0x80, 0xf3, 0xa3, 0xfe, 0x59, 0x62, 0xcf, 0x5e, 0xac, 0x0d, 0x91, 0x4c, 0xad, 0x91, 0xbe,
+	0xc3, 0xd6, 0xfd, 0xe3, 0x68, 0x76, 0x1c, 0x51, 0x6d, 0xf8, 0xc1, 0x2a, 0x1e, 0xa4, 0x82, 0xa0,
+	0xc4, 0x5f, 0xfa, 0x0f, 0x28, 0x28, 0xa3, 0x68, 0xcc, 0xb7, 0xf4, 0xc2, 0x92, 0x93, 0x22, 0xf1,
+	0x0a, 0x3a, 0x11, 0xb6, 0x4e, 0x34, 0xd6, 0x1f, 0xb2, 0xec, 0xec, 0x38, 0x3c, 0xa2, 0x8a, 0x68,
+	0xa5, 0xaa, 0x40, 0xc3, 0x6b, 0x85, 0xe3, 0xf0, 0x08, 0x86, 0x9c, 0xf9, 0x33, 0x2e, 0x8e, 0x2a,
+	0xa0, 0x95, 0x43, 0x0a, 0x3a, 0x9e, 0x0c, 0xfc, 0x59, 0x7b, 0x36, 0x0e, 0xf5, 0x6f, 0xb1, 0xb5,
+	0xc3, 0xc0, 0x3f, 0x9e, 0xf1, 0xc2, 0xa0, 0xf0, 0xf0, 0xf6, 0x2a, 0x5e, 0x4e, 0x04, 0x9b, 0x06,
+	0xff, 0xa1, 0x7f, 0x9b, 0xad, 0x4f, 0xdf, 0xf0, 0x69, 0x6e, 0x9c, 0x6d, 0x22, 0xa4, 0x02, 0xc6,
+	0xe9, 0x1b, 0x98, 0xe2, 0x53, 0x96, 0x0f, 0xbd, 0x88, 0x2a, 0xb6, 0x1c, 0xe7, 0xbd, 0xb7, 0x8a,
+	0x57, 0x12, 0x42, 0x7e, 0x0a, 0xbd, 0x08, 0x8b, 0xbf, 0xcf, 0xe7, 0x5c, 0x20, 0xcf, 0x85, 0x7c,
+	0xb4, 0x4a, 0x88, 0x4a, 0x0b, 0x49, 0x5c, 0x7d, 0xae, 0xe4, 0xd8, 0x3a, 0x92, 0x95, 0x9e, 0x62,
+	0xb9, 0x97, 0x58, 0x58, 0x7e, 0xe6, 0x82, 0xf2, 0x2b, 0x45, 0x67, 0x2e, 0x3a, 0x4d, 0xc2, 0xa1,
+	0x2a, 0x3e, 0xbc, 0xae, 0x4f, 0xfa, 0xa7, 0x70, 0x6e, 0xfd, 0x0c, 0xcf, 0x53, 0x73, 0xcb, 0x0b,
+	0xc5, 0x9f, 0x74, 0x09, 0x3a, 0xbd, 0xd2, 0x72, 0x97, 0xee, 0xe3, 0x51, 0x46, 0x59, 0x55, 0x28,
+	0xfd, 0xbd, 0xe8, 0xc8, 0x0b, 0xa4, 0xc7, 0x6e, 0x59, 0x31, 0xa0, 0xf4, 0x28, 0x31, 0x84, 0x58,
+	0xce, 0x2f, 0x61, 0xfa, 0x15, 0xa6, 0xcd, 0xaf, 0x23, 0x28, 0xc5, 0x7f, 0x28, 0x47, 0x6a, 0xfe,
+	0xdc, 0x1c, 0x96, 0xca, 0x09, 0x43, 0xe0, 0xf2, 0xe9, 0x57, 0xe5, 0x72, 0xd3, 0x71, 0x9e, 0x2f,
+	0x66, 0xa9, 0xc1, 0xae, 0x2c, 0x5b, 0x2e, 0xfd, 0x33, 0xaa, 0xa2, 0x39, 0xf5, 0xd9, 0xe7, 0x0b,
+	0x2a, 0xb7, 0x9f, 0xb3, 0xeb, 0x2b, 0xd6, 0x6c, 0x21, 0xe4, 0x53, 0x8b, 0x21, 0x0f, 0x0b, 0xc5,
+	0xeb, 0x5f, 0x58, 0x91, 0x4d, 0x8b, 0xff, 0x2e, 0xfd, 0x41, 0x06, 0xcd, 0x3b, 0x9a, 0x86, 0x51,
+	0x70, 0x8c, 0xb9, 0x40, 0x57, 0x72, 0xc1, 0x16, 0x45, 0x7b, 0x83, 0xb1, 0x43, 0x3f, 0xf2, 0xf1,
+	0xd4, 0x4a, 0x11, 0xbf, 0x78, 0x88, 0x50, 0xa4, 0xb8, 0x31, 0x39, 0xec, 0xd6, 0xf0, 0xc4, 0x8f,
+	0xb8, 0xba, 0xc3, 0xb6, 0xdf, 0x04, 0xa3, 0x48, 0xa9, 0xc7, 0x31, 0x07, 0x7c, 0xf3, 0x4c, 0x69,
+	0x49, 0x16, 0x28, 0xde, 0x39, 0x44, 0x16, 0xef, 0x4f, 0xd9, 0x06, 0x9a, 0x25, 0xa4, 0xbc, 0xf0,
+	0xd1, 0x99, 0xe2, 0x88, 0x16, 0x62, 0x9c, 0x7e, 0xea, 0xdf, 0x65, 0x6b, 0x13, 0x0f, 0x4c, 0x87,
+	0xf9, 0xa1, 0x74, 0x26, 0x3f, 0xa7, 0x84, 0x78, 0xe5, 0x3f, 0xf4, 0xee, 0x9c, 0xf5, 0xd7, 0x57,
+	0x34, 0xb0, 0x54, 0x11, 0x67, 0x86, 0xdc, 0x3a, 0x2e, 0x55, 0xe9, 0xdb, 0xb8, 0x0d, 0x2c, 0xb7,
+	0xeb, 0x19, 0x3d, 0x9f, 0x52, 0x9f, 0x7d, 0x70, 0xb6, 0x09, 0xf5, 0x9b, 0x2c, 0x27, 0x57, 0x00,
+	0xfb, 0x17, 0xf2, 0x59, 0xff, 0x90, 0x6d, 0x25, 0x8b, 0x96, 0x34, 0x27, 0xd8, 0x9c, 0x28, 0xd5,
+	0x4a, 0xa9, 0x85, 0xde, 0xb8, 0xc4, 0xac, 0xfa, 0x83, 0x78, 0x35, 0xb0, 0x57, 0x76, 0x7d, 0x45,
+	0xe2, 0x91, 0xe6, 0x2f, 0x3d, 0xc4, 0x9e, 0xe2, 0x82, 0x91, 0x79, 0x6a, 0x80, 0x1f, 0xca, 0x24,
+	0xf9, 0x73, 0x73, 0x58, 0xda, 0xc7, 0xd6, 0xde, 0x2a, 0xab, 0xfe, 0xc2, 0x41, 0xf1, 0xa7, 0x19,
+	0xec, 0x64, 0x70, 0x7d, 0x27, 0x3e, 0x75, 0xd0, 0xfc, 0x2f, 0x46, 0x1e, 0x59, 0x8a, 0x9e, 0xf4,
+	0x3b, 0xac, 0x80, 0xbf, 0x54, 0x2b, 0x31, 0x04, 0xf1, 0x22, 0x40, 0x5d, 0xa1, 0x4c, 0xb2, 0x2b,
+	0xf7, 0x3d, 0xb6, 0x31, 0xf0, 0x27, 0x93, 0xfe, 0x14, 0xcf, 0xf6, 0xdb, 0x4b, 0x32, 0xbc, 0x18,
+	0xdf, 0x25, 0x42, 0x4b, 0x70, 0xe8, 0xf7, 0xd8, 0xe6, 0x68, 0x38, 0xf6, 0xdc, 0x68, 0x34, 0xf1,
+	0xfc, 0xe3, 0x88, 0xfa, 0x1f, 0x05, 0x80, 0x39, 0x08, 0x02, 0x92, 0xa3, 0x7e, 0x30, 0x94, 0x24,
+	0xd8, 0x64, 0x2b, 0x00, 0x4c, 0x90, 0xdc, 0x64, 0xb9, 0x59, 0x30, 0xf2, 0x83, 0x51, 0xf4, 0x96,
+	0x3a, 0x6d, 0xf2, 0x59, 0xdf, 0x61, 0x79, 0x6c, 0x5f, 0x81, 0xea, 0xd8, 0x67, 0xcb, 0x21, 0xa0,
+	0xc9, 0x9b, 0x8d, 0xfe, 0x71, 0x84, 0xa7, 0x6e, 0x6c, 0xb5, 0x6d, 0xf8, 0xc7, 0x11, 0x3f, 0x6e,
+	0xef, 0xb0, 0x3c, 0xa0, 0x70, 0xbb, 0xc4, 0x66, 0x1b, 0xd0, 0xee, 0xf2, 0x8c, 0x2a, 0xfb, 0x9d,
+	0x05, 0xb5, 0xdf, 0xf9, 0x57, 0xd8, 0x1a, 0xef, 0xc0, 0xf0, 0xf3, 0x6c, 0xe1, 0xe1, 0xb5, 0xe5,
+	0xfd, 0x19, 0x0b, 0x89, 0xf4, 0xa7, 0x6c, 0x53, 0x59, 0xf0, 0xb0, 0xb8, 0xc5, 0x1d, 0xec, 0xd6,
+	0x59, 0xb1, 0x66, 0x25, 0x38, 0x4a, 0x3f, 0x4e, 0x61, 0xe9, 0x73, 0x70, 0x3c, 0xf8, 0xc2, 0x8b,
+	0x60, 0x71, 0xdf, 0x78, 0xa3, 0xc3, 0x23, 0xb1, 0x83, 0xd1, 0x13, 0x14, 0x59, 0x6f, 0x78, 0x63,
+	0x88, 0x4f, 0x13, 0xb7, 0xb1, 0x3c, 0x87, 0xf0, 0x89, 0xde, 0x61, 0x05, 0x44, 0xe3, 0x54, 0x71,
+	0x75, 0x91, 0x03, 0x27, 0xfb, 0x40, 0x4d, 0x49, 0xe7, 0x0b, 0x82, 0xff, 0x4c, 0xcd, 0x23, 0xdc,
+	0x76, 0xc0, 0xf3, 0xbe, 0x1f, 0x7b, 0x09, 0x96, 0x66, 0x8b, 0x79, 0x49, 0x12, 0x2f, 0xba, 0xc9,
+	0xfd, 0x44, 0x9b, 0x7f, 0x67, 0x05, 0xab, 0x52, 0xd4, 0xa9, 0x5b, 0x5e, 0x26, 0xb1, 0xe5, 0xc1,
+	0x74, 0xd0, 0x60, 0xab, 0xa7, 0x83, 0x78, 0x4b, 0xd0, 0x95, 0x7e, 0x27, 0xc5, 0xb6, 0x79, 0x47,
+	0xb0, 0x0f, 0xcf, 0x50, 0x2f, 0x24, 0xdd, 0x2a, 0x35, 0xe7, 0x56, 0xd7, 0xd9, 0xc6, 0x68, 0xaa,
+	0x9a, 0x7b, 0x7d, 0x34, 0xe5, 0xb6, 0x56, 0x4c, 0x99, 0x39, 0x9f, 0x29, 0x65, 0x5c, 0x67, 0xd5,
+	0xb8, 0x26, 0xf3, 0x92, 0x3e, 0xa3, 0xe9, 0xd9, 0xea, 0xfc, 0xaa, 0xec, 0x98, 0xa6, 0x57, 0x04,
+	0xa8, 0x14, 0x34, 0xdf, 0x36, 0x3d, 0x23, 0xee, 0xe3, 0x5c, 0x92, 0x4d, 0xe4, 0x12, 0x19, 0x05,
+	0x6b, 0xe7, 0x89, 0x02, 0x31, 0xbd, 0x75, 0x65, 0x7a, 0xff, 0x34, 0x83, 0x45, 0x0c, 0x67, 0x0a,
+	0xbc, 0x89, 0x7f, 0xe2, 0xad, 0x4e, 0x5d, 0x6a, 0xec, 0xa7, 0xe7, 0x62, 0xff, 0xfb, 0x72, 0xe2,
+	0x19, 0x3e, 0xf1, 0x8f, 0x96, 0x67, 0x26, 0x1a, 0xe2, 0xac, 0xb9, 0x67, 0x93, 0x73, 0xbf, 0xc7,
+	0x36, 0x87, 0xc7, 0x41, 0x9f, 0x0a, 0xa1, 0x81, 0x48, 0x5b, 0x02, 0x66, 0x7b, 0x03, 0xd8, 0x7a,
+	0x24, 0xc9, 0x14, 0x68, 0x30, 0x6f, 0x49, 0xbe, 0x4e, 0xe8, 0x0d, 0x16, 0xd2, 0xdf, 0xc6, 0x97,
+	0xa7, 0xbf, 0xdc, 0x62, 0xfa, 0xbb, 0xc7, 0x36, 0x69, 0x01, 0x07, 0xfe, 0xf1, 0x14, 0x33, 0x59,
+	0xd6, 0x2a, 0x20, 0xac, 0x0a, 0x20, 0xc8, 0x01, 0x07, 0x6f, 0x23, 0x8f, 0x08, 0x18, 0x27, 0xc8,
+	0x03, 0x04, 0xd1, 0x72, 0xcd, 0xde, 0x9e, 0x63, 0xcd, 0x4a, 0x7f, 0x9a, 0xc6, 0x3d, 0x0e, 0xb7,
+	0xb3, 0x83, 0xfe, 0x74, 0x78, 0xde, 0xf7, 0x66, 0x0a, 0x87, 0x12, 0xac, 0x3a, 0xcb, 0x06, 0xfd,
+	0xc8, 0xa3, 0xe5, 0xe3, 0xbf, 0xb9, 0xc2, 0xc7, 0x41, 0x18, 0xb9, 0xe1, 0xe8, 0x37, 0x3c, 0x72,
+	0xbd, 0x3c, 0x87, 0xd8, 0xa3, 0xdf, 0xf0, 0xf4, 0x27, 0x2c, 0x3b, 0x0c, 0xfc, 0x19, 0xd5, 0x48,
+	0x67, 0x0e, 0x04, 0x74, 0x70, 0x7e, 0x82, 0x7f, 0xf5, 0xcf, 0x59, 0x61, 0x18, 0x0e, 0x66, 0xb0,
+	0xe4, 0xfd, 0xe0, 0x8b, 0x95, 0x4d, 0x64, 0x95, 0x3d, 0x26, 0x6f, 0x5c, 0xb0, 0x18, 0x3c, 0x5a,
+	0xfc, 0x49, 0xef, 0x2c, 0x2d, 0x96, 0x3e, 0x39, 0x4b, 0xd8, 0xb9, 0x6a, 0xa5, 0xab, 0x58, 0xf7,
+	0xcf, 0x4d, 0xa1, 0xf4, 0x3d, 0x2c, 0xa1, 0x96, 0xab, 0x06, 0xf6, 0x9a, 0x05, 0xde, 0xc0, 0x1d,
+	0x7b, 0x27, 0x9e, 0xa8, 0xdb, 0xf3, 0x00, 0x69, 0x01, 0xa0, 0x64, 0xb0, 0x9d, 0x33, 0x54, 0x39,
+	0x4f, 0x81, 0x51, 0xfa, 0xf7, 0x94, 0x74, 0x50, 0xc6, 0x39, 0x73, 0xba, 0x24, 0x5e, 0xcc, 0xe9,
+	0x72, 0x0f, 0x4d, 0xab, 0x7b, 0xa8, 0x5a, 0x25, 0x65, 0x12, 0x55, 0x92, 0xfe, 0x1d, 0xb6, 0x06,
+	0x9a, 0x8b, 0xb4, 0x5d, 0x3a, 0xcb, 0xd0, 0xf4, 0xda, 0x12, 0x19, 0x4a, 0xcf, 0x50, 0x73, 0x2f,
+	0x08, 0xfc, 0xc0, 0x9d, 0x84, 0x87, 0x4b, 0x4f, 0x06, 0x3a, 0xcb, 0xf2, 0x36, 0x21, 0x79, 0x21,
+	0xfc, 0x96, 0xd9, 0x29, 0xa3, 0x64, 0xa7, 0xdf, 0x4e, 0xe1, 0x42, 0xa0, 0xb4, 0x44, 0x53, 0x63,
+	0x95, 0xe8, 0x1b, 0x2c, 0xe7, 0x9d, 0xe2, 0xfe, 0x44, 0xe2, 0x37, 0xbc, 0xd3, 0x19, 0xef, 0x51,
+	0xce, 0x5b, 0x3e, 0x73, 0x46, 0x69, 0xa7, 0x6e, 0x01, 0x27, 0x14, 0x82, 0xc7, 0xe3, 0x68, 0x34,
+	0xeb, 0xf3, 0xf7, 0x5d, 0x3f, 0x3a, 0xf6, 0xc2, 0x48, 0x7f, 0x94, 0x08, 0xc1, 0x3b, 0x8b, 0x46,
+	0x92, 0x1c, 0x4a, 0x04, 0x2e, 0x5f, 0x0b, 0x9d, 0x65, 0x0f, 0xfc, 0xe1, 0x5b, 0x31, 0x7b, 0xf8,
+	0x5d, 0x8a, 0xc8, 0x39, 0x95, 0x71, 0x67, 0xe3, 0xb7, 0xbf, 0xec, 0x51, 0x7f, 0x2f, 0x85, 0x6f,
+	0x78, 0x87, 0x5e, 0x38, 0xe0, 0x2e, 0xf2, 0x3a, 0xe0, 0xbf, 0xf9, 0x78, 0x79, 0x6b, 0x63, 0xf2,
+	0x3a, 0xa8, 0x01, 0x0a, 0x5f, 0xc8, 0xc9, 0x17, 0x7d, 0x79, 0x6b, 0xfd, 0xe8, 0x8d, 0x40, 0x84,
+	0x84, 0xc0, 0xd7, 0xbc, 0xeb, 0x21, 0x22, 0x6e, 0x33, 0x16, 0x7a, 0xc1, 0xa8, 0x3f, 0x76, 0xa7,
+	0xc7, 0x13, 0x6e, 0xe1, 0xbc, 0x95, 0x47, 0x48, 0xe7, 0x78, 0x02, 0x7c, 0x43, 0x1c, 0x96, 0xe7,
+	0x8a, 0xbc, 0xb5, 0x3e, 0x9c, 0x01, 0x5f, 0xe9, 0x8f, 0x53, 0xec, 0x9a, 0xdc, 0x40, 0xc2, 0xa8,
+	0x1f, 0x85, 0x72, 0x05, 0xce, 0x78, 0x83, 0xad, 0xd6, 0x9b, 0xe9, 0x33, 0xea, 0xcd, 0xcc, 0x5c,
+	0xbd, 0xb9, 0x6a, 0xaf, 0x9d, 0xab, 0xdb, 0xd7, 0x16, 0xea, 0x76, 0x99, 0xd8, 0xd7, 0xcf, 0x93,
+	0xd8, 0xff, 0x28, 0x83, 0x75, 0x4e, 0x3c, 0x29, 0x7d, 0x9b, 0xa5, 0x47, 0x43, 0xfe, 0xa2, 0x25,
+	0x6b, 0xa5, 0x47, 0x67, 0xbe, 0x9e, 0x9f, 0xdf, 0x14, 0xd3, 0xe7, 0xd8, 0x14, 0x33, 0x4b, 0x36,
+	0x45, 0x75, 0x47, 0xcf, 0xce, 0xed, 0xe8, 0x5f, 0xcd, 0x79, 0x41, 0x3a, 0xde, 0x86, 0xea, 0x78,
+	0xb1, 0x91, 0x73, 0x09, 0x23, 0x7f, 0x85, 0xdb, 0xeb, 0xff, 0xa7, 0x83, 0xc1, 0x9f, 0xa4, 0x30,
+	0xdd, 0xf7, 0x0f, 0x0f, 0x03, 0xef, 0xb0, 0x1f, 0x79, 0x7f, 0x69, 0x3c, 0xf4, 0x37, 0xd9, 0x8d,
+	0xe5, 0x13, 0x83, 0x24, 0x34, 0xbf, 0x50, 0xa9, 0x2f, 0x5b, 0xa8, 0xf4, 0xfc, 0x42, 0xdd, 0x66,
+	0x8c, 0x0f, 0x8d, 0x68, 0xaa, 0x3a, 0x00, 0xc2, 0xd1, 0xa5, 0xbf, 0xc8, 0x60, 0xea, 0x47, 0xe3,
+	0xd1, 0x25, 0x0a, 0x77, 0x16, 0xf8, 0x33, 0x2f, 0xe0, 0xe5, 0xa6, 0x9a, 0x04, 0x17, 0x0b, 0x81,
+	0x45, 0x36, 0x35, 0x1b, 0xee, 0xcf, 0x2d, 0x3b, 0xf6, 0xa6, 0x3e, 0x3b, 0x8f, 0x14, 0x95, 0x8f,
+	0xbf, 0xba, 0x52, 0x9e, 0x75, 0x8b, 0x15, 0xa6, 0xde, 0x69, 0xa4, 0xde, 0xd3, 0x28, 0x3c, 0xbc,
+	0x7f, 0x1e, 0xb1, 0x0a, 0x1b, 0x94, 0x3e, 0xf0, 0x48, 0xb7, 0x3b, 0x76, 0xe7, 0xbb, 0x54, 0xdf,
+	0x3c, 0x8f, 0xbc, 0x25, 0xcd, 0xaa, 0xef, 0xb1, 0x8c, 0x7f, 0x3a, 0x59, 0x59, 0x87, 0x2d, 0x11,
+	0xe2, 0x9f, 0x4e, 0x1a, 0x17, 0x2c, 0xe0, 0x02, 0x8b, 0x2d, 0x29, 0xc0, 0xce, 0x65, 0xb1, 0x33,
+	0x0b, 0x31, 0xf1, 0x12, 0xa3, 0x74, 0xc8, 0x3e, 0x3c, 0x87, 0xc5, 0x17, 0x02, 0x36, 0xf5, 0x73,
+	0x07, 0xec, 0xe7, 0xac, 0xf4, 0xe5, 0x6b, 0xa0, 0x7f, 0xc4, 0xb6, 0xe3, 0x47, 0x77, 0x34, 0xc4,
+	0x91, 0xb6, 0xac, 0x4d, 0xb9, 0x32, 0xcd, 0x61, 0x58, 0xb2, 0xb1, 0x63, 0xb6, 0xda, 0xfe, 0xbf,
+	0x48, 0x57, 0xeb, 0x5b, 0xab, 0x1c, 0x1f, 0xd6, 0x03, 0x76, 0x49, 0xff, 0x74, 0xc2, 0x35, 0xca,
+	0xe0, 0x3d, 0x18, 0xff, 0x74, 0x02, 0xba, 0xfc, 0xe3, 0xd4, 0x4a, 0x0b, 0x9e, 0x59, 0x7f, 0x2e,
+	0x79, 0xd1, 0x93, 0x28, 0xa2, 0x32, 0xc9, 0x22, 0xea, 0x9b, 0x2c, 0x71, 0xb9, 0xc3, 0xa5, 0x6a,
+	0x09, 0x34, 0xd1, 0x54, 0x44, 0x0d, 0x2a, 0xa7, 0xdf, 0x4f, 0x33, 0x7d, 0x41, 0xa7, 0xf0, 0xac,
+	0x9c, 0x28, 0x2e, 0x88, 0xa5, 0x95, 0x0b, 0x62, 0x1f, 0xb3, 0x6d, 0xa5, 0xb3, 0x08, 0xf9, 0x2b,
+	0xc3, 0x93, 0xc9, 0x56, 0xdc, 0x5a, 0x84, 0x5c, 0xae, 0x92, 0xf1, 0xbe, 0x25, 0xa5, 0x47, 0x49,
+	0xf6, 0x02, 0x80, 0xca, 0x7d, 0xa1, 0xb5, 0xc4, 0x7d, 0xa1, 0x3b, 0xac, 0x30, 0xe9, 0x9f, 0xba,
+	0xde, 0x34, 0x0a, 0x46, 0x5e, 0x48, 0x5b, 0x19, 0x9b, 0xf4, 0x4f, 0x4d, 0x84, 0xe8, 0xbb, 0x50,
+	0xf6, 0xf3, 0xf4, 0x03, 0xf8, 0x0d, 0xbe, 0x9a, 0xe7, 0x09, 0x23, 0xc8, 0x57, 0x96, 0xc2, 0x5a,
+	0xfa, 0x71, 0x0a, 0xfb, 0xe7, 0x48, 0x8a, 0x7b, 0xff, 0xd9, 0x7b, 0x3d, 0xb8, 0xc6, 0x89, 0x9a,
+	0x49, 0xb7, 0xac, 0x02, 0xc2, 0x30, 0x97, 0xde, 0x63, 0x9b, 0x63, 0xdf, 0xff, 0xe2, 0x78, 0xa6,
+	0x64, 0xd3, 0xac, 0x55, 0x40, 0x18, 0x92, 0x7c, 0xc8, 0xb6, 0xb8, 0xed, 0xbc, 0x21, 0xd1, 0x64,
+	0xa9, 0x3d, 0x8b, 0x40, 0x4c, 0xba, 0x0f, 0xb0, 0xd0, 0x92, 0x57, 0xc0, 0xe2, 0x6d, 0x6c, 0xd5,
+	0xbd, 0xac, 0xd2, 0x9f, 0x51, 0x1d, 0x13, 0xf3, 0xac, 0xbe, 0xc3, 0x75, 0x9b, 0xb1, 0xe0, 0x94,
+	0x1a, 0x20, 0xa1, 0xd8, 0x11, 0x82, 0xd3, 0x1e, 0x02, 0x00, 0x1d, 0xc5, 0x68, 0x9c, 0x43, 0x3e,
+	0x92, 0xe8, 0x1b, 0x2c, 0x17, 0x9c, 0xba, 0xb0, 0x81, 0x84, 0xa4, 0xfc, 0x46, 0x70, 0x5a, 0x81,
+	0x47, 0x6e, 0x3d, 0x81, 0xc2, 0x6d, 0x6f, 0x23, 0x22, 0x14, 0x8e, 0x09, 0xa7, 0xba, 0x99, 0x37,
+	0xe4, 0xab, 0xca, 0xc7, 0xac, 0x21, 0x80, 0xc6, 0x14, 0xe8, 0x0d, 0x31, 0xa6, 0x40, 0xef, 0xb0,
+	0x7c, 0x70, 0x8a, 0xc7, 0x8f, 0x90, 0x4a, 0x95, 0x5c, 0x70, 0x6a, 0xf2, 0x67, 0x40, 0x46, 0x12,
+	0x89, 0x95, 0x4a, 0x2e, 0x12, 0xc8, 0xbb, 0x6c, 0x33, 0x38, 0x75, 0x5f, 0x07, 0xfd, 0x89, 0x07,
+	0x24, 0x54, 0xa8, 0xb0, 0xe0, 0xb4, 0x0e, 0x20, 0x93, 0xdf, 0x5a, 0x2c, 0x04, 0xa7, 0xae, 0x7f,
+	0xe2, 0x05, 0x9c, 0xa0, 0x20, 0x54, 0xeb, 0x9e, 0x78, 0x01, 0xe0, 0x6f, 0x71, 0xcd, 0x07, 0xc1,
+	0x80, 0xa3, 0x37, 0xc5, 0xe0, 0xd5, 0x60, 0x80, 0xdc, 0x6c, 0xe0, 0x8f, 0xc7, 0xa3, 0x90, 0xea,
+	0x16, 0xda, 0xeb, 0x05, 0x64, 0xa1, 0x42, 0xdc, 0x3e, 0x47, 0x85, 0x78, 0x71, 0xb1, 0x42, 0x2c,
+	0x3d, 0xc6, 0x8e, 0x3d, 0x76, 0xf8, 0x16, 0x4a, 0x9b, 0x55, 0xef, 0xba, 0xf6, 0x31, 0xee, 0xb1,
+	0xa9, 0x87, 0x0e, 0xe7, 0x05, 0xff, 0xef, 0x45, 0x43, 0xe9, 0xc7, 0x69, 0x0c, 0x1d, 0x45, 0x9d,
+	0x33, 0xd4, 0xe0, 0xcb, 0xe7, 0xbd, 0x4e, 0xc4, 0x4d, 0x2e, 0xf0, 0x5e, 0xcb, 0xa0, 0x49, 0x68,
+	0x93, 0xf9, 0x32, 0x6d, 0xb2, 0xf3, 0x25, 0xcc, 0x57, 0xd5, 0x9a, 0xaa, 0xb0, 0x4d, 0xb2, 0x14,
+	0x9f, 0x11, 0xe5, 0x96, 0x3b, 0x2b, 0x7a, 0xa5, 0xc2, 0x9c, 0x56, 0x01, 0x9f, 0x6d, 0xe0, 0x81,
+	0x63, 0xdb, 0x76, 0x6c, 0x19, 0x7e, 0x78, 0xfb, 0xb2, 0x2b, 0x8c, 0x67, 0x76, 0x72, 0xd3, 0x2b,
+	0x3b, 0xb9, 0x99, 0x73, 0x76, 0x72, 0x4f, 0xd4, 0xa5, 0x82, 0xb4, 0xfa, 0x16, 0x34, 0x92, 0x47,
+	0xc9, 0xc2, 0x4a, 0x8d, 0x80, 0x04, 0xef, 0x87, 0xea, 0x0f, 0xf1, 0x0e, 0xb0, 0xa8, 0xd0, 0x6e,
+	0xad, 0xe0, 0xe0, 0x34, 0x78, 0x43, 0x38, 0x2c, 0xfd, 0xbd, 0x14, 0x3a, 0x1f, 0xa2, 0xe4, 0xa6,
+	0x73, 0x85, 0xad, 0xf1, 0xab, 0x83, 0xe2, 0x45, 0x2b, 0x7f, 0x58, 0xb8, 0x18, 0x9b, 0x5e, 0xbc,
+	0x18, 0x0b, 0x5e, 0x00, 0x3b, 0x03, 0x97, 0x27, 0x76, 0xdd, 0xfc, 0xa4, 0x7f, 0xca, 0xab, 0xf1,
+	0x50, 0x2f, 0x26, 0x7b, 0xf6, 0x5b, 0xf1, 0x4e, 0xfe, 0x1d, 0xb5, 0x13, 0xb4, 0xd8, 0x3e, 0x38,
+	0xe3, 0x2d, 0xd5, 0xaf, 0xe3, 0xfb, 0x5f, 0xa5, 0xcb, 0x82, 0xbe, 0x5e, 0x66, 0x97, 0xc8, 0x67,
+	0x39, 0x50, 0x0d, 0xa3, 0x8b, 0x88, 0xa8, 0xf4, 0xa7, 0x98, 0xcc, 0xf5, 0xaf, 0xb1, 0x8b, 0xdc,
+	0x79, 0x15, 0x4a, 0x8c, 0xa7, 0x2d, 0x00, 0x4b, 0xba, 0xd2, 0x1f, 0x52, 0x4c, 0xe1, 0x60, 0x32,
+	0xa6, 0x56, 0xa8, 0x36, 0x57, 0xb7, 0xa7, 0xe7, 0xea, 0x76, 0x18, 0x35, 0xee, 0x70, 0xab, 0x81,
+	0xb5, 0x85, 0xe0, 0xe6, 0x14, 0xe9, 0x4a, 0x8c, 0xab, 0x11, 0x53, 0x61, 0x74, 0x15, 0x00, 0x28,
+	0x68, 0xbe, 0xaa, 0xf8, 0x7a, 0xca, 0x58, 0x6c, 0x43, 0x8a, 0xae, 0x7b, 0x67, 0xb5, 0xb4, 0xd0,
+	0x9f, 0xf2, 0xf0, 0x1b, 0xa3, 0xeb, 0xb7, 0xb0, 0x4b, 0x8e, 0x24, 0x67, 0x5e, 0xc4, 0x57, 0x2d,
+	0x97, 0x5e, 0xd1, 0x54, 0xcb, 0xfc, 0xbc, 0x4d, 0xb5, 0x7f, 0x43, 0x2e, 0x8d, 0x04, 0xd2, 0xa5,
+	0xe9, 0x1a, 0x3a, 0xbe, 0x82, 0x4e, 0xc9, 0x6b, 0xe8, 0x6d, 0xfe, 0x0e, 0xf4, 0x36, 0x4d, 0x1a,
+	0x9d, 0x9e, 0xd6, 0x09, 0x20, 0xce, 0x52, 0xc7, 0xcf, 0x2c, 0x71, 0x7c, 0x92, 0x2f, 0x3a, 0x81,
+	0x42, 0x3e, 0xb8, 0x8e, 0x44, 0x0e, 0xfc, 0xb1, 0x1f, 0xd0, 0xca, 0x00, 0xb2, 0x0a, 0xcf, 0xa5,
+	0xdf, 0x54, 0x5d, 0x0a, 0x63, 0xff, 0x5b, 0xb2, 0xee, 0x4a, 0xad, 0xb8, 0x10, 0xa3, 0x5a, 0x57,
+	0x96, 0x65, 0x5f, 0x9a, 0x01, 0x14, 0xb7, 0x15, 0x19, 0xe0, 0x84, 0xdd, 0xe3, 0x5d, 0xc3, 0x44,
+	0xbf, 0x50, 0x86, 0xdf, 0xd1, 0xf2, 0x0b, 0x4f, 0xa9, 0x2f, 0xa9, 0x83, 0xe7, 0x9a, 0x89, 0xcb,
+	0xda, 0x95, 0x63, 0xdc, 0x2b, 0x13, 0xe3, 0xfe, 0xf2, 0x46, 0x33, 0xb1, 0x2d, 0xf9, 0xa3, 0x63,
+	0xef, 0x98, 0xea, 0x7c, 0x1a, 0x8b, 0x37, 0x75, 0xb0, 0xee, 0x14, 0x5e, 0x21, 0xcf, 0xcd, 0x1a,
+	0xcb, 0xc4, 0x17, 0x6c, 0xe0, 0x67, 0x29, 0x40, 0xa5, 0x15, 0x31, 0x93, 0xd1, 0xd4, 0xe5, 0x2f,
+	0x06, 0xaa, 0xac, 0xa0, 0xc8, 0xa5, 0x75, 0x5b, 0x74, 0xdb, 0x05, 0x0d, 0xb0, 0xda, 0xa5, 0x8b,
+	0x65, 0x4b, 0xde, 0x38, 0x2c, 0x1b, 0xb3, 0x7f, 0xfa, 0x4b, 0x1e, 0xf3, 0x9f, 0x50, 0xa3, 0x46,
+	0xe1, 0x4c, 0x58, 0xff, 0x2b, 0x19, 0xf8, 0x3c, 0x87, 0xab, 0x65, 0x6b, 0xf9, 0x77, 0x53, 0x98,
+	0x60, 0x28, 0x75, 0xf2, 0x41, 0xc0, 0x1f, 0x70, 0xb4, 0x38, 0x09, 0xf3, 0x67, 0x3c, 0x26, 0x29,
+	0x6d, 0x23, 0xbc, 0x3f, 0x55, 0x49, 0x9c, 0x4f, 0x56, 0x35, 0xee, 0x57, 0xe8, 0x4f, 0x47, 0x93,
+	0x87, 0x78, 0x3b, 0x02, 0x89, 0x0e, 0x79, 0xad, 0x01, 0x51, 0x28, 0xb7, 0xac, 0x25, 0xf7, 0xb6,
+	0x4a, 0x87, 0x78, 0x5e, 0x5d, 0xc2, 0x33, 0x1b, 0xbf, 0x5d, 0x7a, 0xd3, 0xeb, 0x5b, 0x6c, 0x9d,
+	0x53, 0x8b, 0xcf, 0x24, 0x6e, 0xaf, 0x7a, 0x49, 0xca, 0xa9, 0x2c, 0x22, 0x2e, 0x99, 0x0b, 0x97,
+	0xa2, 0xd0, 0x4e, 0x2b, 0x5e, 0x03, 0x48, 0xdb, 0x65, 0x12, 0xb6, 0x2b, 0xb5, 0x55, 0xe7, 0x3b,
+	0xdf, 0x29, 0x27, 0x21, 0x2e, 0x9d, 0x14, 0xf7, 0xe7, 0x74, 0x9a, 0x53, 0xe4, 0xfd, 0x22, 0x72,
+	0x12, 0x67, 0x98, 0xcc, 0xc2, 0x19, 0x46, 0x39, 0x18, 0x65, 0xe7, 0x0f, 0x46, 0x89, 0x73, 0xc8,
+	0xda, 0xdc, 0x39, 0x64, 0x7e, 0x0f, 0x5d, 0x3f, 0xc7, 0x1e, 0xba, 0xb1, 0xe4, 0x1c, 0x30, 0x41,
+	0x07, 0x0d, 0xfc, 0xb1, 0x27, 0xcd, 0xf5, 0x98, 0x65, 0xe1, 0x79, 0xe5, 0x2b, 0xc8, 0x81, 0x3f,
+	0x8d, 0x02, 0x7f, 0x3c, 0xf6, 0x02, 0xce, 0x67, 0x71, 0x6a, 0x18, 0xee, 0xd0, 0x9b, 0x7a, 0x34,
+	0x20, 0x19, 0x22, 0x6b, 0x6d, 0xc6, 0xc0, 0xe6, 0xb0, 0xf4, 0xbb, 0x14, 0x10, 0xfd, 0xf0, 0xed,
+	0x74, 0x20, 0x76, 0xdc, 0x8f, 0xd8, 0x76, 0x5c, 0x5b, 0xf0, 0x1e, 0x27, 0x35, 0x65, 0x44, 0x69,
+	0xc1, 0xbb, 0x9c, 0x9f, 0x30, 0x4d, 0xf9, 0x86, 0x49, 0xdc, 0xb2, 0x01, 0xba, 0x6d, 0x80, 0xdb,
+	0x1c, 0xcc, 0x29, 0xcb, 0xec, 0x52, 0xe2, 0xa5, 0x34, 0x27, 0xc5, 0xfa, 0xee, 0x22, 0x20, 0x2c,
+	0x84, 0xf3, 0x9b, 0x4b, 0xaf, 0xd8, 0x36, 0xdf, 0x57, 0xdb, 0xfe, 0x70, 0x6f, 0x36, 0x84, 0x4c,
+	0x85, 0xed, 0x7a, 0x7c, 0x2b, 0x92, 0x1e, 0xf1, 0x6f, 0x76, 0xe4, 0x2b, 0x38, 0xda, 0xad, 0x6e,
+	0xae, 0x7e, 0x49, 0x67, 0x61, 0x99, 0xd0, 0xf6, 0x87, 0xa5, 0x1e, 0xbb, 0xc8, 0x45, 0xf3, 0x52,
+	0xc3, 0xe2, 0x51, 0xf3, 0x03, 0x56, 0x50, 0xf6, 0xb5, 0x95, 0x5d, 0x2e, 0x75, 0xef, 0x63, 0x13,
+	0x29, 0xa3, 0xf4, 0x82, 0x5d, 0xac, 0x8f, 0xfd, 0x37, 0xbc, 0x4f, 0xb5, 0x42, 0xdb, 0xc7, 0x2c,
+	0x27, 0xae, 0x0a, 0x91, 0xb2, 0x37, 0x56, 0xde, 0x25, 0xb2, 0x36, 0xe0, 0x17, 0xa8, 0xea, 0xb2,
+	0x2b, 0x20, 0x98, 0x57, 0xbe, 0x67, 0x49, 0xff, 0x36, 0xcb, 0xcb, 0x2b, 0x26, 0x2b, 0x6d, 0x21,
+	0x29, 0x2c, 0x3c, 0x76, 0xc0, 0x00, 0xdf, 0x65, 0x6b, 0x30, 0x40, 0xa8, 0x3f, 0x60, 0x6b, 0xa3,
+	0xc8, 0x9b, 0x88, 0xb9, 0xef, 0x2c, 0x57, 0x8e, 0xb6, 0x7d, 0x4e, 0x59, 0xfa, 0x3e, 0x5b, 0xe7,
+	0x76, 0x0c, 0xa1, 0x68, 0x50, 0x99, 0x57, 0x19, 0x8e, 0x17, 0x26, 0x82, 0xfb, 0x29, 0x63, 0x72,
+	0x6a, 0xe7, 0x90, 0xa0, 0x1c, 0x6b, 0x84, 0x84, 0x11, 0x2b, 0x80, 0x84, 0xea, 0x51, 0x7f, 0x7a,
+	0xe8, 0x85, 0xfa, 0x37, 0xd8, 0x7a, 0xe4, 0xbb, 0xfd, 0xa1, 0xb8, 0xac, 0xa9, 0x27, 0x64, 0xf0,
+	0x59, 0x5a, 0x6b, 0x91, 0x6f, 0x0c, 0x87, 0xfa, 0x7d, 0x96, 0x8f, 0x7c, 0x72, 0x43, 0x32, 0xd7,
+	0x32, 0xea, 0x5c, 0xe4, 0xa3, 0x4b, 0x42, 0x41, 0xa8, 0x49, 0x6d, 0xc5, 0x80, 0x9f, 0xce, 0x0d,
+	0x78, 0x7d, 0x41, 0x04, 0x4e, 0x4e, 0x8c, 0xfa, 0x78, 0x71, 0xd4, 0x95, 0x2c, 0x72, 0x68, 0xe2,
+	0x3a, 0xe6, 0xeb, 0x4e, 0x1d, 0xee, 0xb3, 0xb8, 0xd0, 0x41, 0x4a, 0x36, 0xcb, 0xf5, 0x28, 0x48,
+	0x97, 0x39, 0x8b, 0x0c, 0xeb, 0x95, 0xce, 0x22, 0x29, 0xac, 0x9c, 0x88, 0xf6, 0xd2, 0x0b, 0x96,
+	0x47, 0xa1, 0xdd, 0xe3, 0x68, 0x41, 0xea, 0x77, 0x19, 0x8b, 0xef, 0x10, 0x91, 0xd8, 0x9d, 0x55,
+	0x62, 0xfd, 0xe3, 0xc8, 0x22, 0x25, 0xba, 0xc7, 0xb0, 0xa5, 0x15, 0xd0, 0xa8, 0xe6, 0x89, 0x37,
+	0x5d, 0x14, 0xfd, 0x57, 0x59, 0x41, 0xc9, 0x30, 0x2b, 0x2b, 0x53, 0x85, 0xa6, 0x71, 0xc1, 0x62,
+	0x71, 0xf2, 0xa9, 0x6c, 0xb0, 0x35, 0x0f, 0x24, 0x97, 0xff, 0x5b, 0x8a, 0x15, 0x24, 0xe9, 0xd4,
+	0xd7, 0x35, 0xb6, 0xd9, 0xad, 0xf7, 0x7a, 0x6e, 0xb3, 0xb3, 0x6f, 0xb4, 0x9a, 0x35, 0xed, 0x82,
+	0xae, 0xb1, 0x1c, 0x87, 0xb4, 0x8d, 0x97, 0xda, 0xbb, 0x9f, 0xbd, 0x7f, 0xbf, 0xa1, 0x5f, 0x91,
+	0x34, 0x6e, 0xaf, 0x6b, 0x39, 0xda, 0xff, 0x7c, 0x0f, 0x50, 0x9d, 0x31, 0x0e, 0x75, 0x8c, 0x4a,
+	0xcb, 0xd4, 0xfe, 0x17, 0x87, 0x5d, 0x66, 0x05, 0x0e, 0xeb, 0x74, 0xad, 0xb6, 0xd1, 0xd2, 0x7e,
+	0x92, 0x20, 0xac, 0xb7, 0xba, 0xdd, 0x9a, 0xf6, 0xbf, 0x39, 0x4c, 0x0c, 0x62, 0xb4, 0x5a, 0xda,
+	0x4f, 0x39, 0xe4, 0x3a, 0xbb, 0xc8, 0x21, 0xd5, 0x6e, 0xc7, 0xb1, 0xba, 0xad, 0x96, 0x69, 0x69,
+	0xff, 0x27, 0xc1, 0xde, 0xea, 0x56, 0x8d, 0x96, 0xf6, 0xb3, 0x24, 0x7b, 0xe7, 0x95, 0xf6, 0x1e,
+	0x20, 0xe5, 0xff, 0xb8, 0x86, 0x2f, 0x91, 0xf9, 0x5e, 0xbc, 0xcd, 0x59, 0x1c, 0xb7, 0x61, 0xb6,
+	0x5a, 0x5d, 0xed, 0x82, 0x7c, 0x36, 0x2d, 0xab, 0x6b, 0x69, 0x29, 0xfd, 0x2a, 0xbb, 0x84, 0xcf,
+	0xd5, 0x46, 0xd7, 0xb5, 0xcc, 0xe7, 0x7b, 0xa6, 0xed, 0x68, 0x69, 0xfd, 0x32, 0x57, 0x41, 0x82,
+	0x7b, 0xad, 0x57, 0x5a, 0x26, 0xa6, 0x7d, 0xd9, 0x33, 0xad, 0x66, 0xdb, 0xec, 0x38, 0xa6, 0xa5,
+	0x65, 0xf5, 0x1b, 0xec, 0x2a, 0x07, 0xd7, 0x4d, 0xc3, 0xd9, 0xb3, 0x4c, 0x5b, 0x8a, 0x59, 0xd3,
+	0xaf, 0xb3, 0xcb, 0xf3, 0x28, 0x10, 0xb5, 0xae, 0xef, 0xb0, 0xeb, 0x1c, 0xb1, 0x6b, 0x3a, 0x30,
+	0xcd, 0x7a, 0x73, 0x57, 0x72, 0x6d, 0x48, 0x81, 0x09, 0x24, 0xf0, 0xe5, 0xa4, 0x5e, 0xb6, 0x44,
+	0x69, 0x79, 0x5d, 0x67, 0xdb, 0x1c, 0xd8, 0x33, 0xaa, 0xcf, 0x4c, 0xc7, 0x6d, 0x76, 0x34, 0x26,
+	0x75, 0xad, 0xb7, 0xba, 0x2f, 0x5c, 0xcb, 0x6c, 0x77, 0xf7, 0xcd, 0x9a, 0x56, 0xd0, 0xaf, 0x30,
+	0x0d, 0x49, 0xbb, 0x96, 0xe3, 0xda, 0x8e, 0xe1, 0xec, 0xd9, 0xda, 0xa6, 0x94, 0x4a, 0x02, 0xba,
+	0x7b, 0x8e, 0xb6, 0xa5, 0x5f, 0x62, 0x5b, 0xb1, 0x84, 0x76, 0xb7, 0xa6, 0x6d, 0xcb, 0x81, 0x76,
+	0xad, 0xee, 0x5e, 0x8f, 0xc3, 0x2e, 0x4a, 0x32, 0x2e, 0x11, 0x40, 0x9a, 0x24, 0xe3, 0xee, 0xc0,
+	0x61, 0x97, 0xf4, 0x9b, 0xec, 0x1a, 0x87, 0xb5, 0xf7, 0x5a, 0x4e, 0xb3, 0x67, 0x58, 0x8e, 0x9c,
+	0xaf, 0xae, 0x17, 0xd9, 0x95, 0x05, 0x1c, 0x4c, 0xf7, 0xb2, 0xc4, 0x54, 0x0c, 0xcb, 0x6a, 0x9a,
+	0x96, 0xe4, 0xb9, 0xa2, 0x5f, 0x63, 0xfa, 0x1c, 0x06, 0x38, 0xae, 0xea, 0xf7, 0xd8, 0x6d, 0x0e,
+	0x7f, 0xbe, 0x67, 0xee, 0x99, 0xcb, 0xcc, 0x7b, 0x4d, 0xbf, 0xc3, 0x76, 0x56, 0x91, 0x80, 0x8c,
+	0xeb, 0xd2, 0x76, 0x56, 0xb7, 0x65, 0x4a, 0xbe, 0xa2, 0xb4, 0x12, 0x81, 0x81, 0xf6, 0x86, 0x9c,
+	0x17, 0x88, 0x31, 0xec, 0x57, 0x9d, 0xaa, 0x64, 0xb8, 0x29, 0xb5, 0x57, 0x71, 0xc0, 0xb5, 0x23,
+	0x2d, 0x64, 0x0b, 0x8c, 0x76, 0x4b, 0xc2, 0xda, 0xa6, 0x63, 0x5a, 0xdc, 0x6a, 0xb7, 0xcb, 0x55,
+	0xbc, 0x85, 0x31, 0xf7, 0x67, 0x08, 0x88, 0xb4, 0xc1, 0xd7, 0x5a, 0xc4, 0x2a, 0x0e, 0x06, 0xb0,
+	0x7d, 0xd3, 0xb2, 0x9b, 0xdd, 0x4e, 0xa5, 0xe9, 0xb4, 0x8d, 0x9e, 0x96, 0x2a, 0x7b, 0x58, 0xcd,
+	0x50, 0x65, 0x8c, 0x9d, 0x02, 0xf4, 0x83, 0xaa, 0x5b, 0xb7, 0x8c, 0x5d, 0x11, 0xa2, 0x17, 0x48,
+	0x2e, 0x41, 0x6b, 0x56, 0xb7, 0xa7, 0xa5, 0x68, 0xd6, 0x04, 0xb3, 0x4c, 0xc3, 0x6e, 0x6b, 0xe9,
+	0x24, 0x61, 0xdb, 0xb0, 0x9f, 0x69, 0x99, 0xf2, 0x53, 0x1c, 0x06, 0xdf, 0x24, 0x50, 0xd1, 0x44,
+	0xce, 0x51, 0x55, 0xf4, 0x24, 0xe7, 0xae, 0xba, 0x35, 0xb3, 0x67, 0x99, 0x55, 0xc3, 0x31, 0x6b,
+	0x42, 0xc2, 0xaf, 0xe1, 0x37, 0xd0, 0x78, 0x57, 0x9c, 0x58, 0xd5, 0x29, 0x6e, 0xb3, 0x3c, 0x82,
+	0x20, 0x1f, 0xfd, 0x2c, 0x15, 0x3f, 0x43, 0xea, 0x78, 0x9f, 0x2a, 0xff, 0x07, 0xaa, 0xdb, 0x12,
+	0x7d, 0x04, 0xcc, 0x6a, 0xaa, 0x06, 0x72, 0x46, 0xe0, 0xd8, 0x10, 0x03, 0xb6, 0x96, 0x92, 0x06,
+	0x41, 0x9f, 0x45, 0x68, 0x5a, 0x92, 0xca, 0x70, 0xb1, 0xb5, 0xac, 0x24, 0xc5, 0x28, 0x40, 0x68,
+	0x8e, 0xf4, 0xad, 0xba, 0xcd, 0x1e, 0x59, 0xe9, 0xae, 0x24, 0x44, 0x47, 0x43, 0xc2, 0xa7, 0xfa,
+	0x35, 0xee, 0x5d, 0x24, 0xb3, 0xd2, 0xea, 0x56, 0x9f, 0x99, 0x35, 0xed, 0x5d, 0xba, 0x7c, 0xa2,
+	0x7c, 0x02, 0x9f, 0x30, 0xdf, 0x12, 0xe5, 0x05, 0x7b, 0xad, 0xfb, 0xa2, 0xa3, 0xa5, 0x62, 0xba,
+	0x0e, 0x24, 0xab, 0xea, 0xbe, 0x96, 0x15, 0xc9, 0x9c, 0x83, 0xea, 0x2f, 0x6a, 0xda, 0x5d, 0x8a,
+	0x18, 0x84, 0xc4, 0x99, 0xe2, 0x69, 0xf9, 0xaf, 0xcd, 0xbd, 0x43, 0x11, 0xa6, 0xef, 0xd9, 0x8b,
+	0xc3, 0xda, 0x6e, 0xab, 0xd9, 0x79, 0x36, 0x37, 0xac, 0x2d, 0x67, 0x91, 0xa6, 0xf4, 0xca, 0xe9,
+	0xf6, 0x4d, 0x2d, 0x5b, 0xfe, 0xb3, 0x34, 0x7e, 0x78, 0xc2, 0xa5, 0xcb, 0xde, 0x11, 0x31, 0xd6,
+	0x95, 0x01, 0x24, 0xe8, 0xc1, 0x67, 0xed, 0x8a, 0xdb, 0xa8, 0xc5, 0xe2, 0x09, 0x54, 0xaf, 0x49,
+	0xbf, 0xe3, 0x20, 0x22, 0xcb, 0xce, 0xc3, 0xea, 0x35, 0x2d, 0x27, 0x66, 0x5f, 0x77, 0x1f, 0xec,
+	0x72, 0x2a, 0x2d, 0x09, 0xa9, 0x83, 0x3d, 0x14, 0xf1, 0x08, 0x7a, 0xaa, 0xeb, 0x02, 0xf4, 0x98,
+	0x40, 0xef, 0xc0, 0xff, 0x63, 0xf1, 0x04, 0x4c, 0xeb, 0x97, 0xa4, 0x34, 0x07, 0x41, 0x60, 0xf0,
+	0x02, 0x82, 0xba, 0x4e, 0xc3, 0xb4, 0xb4, 0x77, 0xb9, 0x98, 0xa8, 0xda, 0xed, 0xf5, 0x00, 0xa4,
+	0xc5, 0x44, 0xf5, 0x66, 0x05, 0x20, 0x77, 0xe3, 0x21, 0x8d, 0x3d, 0xa7, 0xdb, 0x31, 0x77, 0xb5,
+	0x77, 0x4f, 0xf5, 0x4b, 0x82, 0xaa, 0x67, 0xec, 0xd9, 0xa6, 0xf6, 0xee, 0x5d, 0x4a, 0xbf, 0xc6,
+	0x5d, 0x49, 0x80, 0x20, 0x67, 0xb4, 0xb5, 0x77, 0xef, 0xd2, 0xe5, 0x9a, 0xe2, 0x34, 0x74, 0x9d,
+	0x75, 0x8b, 0x47, 0x45, 0xcf, 0x72, 0x8d, 0x1a, 0xee, 0xe1, 0x9b, 0xf8, 0x58, 0x33, 0x5b, 0xa6,
+	0x63, 0x6a, 0xa9, 0x18, 0xd2, 0xee, 0xd6, 0x9a, 0xf5, 0x57, 0x5a, 0xba, 0xfc, 0x08, 0x5d, 0x20,
+	0xfe, 0xb3, 0x02, 0x64, 0xd4, 0x36, 0x77, 0xfa, 0x4e, 0xcd, 0xb0, 0x40, 0x12, 0x0a, 0x6e, 0x3b,
+	0x6e, 0xf7, 0x65, 0x5b, 0x4b, 0x95, 0xbf, 0x88, 0xff, 0x6e, 0x00, 0xff, 0x43, 0x00, 0x24, 0xf7,
+	0x65, 0xbb, 0xea, 0x76, 0x5e, 0xb6, 0xdd, 0xcf, 0xe4, 0xd8, 0x02, 0xf2, 0x40, 0x4b, 0xe9, 0x3b,
+	0x3c, 0xfa, 0x01, 0xd2, 0xed, 0x99, 0x1d, 0x1e, 0x81, 0x15, 0xc3, 0x6e, 0x56, 0x61, 0x32, 0xfa,
+	0x0d, 0xbe, 0x5b, 0x02, 0x32, 0xb1, 0xc3, 0xbe, 0x7f, 0x9f, 0x29, 0xff, 0xa3, 0x1c, 0xbb, 0xbc,
+	0xe4, 0x53, 0x7c, 0x72, 0xea, 0x97, 0xa0, 0x54, 0xbd, 0x22, 0xab, 0x92, 0x0b, 0x94, 0x96, 0x55,
+	0x78, 0xe3, 0x15, 0xe2, 0x52, 0xb4, 0x29, 0x0b, 0x5c, 0xdb, 0x74, 0x8c, 0x9a, 0xe1, 0x18, 0x5a,
+	0x7a, 0x4e, 0x98, 0xe9, 0x34, 0xdc, 0x9a, 0xed, 0x68, 0x99, 0x25, 0x70, 0xdb, 0xaa, 0x6a, 0xd9,
+	0x39, 0x41, 0x00, 0x77, 0x5e, 0xf5, 0x4c, 0xb9, 0xed, 0x0b, 0xc4, 0x7e, 0xcb, 0xe8, 0xb8, 0xfb,
+	0xcd, 0x9a, 0xb6, 0xbe, 0x0c, 0xd1, 0xab, 0xf6, 0xb4, 0x8d, 0xf9, 0x79, 0xf4, 0xdc, 0x9a, 0x5d,
+	0xed, 0x69, 0x39, 0xda, 0x8a, 0x14, 0xb8, 0x59, 0xed, 0x68, 0xf9, 0x39, 0x39, 0xcd, 0x9e, 0xdb,
+	0xb3, 0xba, 0x4e, 0x57, 0x63, 0x0b, 0x88, 0xfd, 0xc7, 0x5c, 0xd7, 0xc2, 0x32, 0x04, 0x4c, 0x6e,
+	0x73, 0x6e, 0x64, 0xa7, 0xda, 0xe3, 0x0c, 0x5b, 0x4b, 0xe0, 0x40, 0xbf, 0x3d, 0x07, 0xdf, 0xab,
+	0x21, 0xfd, 0xc5, 0x25, 0x70, 0xa0, 0xd7, 0xe6, 0x06, 0xb6, 0xab, 0x0e, 0x32, 0x5c, 0x5a, 0x86,
+	0xa8, 0xf1, 0x72, 0x60, 0x6e, 0xed, 0xaa, 0x6d, 0x50, 0x96, 0x5b, 0xf6, 0xf2, 0x72, 0x5c, 0xb5,
+	0x5b, 0x33, 0xb5, 0x2b, 0x73, 0xb6, 0x32, 0xac, 0x9e, 0xdb, 0xed, 0x69, 0x57, 0xe7, 0x14, 0x03,
+	0xb0, 0xdd, 0x33, 0xb4, 0x6b, 0x4b, 0xe0, 0x4e, 0xcf, 0xd0, 0xae, 0x2f, 0xa3, 0x6f, 0x18, 0x5a,
+	0x71, 0x19, 0x7d, 0xc3, 0xd0, 0x6e, 0x2c, 0x5a, 0xf6, 0x09, 0x9f, 0xe0, 0xcd, 0x65, 0x08, 0x98,
+	0xe0, 0xce, 0xfc, 0x24, 0x00, 0x51, 0x6f, 0x19, 0x15, 0xb3, 0xa5, 0xdd, 0x5a, 0x36, 0xc1, 0x27,
+	0x38, 0xf9, 0xdb, 0xcb, 0x71, 0x7c, 0xf2, 0x1f, 0xe8, 0xb7, 0xd9, 0x8d, 0x79, 0x99, 0x9d, 0x9a,
+	0xeb, 0x18, 0xd6, 0xae, 0xe9, 0x68, 0x77, 0x96, 0x0d, 0xd9, 0xa9, 0xb9, 0x76, 0xab, 0xa5, 0xdd,
+	0x5d, 0x81, 0x73, 0x5a, 0x2d, 0xed, 0x1e, 0xed, 0xd6, 0x32, 0x56, 0x7a, 0x2d, 0xdb, 0x45, 0x4d,
+	0x4b, 0x73, 0xf6, 0xe0, 0x28, 0xa7, 0xaa, 0x7d, 0x38, 0x1f, 0x5e, 0x00, 0xaf, 0x74, 0x6d, 0xed,
+	0xa3, 0x39, 0x44, 0xaf, 0x52, 0x71, 0x9b, 0x76, 0xb3, 0xa6, 0x7d, 0x4c, 0xa5, 0x8b, 0x74, 0xb5,
+	0xbd, 0x4e, 0xc7, 0x6c, 0xb9, 0xcd, 0x9a, 0xf6, 0xb5, 0x65, 0xaa, 0x99, 0x2f, 0x9d, 0x46, 0xcd,
+	0xd2, 0xbe, 0x5e, 0x7e, 0x84, 0xa7, 0x17, 0xfe, 0xe1, 0xf8, 0x68, 0xa8, 0x5f, 0xe4, 0x49, 0x73,
+	0xbf, 0x59, 0x73, 0x3b, 0xdd, 0x8e, 0xc9, 0xb7, 0xac, 0x6d, 0x02, 0xf4, 0x2c, 0xd3, 0x36, 0x3b,
+	0x8e, 0xf6, 0xee, 0x6e, 0xf9, 0x3f, 0xa5, 0xb0, 0x8f, 0x37, 0x9a, 0x9d, 0x3c, 0xa1, 0x0f, 0x9d,
+	0xc5, 0x6d, 0x54, 0xa0, 0x6e, 0x9a, 0x8d, 0x85, 0x3d, 0x09, 0x60, 0x20, 0xf2, 0x25, 0xe4, 0x0e,
+	0xdc, 0xdf, 0x00, 0x64, 0xda, 0x3d, 0x2d, 0x4d, 0xa3, 0xc2, 0xb3, 0xb1, 0xe7, 0x34, 0xb4, 0xac,
+	0x02, 0xa8, 0x41, 0x11, 0x98, 0x53, 0x00, 0x50, 0x2c, 0x69, 0x9a, 0x22, 0xd5, 0xea, 0xee, 0x41,
+	0x7e, 0xbb, 0xab, 0x48, 0x6d, 0x74, 0x7b, 0xda, 0x53, 0xda, 0x39, 0xe0, 0x79, 0xaf, 0x63, 0x99,
+	0x3d, 0xd8, 0x86, 0x54, 0x90, 0x6d, 0x3e, 0x87, 0x82, 0xe1, 0xa7, 0xe9, 0xc4, 0x97, 0xa6, 0xf4,
+	0xc7, 0xaf, 0x80, 0xcc, 0xe0, 0x35, 0x7c, 0x6f, 0x0f, 0x32, 0x21, 0x2e, 0x93, 0x01, 0x45, 0x6e,
+	0xef, 0x95, 0xeb, 0x38, 0x2d, 0x5e, 0xde, 0x17, 0x28, 0x5a, 0x54, 0x78, 0xb3, 0x23, 0xd3, 0x81,
+	0x81, 0xa5, 0x29, 0x2e, 0xaa, 0xd3, 0x92, 0xe1, 0x6d, 0x38, 0x6e, 0xcd, 0xac, 0xc6, 0x70, 0x8d,
+	0x0a, 0x03, 0xc3, 0x71, 0x7b, 0x7b, 0x76, 0x83, 0x67, 0x34, 0xed, 0x12, 0x19, 0x13, 0x80, 0xdd,
+	0x1e, 0xc2, 0xf4, 0x39, 0x42, 0x90, 0xa0, 0x5d, 0x4e, 0x12, 0x72, 0xd8, 0x95, 0x98, 0x10, 0x34,
+	0xe0, 0xa5, 0x93, 0x76, 0x95, 0xac, 0x68, 0xd0, 0xd1, 0x43, 0xbb, 0x46, 0xb5, 0x15, 0x51, 0x75,
+	0x5e, 0x70, 0x6d, 0xae, 0xc7, 0x50, 0xd0, 0x92, 0xa0, 0xc5, 0xa4, 0xc4, 0x7a, 0xd3, 0x6c, 0xd5,
+	0xb4, 0x1b, 0xca, 0xd0, 0xa0, 0x4f, 0xaf, 0x52, 0xd1, 0x6e, 0xd2, 0xd2, 0x90, 0x3a, 0x00, 0xda,
+	0xd1, 0x8b, 0x62, 0xde, 0x0b, 0x5b, 0xd2, 0x3e, 0xde, 0x57, 0x51, 0xfa, 0x8c, 0xf4, 0x05, 0xb1,
+	0xa8, 0x8e, 0xdb, 0xad, 0xc4, 0x51, 0x9a, 0x11, 0x0c, 0x8a, 0xd7, 0xff, 0xfe, 0x3e, 0x43, 0x5b,
+	0x3a, 0x40, 0x3a, 0x5d, 0xb7, 0xb2, 0x57, 0xaf, 0x93, 0xdc, 0xff, 0x2a, 0x5c, 0x54, 0xf9, 0x4a,
+	0x90, 0xaf, 0x2d, 0x39, 0x8e, 0x5a, 0x11, 0xe3, 0x7c, 0x9b, 0x8e, 0xbb, 0xdb, 0x75, 0xba, 0x74,
+	0xfc, 0x4e, 0x51, 0x3c, 0x35, 0x1d, 0xf7, 0x85, 0xd5, 0x74, 0x4c, 0x75, 0x87, 0xc3, 0x10, 0x94,
+	0x18, 0xa3, 0xea, 0x34, 0xbb, 0x1d, 0x5b, 0xcb, 0xc4, 0x08, 0xa3, 0xd7, 0x6b, 0xbd, 0x92, 0x88,
+	0x6c, 0x8c, 0xa8, 0xb6, 0x4c, 0xc3, 0x92, 0x88, 0x35, 0xe1, 0xd7, 0x74, 0x5e, 0xd1, 0xd6, 0xc9,
+	0x52, 0xcd, 0x25, 0x96, 0xfa, 0x9b, 0x38, 0xa1, 0xf9, 0xaf, 0x03, 0xa9, 0xa0, 0xa8, 0x57, 0x13,
+	0x95, 0x4a, 0xbd, 0x2a, 0xea, 0x12, 0xb1, 0x53, 0x4b, 0x88, 0x6b, 0x3b, 0x56, 0xb3, 0x0a, 0xc7,
+	0x73, 0x49, 0x4a, 0x45, 0x4d, 0x26, 0x26, 0x45, 0x88, 0x20, 0xcd, 0x96, 0xff, 0x39, 0xbd, 0xbe,
+	0x94, 0xa3, 0x63, 0xbc, 0xa3, 0x31, 0xeb, 0x6a, 0x09, 0x4a, 0x22, 0xea, 0xae, 0x6d, 0x76, 0x6a,
+	0xf2, 0xe0, 0x1c, 0xab, 0x51, 0x77, 0xab, 0x0d, 0xb3, 0xfa, 0xcc, 0xed, 0xee, 0x9b, 0x56, 0xcb,
+	0xe8, 0xc9, 0x82, 0xa1, 0x5e, 0x77, 0x21, 0xc1, 0x40, 0x24, 0xed, 0x75, 0x9c, 0xd8, 0x68, 0xf5,
+	0x3a, 0x2f, 0xb5, 0x9f, 0x49, 0x44, 0x2e, 0x81, 0xa8, 0xbc, 0x92, 0x08, 0xad, 0x6c, 0xe3, 0xd1,
+	0x07, 0xbf, 0xe3, 0xc6, 0xd9, 0xed, 0x2e, 0x34, 0x62, 0x76, 0x95, 0x46, 0x8c, 0x80, 0xc4, 0x5d,
+	0x13, 0x09, 0x91, 0x8d, 0x90, 0xcf, 0xf1, 0x25, 0xdd, 0xc2, 0xf7, 0x76, 0x64, 0xf8, 0xdd, 0xa4,
+	0xe1, 0x77, 0x15, 0xc3, 0x4b, 0x08, 0xd9, 0x37, 0x5d, 0xb6, 0xd5, 0x1b, 0x1e, 0xdc, 0x1d, 0x49,
+	0x08, 0x9e, 0xbe, 0xa4, 0x10, 0x08, 0xb2, 0x96, 0x59, 0x85, 0x5c, 0x89, 0x61, 0xb0, 0x0b, 0xfe,
+	0x5a, 0x6b, 0x5a, 0x26, 0x5f, 0xb8, 0x4d, 0x54, 0xd2, 0x71, 0xeb, 0x75, 0x2d, 0x53, 0xee, 0xa1,
+	0x63, 0xcc, 0x7f, 0x95, 0x46, 0x8b, 0x63, 0x81, 0x95, 0xda, 0x86, 0x53, 0x6d, 0x68, 0x17, 0xc8,
+	0xdd, 0x84, 0x03, 0xca, 0x03, 0x9b, 0x25, 0x8c, 0xc4, 0x43, 0x3d, 0x5d, 0xfe, 0x07, 0x29, 0x7c,
+	0xc1, 0xb2, 0xe4, 0x7b, 0x2f, 0x5a, 0x2d, 0xcb, 0x72, 0x9b, 0xb5, 0x96, 0xe9, 0x3a, 0xcd, 0xb6,
+	0xd9, 0x55, 0x32, 0xa4, 0x65, 0xb9, 0x0d, 0xc3, 0xaa, 0x49, 0xb8, 0x30, 0x82, 0x25, 0x2b, 0xe7,
+	0x74, 0x4c, 0x89, 0x47, 0x3f, 0xe9, 0x7c, 0x12, 0x8e, 0x67, 0x77, 0x82, 0x67, 0xcb, 0x53, 0xfa,
+	0xa3, 0x5e, 0xfc, 0x9d, 0x38, 0x95, 0xcf, 0xee, 0x0f, 0x4d, 0xab, 0x2b, 0x97, 0xb4, 0x8d, 0x4b,
+	0xfa, 0xee, 0xa7, 0xef, 0x37, 0xf4, 0xab, 0x7c, 0xd6, 0x6d, 0xd7, 0x6e, 0x75, 0x5f, 0xf4, 0x0c,
+	0xa7, 0x41, 0x4d, 0x2f, 0xec, 0x86, 0xb5, 0xd5, 0x6e, 0x98, 0xda, 0xf9, 0x6a, 0xe3, 0xe9, 0x97,
+	0x2f, 0xf8, 0x64, 0xe1, 0x8b, 0x22, 0xb5, 0x98, 0xaf, 0xa8, 0x99, 0x03, 0xed, 0x09, 0x30, 0x3a,
+	0xe7, 0xe3, 0x1c, 0x38, 0xc0, 0xae, 0xc2, 0x19, 0xb6, 0x6d, 0x58, 0xcf, 0x34, 0x51, 0x94, 0x03,
+	0x7c, 0x21, 0xae, 0x3f, 0x57, 0x3f, 0x0f, 0x5b, 0xf4, 0xaf, 0x76, 0xd2, 0xbf, 0xda, 0x0b, 0xfe,
+	0xd5, 0x56, 0xfc, 0xeb, 0x50, 0x7d, 0x69, 0xaf, 0x86, 0x68, 0xbb, 0x9e, 0xe8, 0x00, 0x30, 0x04,
+	0x3d, 0xab, 0xf4, 0xe0, 0xd4, 0x4e, 0xb3, 0xa8, 0x43, 0x94, 0xf5, 0x6c, 0xb9, 0x1f, 0xb7, 0xeb,
+	0x6e, 0x65, 0xcf, 0xb2, 0x1d, 0xb9, 0x1f, 0xb7, 0xeb, 0xe2, 0x9c, 0x5e, 0xfe, 0x17, 0x74, 0x67,
+	0x10, 0x3f, 0xeb, 0xe1, 0xf6, 0xc1, 0xa9, 0x9b, 0xd4, 0x24, 0x74, 0xeb, 0x46, 0xb3, 0x65, 0xc2,
+	0x68, 0xb8, 0x45, 0x9a, 0x8e, 0x5b, 0x31, 0x6a, 0xb2, 0xad, 0x23, 0x3c, 0x8f, 0xc0, 0xe4, 0x8f,
+	0x69, 0xaa, 0x94, 0x08, 0xda, 0xec, 0xd8, 0x8e, 0xb5, 0x87, 0xa8, 0x0c, 0xed, 0x3f, 0x84, 0x42,
+	0x87, 0xce, 0xc6, 0xf4, 0xa2, 0xbf, 0x26, 0xc6, 0x5d, 0xa3, 0xaa, 0xc7, 0x54, 0xfa, 0x6c, 0x02,
+	0xb7, 0x1e, 0xb3, 0x89, 0x7e, 0x9b, 0x40, 0x6d, 0xc4, 0x6c, 0xb2, 0xef, 0x26, 0x70, 0xb9, 0x98,
+	0x0d, 0x7b, 0x11, 0xdd, 0x9e, 0x40, 0xe5, 0xf5, 0x0f, 0xd8, 0x4d, 0x44, 0xd9, 0x2f, 0x9a, 0x4e,
+	0xb5, 0x21, 0x9a, 0x61, 0x84, 0x67, 0x54, 0x59, 0x9a, 0xc9, 0x76, 0x98, 0x40, 0x17, 0xe2, 0x51,
+	0x65, 0xdf, 0x4a, 0xe0, 0x36, 0xa9, 0xd3, 0x26, 0x35, 0x92, 0x5d, 0x50, 0x22, 0xd8, 0xa2, 0x3d,
+	0xc3, 0x5c, 0xe2, 0x5b, 0x15, 0xf5, 0x4f, 0x76, 0xbe, 0xee, 0x8f, 0xc6, 0xfc, 0xee, 0x28, 0xff,
+	0x8b, 0x57, 0xe0, 0x8f, 0x8d, 0x7a, 0xd5, 0x6d, 0x76, 0xaa, 0xdd, 0x76, 0xcf, 0x70, 0x9a, 0xb0,
+	0xeb, 0x09, 0x2f, 0x03, 0x84, 0xd9, 0x33, 0x2d, 0x38, 0xa1, 0xfe, 0x45, 0x1a, 0xf3, 0xcb, 0x41,
+	0x7f, 0x28, 0x5e, 0x1b, 0xa2, 0x0c, 0x5c, 0xf0, 0x8a, 0x55, 0xe5, 0x2b, 0x42, 0xfd, 0x32, 0xd9,
+	0xe5, 0x10, 0x70, 0x5e, 0x75, 0x8b, 0xdd, 0x54, 0x00, 0x65, 0x8f, 0x52, 0x4b, 0x53, 0x13, 0x57,
+	0x60, 0x12, 0x53, 0x10, 0x1b, 0x92, 0x82, 0x44, 0x79, 0xa2, 0x33, 0x03, 0x08, 0xd4, 0x73, 0x8d,
+	0xe2, 0x53, 0x90, 0xb6, 0xcc, 0x8e, 0x3c, 0x29, 0x72, 0x18, 0x2f, 0x0d, 0x5c, 0xb3, 0xdd, 0x73,
+	0x5e, 0xc9, 0xe6, 0xb0, 0x82, 0xd8, 0xeb, 0x3c, 0xeb, 0x74, 0x5f, 0x74, 0xe4, 0xee, 0x22, 0xd5,
+	0xe7, 0x36, 0x6f, 0xc2, 0x12, 0xc7, 0xf3, 0x6a, 0xda, 0xae, 0xdd, 0x32, 0xf6, 0x4d, 0x8d, 0xcd,
+	0x4d, 0x96, 0x9f, 0x8d, 0x45, 0x55, 0x28, 0x81, 0xbc, 0x4d, 0xa4, 0x6d, 0xea, 0x1f, 0xb1, 0xbb,
+	0x04, 0x8e, 0x7b, 0xb4, 0x34, 0x3c, 0xec, 0x86, 0xe0, 0xc2, 0xda, 0x56, 0xf9, 0x0f, 0x32, 0x98,
+	0x7f, 0xc0, 0xde, 0x54, 0x94, 0x72, 0x73, 0xd3, 0x48, 0x86, 0x62, 0x56, 0xd1, 0x6b, 0x14, 0x40,
+	0x98, 0x74, 0x4a, 0x18, 0xd4, 0x58, 0x62, 0x50, 0x51, 0xbb, 0x28, 0x48, 0x94, 0x94, 0x99, 0x43,
+	0x74, 0xf7, 0x30, 0x36, 0xe4, 0x36, 0x2c, 0x10, 0x86, 0xb5, 0xbb, 0x07, 0xc2, 0xb4, 0x35, 0xb1,
+	0x04, 0x86, 0x58, 0x82, 0x75, 0x45, 0x45, 0xa7, 0x0b, 0x9b, 0x4e, 0x07, 0x4c, 0x8d, 0x81, 0x2e,
+	0xf8, 0xb1, 0x14, 0xcd, 0x09, 0x7f, 0x50, 0x86, 0xc3, 0x9a, 0x34, 0x4f, 0x91, 0x02, 0x18, 0x1e,
+	0xe4, 0xdc, 0x41, 0x3b, 0x76, 0xd3, 0x76, 0x60, 0x54, 0xa6, 0xdf, 0x62, 0x45, 0x42, 0xef, 0x75,
+	0xec, 0xbd, 0x1e, 0x28, 0x69, 0xd6, 0xdc, 0xae, 0x55, 0x33, 0x2d, 0xad, 0x30, 0x67, 0x0f, 0xc7,
+	0xd8, 0xd5, 0x36, 0xe7, 0x26, 0x00, 0x25, 0x06, 0x9f, 0xb2, 0x38, 0x9c, 0xab, 0x08, 0x30, 0xe0,
+	0xf6, 0x9c, 0x01, 0x79, 0x77, 0x59, 0xcc, 0xfa, 0x62, 0xf9, 0x27, 0x29, 0x56, 0x14, 0xcb, 0xa3,
+	0x16, 0x97, 0x4a, 0x58, 0x55, 0x9a, 0x55, 0xe1, 0x4f, 0x3c, 0x87, 0xc9, 0x24, 0x88, 0x08, 0x7b,
+	0xaf, 0x87, 0xe0, 0x94, 0x42, 0x9f, 0xf0, 0x35, 0x91, 0x07, 0x63, 0x7a, 0x59, 0x7d, 0x66, 0x28,
+	0xd3, 0x2c, 0xa2, 0xb0, 0xff, 0x9b, 0x15, 0xda, 0x37, 0x97, 0x2c, 0xff, 0xda, 0xdc, 0x80, 0x72,
+	0xf9, 0xd7, 0x85, 0xe1, 0x9a, 0xb1, 0x23, 0x6d, 0x88, 0x05, 0x6e, 0x8a, 0x05, 0xce, 0x95, 0xff,
+	0x25, 0x7d, 0x0d, 0x00, 0x93, 0xc7, 0x3e, 0x97, 0xea, 0x9a, 0xed, 0x65, 0xae, 0xd9, 0x56, 0x5d,
+	0x33, 0x09, 0x83, 0xe5, 0x91, 0xf1, 0x4f, 0xb0, 0x5a, 0x0b, 0xb6, 0x3b, 0x8b, 0x9a, 0xd9, 0x73,
+	0xc8, 0xce, 0x0b, 0x05, 0x99, 0x15, 0x3e, 0x44, 0xc8, 0x17, 0xcd, 0x56, 0xad, 0x6a, 0x58, 0x35,
+	0x28, 0xab, 0xc9, 0xe7, 0x08, 0x83, 0x87, 0x95, 0xf5, 0x39, 0xe8, 0xbe, 0xd1, 0xda, 0x33, 0xb5,
+	0x8d, 0x39, 0xe5, 0xb9, 0x68, 0xd1, 0x31, 0x12, 0xc0, 0x9e, 0x65, 0x5a, 0xe6, 0x73, 0x2d, 0xaf,
+	0x48, 0xa8, 0xed, 0xf5, 0x48, 0x2e, 0x13, 0x76, 0x6a, 0x0b, 0x3b, 0x15, 0xca, 0x7f, 0x4c, 0x4e,
+	0x12, 0x97, 0xcb, 0x4a, 0xee, 0xc5, 0x01, 0xeb, 0xed, 0xba, 0xf4, 0x12, 0x59, 0x3e, 0x71, 0x20,
+	0xa5, 0xf9, 0xbd, 0x56, 0x4b, 0xe6, 0x4d, 0x0e, 0x9f, 0x73, 0x11, 0x45, 0x8c, 0xa8, 0xa5, 0x33,
+	0xa2, 0x20, 0x6f, 0xcb, 0xfc, 0x2d, 0xcb, 0x68, 0x29, 0x81, 0x2a, 0xb3, 0xb5, 0x79, 0x44, 0xb5,
+	0xdb, 0x6e, 0x1b, 0x1d, 0xb0, 0x13, 0x4e, 0x5e, 0x22, 0xea, 0x2d, 0x63, 0xd7, 0xd6, 0x36, 0xca,
+	0x7f, 0x98, 0xc1, 0xcf, 0xc9, 0xe2, 0x4a, 0x58, 0x9d, 0x15, 0x2a, 0xba, 0x0b, 0x4c, 0xb8, 0xe1,
+	0x9a, 0x2f, 0x9b, 0xb6, 0x63, 0xcb, 0x77, 0x15, 0x1c, 0x23, 0xca, 0x4c, 0x8c, 0xf5, 0x14, 0xf9,
+	0x32, 0x47, 0xbd, 0x30, 0x9b, 0xbb, 0x0d, 0x47, 0x0d, 0x6a, 0x19, 0x06, 0x1c, 0x0f, 0x29, 0xa2,
+	0x5b, 0x47, 0x4e, 0x38, 0x6b, 0xe1, 0x8e, 0xa9, 0xa2, 0x2a, 0x7b, 0x90, 0x67, 0xe1, 0xe4, 0x70,
+	0x97, 0xdd, 0x12, 0xb8, 0x6a, 0xc3, 0x68, 0x76, 0x9a, 0x9d, 0xdd, 0x84, 0xe0, 0x35, 0x4a, 0x32,
+	0x38, 0x30, 0xcf, 0x32, 0x2a, 0x7a, 0x5d, 0x94, 0xe1, 0x80, 0x6e, 0x75, 0xbb, 0x3d, 0xb9, 0x61,
+	0xec, 0x2a, 0x8b, 0x46, 0x93, 0xc8, 0xa9, 0x28, 0x3e, 0x9a, 0x59, 0x93, 0xb9, 0x0c, 0xfd, 0x65,
+	0x57, 0xda, 0x1e, 0x22, 0x43, 0xb4, 0x17, 0x77, 0xe7, 0x0d, 0x5f, 0x20, 0x27, 0x90, 0x08, 0x9c,
+	0x90, 0xb6, 0x49, 0x0b, 0x22, 0xe1, 0x5c, 0x63, 0xf9, 0x6e, 0x71, 0x37, 0x5e, 0xec, 0xed, 0xf2,
+	0xef, 0x91, 0xe3, 0x89, 0xbf, 0xc6, 0x9b, 0x58, 0x22, 0xd4, 0xa6, 0x27, 0xc4, 0x50, 0x93, 0x17,
+	0xb5, 0x91, 0xd0, 0x06, 0xc6, 0x98, 0xac, 0x65, 0x7b, 0xb1, 0x9a, 0xfc, 0x45, 0xa9, 0x58, 0x14,
+	0x09, 0x37, 0x6a, 0xfb, 0xa6, 0xe5, 0x34, 0x6d, 0x53, 0xba, 0x5f, 0x4f, 0x71, 0xbf, 0xf2, 0x5f,
+	0x47, 0xa7, 0x91, 0x7f, 0xb2, 0x3a, 0xa1, 0x11, 0xbd, 0x23, 0x4c, 0x78, 0xb7, 0x0c, 0x06, 0x67,
+	0x6e, 0x64, 0xf1, 0x2e, 0xc3, 0x89, 0xc5, 0xa7, 0xcb, 0x3f, 0xc4, 0xf9, 0xe2, 0x95, 0x2c, 0x7f,
+	0xb6, 0x64, 0xbe, 0xcf, 0xbb, 0xc9, 0xf9, 0xe2, 0x98, 0x12, 0x8a, 0x1b, 0x92, 0x90, 0xcd, 0xc1,
+	0x42, 0xf6, 0xdf, 0x60, 0xb7, 0x17, 0xfe, 0x78, 0xf7, 0x12, 0xf5, 0xed, 0x6a, 0x22, 0x50, 0x44,
+	0x01, 0x24, 0xc1, 0x98, 0xfa, 0x50, 0x3e, 0x07, 0xc6, 0xba, 0xdf, 0x9a, 0xbf, 0x90, 0x95, 0x10,
+	0x4f, 0x07, 0x38, 0xab, 0x5e, 0x85, 0xba, 0x9b, 0x5b, 0x46, 0x01, 0x71, 0x8f, 0x8d, 0x8f, 0x70,
+	0x16, 0x8d, 0x06, 0xf5, 0xa5, 0x96, 0x2e, 0xff, 0x51, 0x1a, 0xed, 0x1e, 0x1f, 0x2b, 0x16, 0x53,
+	0x50, 0x3b, 0x99, 0x82, 0x30, 0x82, 0x39, 0x10, 0xab, 0x50, 0x8a, 0xe0, 0x14, 0xad, 0x78, 0x5b,
+	0x8d, 0x60, 0xec, 0x57, 0xa4, 0x55, 0x94, 0x88, 0x0b, 0x44, 0x89, 0x8a, 0xa2, 0x3d, 0xef, 0xe6,
+	0x59, 0x32, 0x5b, 0x3b, 0x99, 0x5f, 0x44, 0xd2, 0x96, 0x60, 0xcb, 0x70, 0x4c, 0x99, 0x8c, 0xda,
+	0x71, 0x4c, 0x58, 0xfc, 0xed, 0xfe, 0x1c, 0x71, 0x05, 0x24, 0xe7, 0x68, 0xbb, 0x48, 0x40, 0xdd,
+	0x9a, 0xe9, 0x18, 0xcd, 0x96, 0x96, 0x57, 0x55, 0xa5, 0x8c, 0xc1, 0x35, 0xb5, 0x35, 0xa6, 0x4e,
+	0x5d, 0x24, 0x13, 0xa3, 0x53, 0xb3, 0xb5, 0x42, 0xf9, 0x5f, 0xa5, 0x96, 0x7c, 0x20, 0x18, 0x2e,
+	0x73, 0xe2, 0xfa, 0x9c, 0x13, 0xd3, 0x7b, 0x6b, 0x01, 0x96, 0x3b, 0xb8, 0x58, 0xb1, 0x98, 0x01,
+	0xb2, 0x82, 0xbc, 0x2c, 0x51, 0x57, 0xbc, 0x26, 0x33, 0x2f, 0x44, 0xd6, 0x21, 0x59, 0x11, 0x0b,
+	0x75, 0xe9, 0x4f, 0x6b, 0xe5, 0xff, 0x42, 0xbb, 0x73, 0xf2, 0xcf, 0x07, 0x88, 0xe3, 0x1e, 0x9c,
+	0xb4, 0xed, 0x6a, 0x7c, 0xfc, 0xe3, 0xf7, 0x47, 0x5e, 0xc8, 0x77, 0xd3, 0xed, 0x9e, 0x6b, 0xec,
+	0xee, 0x5a, 0xe6, 0xae, 0xc1, 0x0f, 0xe9, 0x74, 0xe2, 0x13, 0xb7, 0x51, 0x32, 0xc2, 0xe0, 0xbd,
+	0xe4, 0x5b, 0x5c, 0x49, 0x86, 0x61, 0xb4, 0x16, 0x03, 0x30, 0x05, 0xae, 0xc7, 0x7c, 0xe2, 0xb4,
+	0x6f, 0x57, 0xb5, 0x0d, 0x61, 0x70, 0x01, 0x15, 0x67, 0x1a, 0xd9, 0xe9, 0x6d, 0xf7, 0xc8, 0x8d,
+	0xf2, 0xe2, 0x48, 0x4d, 0x00, 0x91, 0x0c, 0x58, 0x2c, 0x02, 0xe1, 0x52, 0x44, 0x21, 0xc6, 0x24,
+	0x0f, 0x4c, 0xf2, 0x8a, 0x86, 0x98, 0x04, 0xd7, 0x45, 0x1c, 0x9f, 0xda, 0xbd, 0x65, 0x47, 0xf3,
+	0x9d, 0xa5, 0x7f, 0x36, 0xc2, 0x15, 0x9f, 0xc0, 0x23, 0x63, 0x1d, 0xce, 0x73, 0x0b, 0xaf, 0x79,
+	0x05, 0xbc, 0xdd, 0xb5, 0x4c, 0x2d, 0x55, 0x6e, 0x51, 0x3c, 0x26, 0xff, 0x14, 0x04, 0x49, 0x12,
+	0x1a, 0xd7, 0xf1, 0x6e, 0x83, 0x22, 0x8b, 0xdc, 0x5f, 0x62, 0x48, 0xda, 0x9f, 0x67, 0x50, 0xb5,
+	0x15, 0x1f, 0x49, 0x4b, 0xbf, 0xe9, 0x39, 0xea, 0x29, 0x1a, 0x92, 0x13, 0xee, 0x7c, 0x0b, 0x18,
+	0xb7, 0xdd, 0xb4, 0x6d, 0x59, 0x91, 0x72, 0x74, 0xc7, 0x7c, 0x49, 0x67, 0x4e, 0x5b, 0x4b, 0x53,
+	0xdd, 0x3d, 0x8f, 0x40, 0xb6, 0x8c, 0xb8, 0x8f, 0x00, 0xd8, 0x64, 0x53, 0x34, 0x4b, 0x7b, 0xfc,
+	0x22, 0x0a, 0x59, 0xd7, 0x54, 0xd6, 0x64, 0xdb, 0x74, 0x5d, 0x65, 0x4d, 0xa0, 0x90, 0x75, 0x43,
+	0xc6, 0x40, 0xcf, 0xa1, 0x86, 0x40, 0x4e, 0x06, 0x23, 0x8c, 0x26, 0x0b, 0x42, 0x26, 0x2e, 0x98,
+	0xc4, 0x4a, 0xd8, 0xa6, 0x83, 0xe5, 0x9b, 0x38, 0x5f, 0x2f, 0xc1, 0xe1, 0x30, 0x5b, 0x2a, 0x33,
+	0xaa, 0x21, 0x99, 0xb7, 0x55, 0xe6, 0x24, 0x0e, 0x99, 0x2f, 0xea, 0x37, 0xe3, 0x95, 0x48, 0xf8,
+	0xd7, 0xcf, 0xde, 0x67, 0xf4, 0x3b, 0xf1, 0x5a, 0xa8, 0x38, 0x64, 0x05, 0x07, 0xfc, 0x7d, 0xfa,
+	0xbb, 0x19, 0x58, 0x72, 0x25, 0x6e, 0x64, 0x50, 0x5f, 0xb0, 0x5e, 0x5d, 0xb8, 0xbd, 0x02, 0x30,
+	0x6c, 0x1f, 0x52, 0x51, 0xa5, 0xa5, 0x44, 0xb5, 0x14, 0x63, 0x5a, 0xcd, 0x7d, 0xb3, 0x63, 0xda,
+	0xf1, 0xf5, 0x8c, 0x5d, 0xa5, 0x58, 0xd2, 0xb2, 0x0a, 0x83, 0xac, 0xa0, 0x78, 0xdf, 0xd6, 0xd6,
+	0x72, 0xe5, 0x2f, 0xb0, 0x21, 0x10, 0xdf, 0x43, 0xc7, 0xab, 0xe7, 0x62, 0x0b, 0x55, 0x1b, 0x64,
+	0xa8, 0xe5, 0x73, 0xc7, 0x6d, 0x37, 0x3b, 0x98, 0xd1, 0x53, 0x0a, 0xcc, 0x78, 0x89, 0xb0, 0x34,
+	0xc5, 0xe0, 0xf3, 0x25, 0x2d, 0x8c, 0x1f, 0xe1, 0x69, 0x78, 0xee, 0x22, 0x32, 0xf9, 0x69, 0xd5,
+	0xc2, 0x7e, 0x4a, 0xa7, 0x5b, 0x6d, 0x18, 0x9d, 0x5d, 0x53, 0x36, 0xf3, 0x05, 0xc2, 0x7c, 0xbe,
+	0x67, 0xb4, 0xe4, 0x05, 0x35, 0x01, 0x6d, 0x1b, 0x36, 0xee, 0x5e, 0x49, 0x62, 0x3c, 0xd3, 0x67,
+	0x2a, 0x7b, 0xec, 0x03, 0x3f, 0x38, 0xe4, 0x97, 0x01, 0x07, 0x7e, 0x30, 0xfc, 0x14, 0xff, 0xa7,
+	0x16, 0x79, 0x39, 0xf0, 0xc1, 0xa3, 0x1f, 0x3e, 0x3a, 0x1c, 0x45, 0x47, 0xc7, 0x07, 0x9f, 0x0e,
+	0xfc, 0xc9, 0x7d, 0x41, 0x76, 0x1f, 0xc9, 0x7e, 0x85, 0xfe, 0x43, 0x97, 0x93, 0x47, 0xf7, 0x0f,
+	0x7d, 0xf5, 0xbf, 0x75, 0x39, 0x58, 0xe7, 0x98, 0x47, 0xff, 0x37, 0x00, 0x00, 0xff, 0xff, 0xe3,
+	0xd4, 0xf3, 0x50, 0xfa, 0x65, 0x00, 0x00,
+}
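+
+// The variable above holds the gzipped FileDescriptorProto that is handed to
+// proto.RegisterFile in init(). A minimal sketch of recovering the plain
+// descriptor, should it ever be needed (illustrative only; not part of the
+// generated API, and error handling is elided):
+//
+//	// descriptor is github.com/golang/protobuf/protoc-gen-go/descriptor
+//	r, _ := gzip.NewReader(bytes.NewReader(fileDescriptor_08e3a4e375aeddc7))
+//	raw, _ := ioutil.ReadAll(r)
+//	var fd descriptor.FileDescriptorProto
+//	_ = proto.Unmarshal(raw, &fd)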
diff --git a/vendor/github.com/opencord/voltha-protos/v3/go/voltha/adapter.pb.go b/vendor/github.com/opencord/voltha-protos/v3/go/voltha/adapter.pb.go
new file mode 100644
index 0000000..4cc76e0
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v3/go/voltha/adapter.pb.go
@@ -0,0 +1,234 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/adapter.proto
+
+package voltha
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	any "github.com/golang/protobuf/ptypes/any"
+	timestamp "github.com/golang/protobuf/ptypes/timestamp"
+	_ "github.com/opencord/voltha-protos/v3/go/common"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type AdapterConfig struct {
+	// Custom (vendor-specific) configuration attributes
+	AdditionalConfig     *any.Any `protobuf:"bytes,64,opt,name=additional_config,json=additionalConfig,proto3" json:"additional_config,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AdapterConfig) Reset()         { *m = AdapterConfig{} }
+func (m *AdapterConfig) String() string { return proto.CompactTextString(m) }
+func (*AdapterConfig) ProtoMessage()    {}
+func (*AdapterConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7e998ce153307274, []int{0}
+}
+
+func (m *AdapterConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AdapterConfig.Unmarshal(m, b)
+}
+func (m *AdapterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AdapterConfig.Marshal(b, m, deterministic)
+}
+func (m *AdapterConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AdapterConfig.Merge(m, src)
+}
+func (m *AdapterConfig) XXX_Size() int {
+	return xxx_messageInfo_AdapterConfig.Size(m)
+}
+func (m *AdapterConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_AdapterConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AdapterConfig proto.InternalMessageInfo
+
+func (m *AdapterConfig) GetAdditionalConfig() *any.Any {
+	if m != nil {
+		return m.AdditionalConfig
+	}
+	return nil
+}
+
+// Adapter (software plugin)
+type Adapter struct {
+	// Unique name of adapter, matching the python package name under
+	// voltha/adapters.
+	Id      string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Vendor  string `protobuf:"bytes,2,opt,name=vendor,proto3" json:"vendor,omitempty"`
+	Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
+	// Adapter configuration
+	Config *AdapterConfig `protobuf:"bytes,16,opt,name=config,proto3" json:"config,omitempty"`
+	// Custom descriptors and custom configuration
+	AdditionalDescription *any.Any `protobuf:"bytes,64,opt,name=additional_description,json=additionalDescription,proto3" json:"additional_description,omitempty"`
+	LogicalDeviceIds      []string `protobuf:"bytes,4,rep,name=logical_device_ids,json=logicalDeviceIds,proto3" json:"logical_device_ids,omitempty"`
+	// Timestamp when the adapter last sent a message to the core
+	LastCommunication    *timestamp.Timestamp `protobuf:"bytes,5,opt,name=last_communication,json=lastCommunication,proto3" json:"last_communication,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *Adapter) Reset()         { *m = Adapter{} }
+func (m *Adapter) String() string { return proto.CompactTextString(m) }
+func (*Adapter) ProtoMessage()    {}
+func (*Adapter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7e998ce153307274, []int{1}
+}
+
+func (m *Adapter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Adapter.Unmarshal(m, b)
+}
+func (m *Adapter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Adapter.Marshal(b, m, deterministic)
+}
+func (m *Adapter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Adapter.Merge(m, src)
+}
+func (m *Adapter) XXX_Size() int {
+	return xxx_messageInfo_Adapter.Size(m)
+}
+func (m *Adapter) XXX_DiscardUnknown() {
+	xxx_messageInfo_Adapter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Adapter proto.InternalMessageInfo
+
+func (m *Adapter) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *Adapter) GetVendor() string {
+	if m != nil {
+		return m.Vendor
+	}
+	return ""
+}
+
+func (m *Adapter) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
+
+func (m *Adapter) GetConfig() *AdapterConfig {
+	if m != nil {
+		return m.Config
+	}
+	return nil
+}
+
+func (m *Adapter) GetAdditionalDescription() *any.Any {
+	if m != nil {
+		return m.AdditionalDescription
+	}
+	return nil
+}
+
+func (m *Adapter) GetLogicalDeviceIds() []string {
+	if m != nil {
+		return m.LogicalDeviceIds
+	}
+	return nil
+}
+
+func (m *Adapter) GetLastCommunication() *timestamp.Timestamp {
+	if m != nil {
+		return m.LastCommunication
+	}
+	return nil
+}
+
+type Adapters struct {
+	Items                []*Adapter `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
+	XXX_unrecognized     []byte     `json:"-"`
+	XXX_sizecache        int32      `json:"-"`
+}
+
+func (m *Adapters) Reset()         { *m = Adapters{} }
+func (m *Adapters) String() string { return proto.CompactTextString(m) }
+func (*Adapters) ProtoMessage()    {}
+func (*Adapters) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7e998ce153307274, []int{2}
+}
+
+func (m *Adapters) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Adapters.Unmarshal(m, b)
+}
+func (m *Adapters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Adapters.Marshal(b, m, deterministic)
+}
+func (m *Adapters) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Adapters.Merge(m, src)
+}
+func (m *Adapters) XXX_Size() int {
+	return xxx_messageInfo_Adapters.Size(m)
+}
+func (m *Adapters) XXX_DiscardUnknown() {
+	xxx_messageInfo_Adapters.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Adapters proto.InternalMessageInfo
+
+func (m *Adapters) GetItems() []*Adapter {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*AdapterConfig)(nil), "voltha.AdapterConfig")
+	proto.RegisterType((*Adapter)(nil), "voltha.Adapter")
+	proto.RegisterType((*Adapters)(nil), "voltha.Adapters")
+}
+
+func init() { proto.RegisterFile("voltha_protos/adapter.proto", fileDescriptor_7e998ce153307274) }
+
+var fileDescriptor_7e998ce153307274 = []byte{
+	// 405 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xd1, 0x6a, 0xdb, 0x30,
+	0x14, 0x86, 0x71, 0xb2, 0xb8, 0xab, 0x4a, 0x59, 0xaa, 0x2d, 0xc3, 0xf3, 0x28, 0x35, 0x81, 0x81,
+	0x2f, 0x56, 0x99, 0xb5, 0x2f, 0xb0, 0xa4, 0xbd, 0xe9, 0xad, 0x28, 0xbb, 0xd8, 0x8d, 0x51, 0x24,
+	0xd5, 0x15, 0xd8, 0x3a, 0xc6, 0x52, 0x0c, 0x7d, 0xc8, 0xbd, 0xc1, 0x1e, 0x60, 0x4f, 0xb0, 0xeb,
+	0x11, 0x49, 0x26, 0x4e, 0x06, 0xbd, 0x32, 0xfa, 0xbf, 0xff, 0xfc, 0xe7, 0x1c, 0xc9, 0xe8, 0x73,
+	0x0f, 0xb5, 0x7d, 0x66, 0x65, 0xdb, 0x81, 0x05, 0x53, 0x30, 0xc1, 0x5a, 0x2b, 0x3b, 0xe2, 0x8e,
+	0x38, 0xf6, 0x30, 0xfd, 0x54, 0x01, 0x54, 0xb5, 0x2c, 0x9c, 0xba, 0xd9, 0x3e, 0x15, 0x4c, 0xbf,
+	0x78, 0x4b, 0x9a, 0x1e, 0xd6, 0x73, 0x68, 0x1a, 0xd0, 0x81, 0x25, 0x87, 0xac, 0x91, 0x96, 0x05,
+	0x72, 0x75, 0x1c, 0x68, 0x55, 0x23, 0x8d, 0x65, 0x4d, 0xeb, 0x0d, 0x4b, 0x8a, 0xce, 0x57, 0x7e,
+	0x94, 0x3b, 0xd0, 0x4f, 0xaa, 0xc2, 0x2b, 0x74, 0xc1, 0x84, 0x50, 0x56, 0x81, 0x66, 0x75, 0xc9,
+	0x9d, 0x98, 0x7c, 0xcf, 0xa2, 0xfc, 0xec, 0xe6, 0x03, 0xf1, 0x69, 0x64, 0x48, 0x23, 0x2b, 0xfd,
+	0x42, 0xe7, 0x7b, 0xbb, 0x8f, 0x58, 0xfe, 0x9e, 0xa0, 0x93, 0x10, 0x8a, 0x17, 0x68, 0xa2, 0x44,
+	0x12, 0x65, 0x51, 0x7e, 0xba, 0x9e, 0xfd, 0xf9, 0xfb, 0xeb, 0x32, 0xa2, 0x13, 0x25, 0xf0, 0x25,
+	0x8a, 0x7b, 0xa9, 0x05, 0x74, 0xc9, 0x64, 0x8c, 0x82, 0x88, 0xaf, 0xd0, 0x49, 0x2f, 0x3b, 0xa3,
+	0x40, 0x27, 0xd3, 0x31, 0x1f, 0x54, 0x7c, 0x8d, 0xe2, 0x30, 0xda, 0xdc, 0x8d, 0xb6, 0x20, 0xfe,
+	0x0a, 0xc8, 0xc1, 0x32, 0x34, 0x98, 0x30, 0x45, 0x1f, 0x47, 0x4b, 0x09, 0x69, 0x78, 0xa7, 0xda,
+	0xdd, 0xe9, 0xb5, 0xcd, 0x86, 0xa6, 0x8b, 0x7d, 0xe9, 0xfd, 0xbe, 0x12, 0x7f, 0x45, 0xb8, 0x86,
+	0x4a, 0x71, 0x17, 0xd8, 0x2b, 0x2e, 0x4b, 0x25, 0x4c, 0xf2, 0x26, 0x9b, 0xe6, 0xa7, 0x74, 0x1e,
+	0xc8, 0xbd, 0x03, 0x0f, 0xc2, 0xe0, 0x07, 0x84, 0x6b, 0x66, 0x6c, 0xb9, 0x7b, 0xb7, 0xad, 0x56,
+	0x9c, 0xb9, 0xee, 0x33, 0xd7, 0x3d, 0xfd, 0xaf, 0xfb, 0xe3, 0xf0, 0x4a, 0xf4, 0x62, 0x57, 0x75,
+	0x37, 0x2e, 0x5a, 0x7e, 0x43, 0x6f, 0xc3, 0x96, 0x06, 0x7f, 0x41, 0x33, 0x65, 0x65, 0x63, 0x92,
+	0x28, 0x9b, 0xe6, 0x67, 0x37, 0xef, 0x8e, 0xae, 0x81, 0x7a, 0xba, 0x7e, 0x44, 0xef, 0xa1, 0xab,
+	0x08, 0xb4, 0x52, 0x73, 0xe8, 0x44, 0x70, 0xad, 0xcf, 0x7f, 0xb8, 0x6f, 0x30, 0xff, 0x24, 0x95,
+	0xb2, 0xcf, 0xdb, 0x0d, 0xe1, 0xd0, 0x14, 0x83, 0xb5, 0xf0, 0xd6, 0xeb, 0xf0, 0x6b, 0xf5, 0xb7,
+	0x45, 0x05, 0x41, 0xdb, 0xc4, 0x4e, 0xbc, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0x4e, 0xc5, 0xdf,
+	0x09, 0xdb, 0x02, 0x00, 0x00,
+}
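For orientation, here is a minimal sketch of round-tripping the generated Adapter message with the legacy github.com/golang/protobuf API that these files target. It is not part of the vendored code, and the adapter identity values are purely illustrative:

package main

import (
	"fmt"
	"log"

	proto "github.com/golang/protobuf/proto"
	voltha "github.com/opencord/voltha-protos/v3/go/voltha"
)

func main() {
	// Illustrative identity; a real adapter reports its own id/vendor/version.
	a := &voltha.Adapter{Id: "openolt", Vendor: "VOLTHA OpenOLT", Version: "3.0.0"}

	// Marshal to the protobuf wire format and back again.
	buf, err := proto.Marshal(a)
	if err != nil {
		log.Fatal(err)
	}
	var out voltha.Adapter
	if err := proto.Unmarshal(buf, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.GetId(), out.GetVersion())
}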
diff --git a/vendor/github.com/opencord/voltha-protos/v3/go/voltha/device.pb.go b/vendor/github.com/opencord/voltha-protos/v3/go/voltha/device.pb.go
new file mode 100644
index 0000000..e713544
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v3/go/voltha/device.pb.go
@@ -0,0 +1,1865 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/device.proto
+
+package voltha
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	any "github.com/golang/protobuf/ptypes/any"
+	common "github.com/opencord/voltha-protos/v3/go/common"
+	openflow_13 "github.com/opencord/voltha-protos/v3/go/openflow_13"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type PmConfig_PmType int32
+
+const (
+	PmConfig_COUNTER PmConfig_PmType = 0
+	PmConfig_GAUGE   PmConfig_PmType = 1
+	PmConfig_STATE   PmConfig_PmType = 2
+	PmConfig_CONTEXT PmConfig_PmType = 3
+)
+
+var PmConfig_PmType_name = map[int32]string{
+	0: "COUNTER",
+	1: "GAUGE",
+	2: "STATE",
+	3: "CONTEXT",
+}
+
+var PmConfig_PmType_value = map[string]int32{
+	"COUNTER": 0,
+	"GAUGE":   1,
+	"STATE":   2,
+	"CONTEXT": 3,
+}
+
+func (x PmConfig_PmType) String() string {
+	return proto.EnumName(PmConfig_PmType_name, int32(x))
+}
+
+func (PmConfig_PmType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{2, 0}
+}
+
+type ImageDownload_ImageDownloadState int32
+
+const (
+	ImageDownload_DOWNLOAD_UNKNOWN     ImageDownload_ImageDownloadState = 0
+	ImageDownload_DOWNLOAD_SUCCEEDED   ImageDownload_ImageDownloadState = 1
+	ImageDownload_DOWNLOAD_REQUESTED   ImageDownload_ImageDownloadState = 2
+	ImageDownload_DOWNLOAD_STARTED     ImageDownload_ImageDownloadState = 3
+	ImageDownload_DOWNLOAD_FAILED      ImageDownload_ImageDownloadState = 4
+	ImageDownload_DOWNLOAD_UNSUPPORTED ImageDownload_ImageDownloadState = 5
+	ImageDownload_DOWNLOAD_CANCELLED   ImageDownload_ImageDownloadState = 6
+)
+
+var ImageDownload_ImageDownloadState_name = map[int32]string{
+	0: "DOWNLOAD_UNKNOWN",
+	1: "DOWNLOAD_SUCCEEDED",
+	2: "DOWNLOAD_REQUESTED",
+	3: "DOWNLOAD_STARTED",
+	4: "DOWNLOAD_FAILED",
+	5: "DOWNLOAD_UNSUPPORTED",
+	6: "DOWNLOAD_CANCELLED",
+}
+
+var ImageDownload_ImageDownloadState_value = map[string]int32{
+	"DOWNLOAD_UNKNOWN":     0,
+	"DOWNLOAD_SUCCEEDED":   1,
+	"DOWNLOAD_REQUESTED":   2,
+	"DOWNLOAD_STARTED":     3,
+	"DOWNLOAD_FAILED":      4,
+	"DOWNLOAD_UNSUPPORTED": 5,
+	"DOWNLOAD_CANCELLED":   6,
+}
+
+func (x ImageDownload_ImageDownloadState) String() string {
+	return proto.EnumName(ImageDownload_ImageDownloadState_name, int32(x))
+}
+
+func (ImageDownload_ImageDownloadState) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{7, 0}
+}
+
+type ImageDownload_ImageDownloadFailureReason int32
+
+const (
+	ImageDownload_NO_ERROR           ImageDownload_ImageDownloadFailureReason = 0
+	ImageDownload_INVALID_URL        ImageDownload_ImageDownloadFailureReason = 1
+	ImageDownload_DEVICE_BUSY        ImageDownload_ImageDownloadFailureReason = 2
+	ImageDownload_INSUFFICIENT_SPACE ImageDownload_ImageDownloadFailureReason = 3
+	ImageDownload_UNKNOWN_ERROR      ImageDownload_ImageDownloadFailureReason = 4
+	ImageDownload_CANCELLED          ImageDownload_ImageDownloadFailureReason = 5
+)
+
+var ImageDownload_ImageDownloadFailureReason_name = map[int32]string{
+	0: "NO_ERROR",
+	1: "INVALID_URL",
+	2: "DEVICE_BUSY",
+	3: "INSUFFICIENT_SPACE",
+	4: "UNKNOWN_ERROR",
+	5: "CANCELLED",
+}
+
+var ImageDownload_ImageDownloadFailureReason_value = map[string]int32{
+	"NO_ERROR":           0,
+	"INVALID_URL":        1,
+	"DEVICE_BUSY":        2,
+	"INSUFFICIENT_SPACE": 3,
+	"UNKNOWN_ERROR":      4,
+	"CANCELLED":          5,
+}
+
+func (x ImageDownload_ImageDownloadFailureReason) String() string {
+	return proto.EnumName(ImageDownload_ImageDownloadFailureReason_name, int32(x))
+}
+
+func (ImageDownload_ImageDownloadFailureReason) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{7, 1}
+}
+
+type ImageDownload_ImageActivateState int32
+
+const (
+	ImageDownload_IMAGE_UNKNOWN    ImageDownload_ImageActivateState = 0
+	ImageDownload_IMAGE_INACTIVE   ImageDownload_ImageActivateState = 1
+	ImageDownload_IMAGE_ACTIVATING ImageDownload_ImageActivateState = 2
+	ImageDownload_IMAGE_ACTIVE     ImageDownload_ImageActivateState = 3
+	ImageDownload_IMAGE_REVERTING  ImageDownload_ImageActivateState = 4
+	ImageDownload_IMAGE_REVERTED   ImageDownload_ImageActivateState = 5
+)
+
+var ImageDownload_ImageActivateState_name = map[int32]string{
+	0: "IMAGE_UNKNOWN",
+	1: "IMAGE_INACTIVE",
+	2: "IMAGE_ACTIVATING",
+	3: "IMAGE_ACTIVE",
+	4: "IMAGE_REVERTING",
+	5: "IMAGE_REVERTED",
+}
+
+var ImageDownload_ImageActivateState_value = map[string]int32{
+	"IMAGE_UNKNOWN":    0,
+	"IMAGE_INACTIVE":   1,
+	"IMAGE_ACTIVATING": 2,
+	"IMAGE_ACTIVE":     3,
+	"IMAGE_REVERTING":  4,
+	"IMAGE_REVERTED":   5,
+}
+
+func (x ImageDownload_ImageActivateState) String() string {
+	return proto.EnumName(ImageDownload_ImageActivateState_name, int32(x))
+}
+
+func (ImageDownload_ImageActivateState) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{7, 2}
+}
+
+type Port_PortType int32
+
+const (
+	Port_UNKNOWN      Port_PortType = 0
+	Port_ETHERNET_NNI Port_PortType = 1
+	Port_ETHERNET_UNI Port_PortType = 2
+	Port_PON_OLT      Port_PortType = 3
+	Port_PON_ONU      Port_PortType = 4
+	Port_VENET_OLT    Port_PortType = 5
+	Port_VENET_ONU    Port_PortType = 6
+)
+
+var Port_PortType_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "ETHERNET_NNI",
+	2: "ETHERNET_UNI",
+	3: "PON_OLT",
+	4: "PON_ONU",
+	5: "VENET_OLT",
+	6: "VENET_ONU",
+}
+
+var Port_PortType_value = map[string]int32{
+	"UNKNOWN":      0,
+	"ETHERNET_NNI": 1,
+	"ETHERNET_UNI": 2,
+	"PON_OLT":      3,
+	"PON_ONU":      4,
+	"VENET_OLT":    5,
+	"VENET_ONU":    6,
+}
+
+func (x Port_PortType) String() string {
+	return proto.EnumName(Port_PortType_name, int32(x))
+}
+
+func (Port_PortType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{9, 0}
+}
+
+type SimulateAlarmRequest_OperationType int32
+
+const (
+	SimulateAlarmRequest_RAISE SimulateAlarmRequest_OperationType = 0
+	SimulateAlarmRequest_CLEAR SimulateAlarmRequest_OperationType = 1
+)
+
+var SimulateAlarmRequest_OperationType_name = map[int32]string{
+	0: "RAISE",
+	1: "CLEAR",
+}
+
+var SimulateAlarmRequest_OperationType_value = map[string]int32{
+	"RAISE": 0,
+	"CLEAR": 1,
+}
+
+func (x SimulateAlarmRequest_OperationType) String() string {
+	return proto.EnumName(SimulateAlarmRequest_OperationType_name, int32(x))
+}
+
+func (SimulateAlarmRequest_OperationType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{13, 0}
+}
+
+// A Device Type
+type DeviceType struct {
+	// Unique name for the device type
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// Unique vendor id for the device type, applicable to ONUs:
+	// the 4 bytes of vendor id from the ONU serial number
+	VendorId  string   `protobuf:"bytes,5,opt,name=vendor_id,json=vendorId,proto3" json:"vendor_id,omitempty"`
+	VendorIds []string `protobuf:"bytes,6,rep,name=vendor_ids,json=vendorIds,proto3" json:"vendor_ids,omitempty"`
+	// Name of the adapter that handles this device type
+	Adapter string `protobuf:"bytes,2,opt,name=adapter,proto3" json:"adapter,omitempty"`
+	// Capabilities
+	AcceptsBulkFlowUpdate           bool     `protobuf:"varint,3,opt,name=accepts_bulk_flow_update,json=acceptsBulkFlowUpdate,proto3" json:"accepts_bulk_flow_update,omitempty"`
+	AcceptsAddRemoveFlowUpdates     bool     `protobuf:"varint,4,opt,name=accepts_add_remove_flow_updates,json=acceptsAddRemoveFlowUpdates,proto3" json:"accepts_add_remove_flow_updates,omitempty"`
+	AcceptsDirectLogicalFlowsUpdate bool     `protobuf:"varint,7,opt,name=accepts_direct_logical_flows_update,json=acceptsDirectLogicalFlowsUpdate,proto3" json:"accepts_direct_logical_flows_update,omitempty"`
+	XXX_NoUnkeyedLiteral            struct{} `json:"-"`
+	XXX_unrecognized                []byte   `json:"-"`
+	XXX_sizecache                   int32    `json:"-"`
+}
+
+func (m *DeviceType) Reset()         { *m = DeviceType{} }
+func (m *DeviceType) String() string { return proto.CompactTextString(m) }
+func (*DeviceType) ProtoMessage()    {}
+func (*DeviceType) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{0}
+}
+
+func (m *DeviceType) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceType.Unmarshal(m, b)
+}
+func (m *DeviceType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceType.Marshal(b, m, deterministic)
+}
+func (m *DeviceType) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceType.Merge(m, src)
+}
+func (m *DeviceType) XXX_Size() int {
+	return xxx_messageInfo_DeviceType.Size(m)
+}
+func (m *DeviceType) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceType proto.InternalMessageInfo
+
+func (m *DeviceType) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *DeviceType) GetVendorId() string {
+	if m != nil {
+		return m.VendorId
+	}
+	return ""
+}
+
+func (m *DeviceType) GetVendorIds() []string {
+	if m != nil {
+		return m.VendorIds
+	}
+	return nil
+}
+
+func (m *DeviceType) GetAdapter() string {
+	if m != nil {
+		return m.Adapter
+	}
+	return ""
+}
+
+func (m *DeviceType) GetAcceptsBulkFlowUpdate() bool {
+	if m != nil {
+		return m.AcceptsBulkFlowUpdate
+	}
+	return false
+}
+
+func (m *DeviceType) GetAcceptsAddRemoveFlowUpdates() bool {
+	if m != nil {
+		return m.AcceptsAddRemoveFlowUpdates
+	}
+	return false
+}
+
+func (m *DeviceType) GetAcceptsDirectLogicalFlowsUpdate() bool {
+	if m != nil {
+		return m.AcceptsDirectLogicalFlowsUpdate
+	}
+	return false
+}
+
+// A plurality of device types
+type DeviceTypes struct {
+	Items                []*DeviceType `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *DeviceTypes) Reset()         { *m = DeviceTypes{} }
+func (m *DeviceTypes) String() string { return proto.CompactTextString(m) }
+func (*DeviceTypes) ProtoMessage()    {}
+func (*DeviceTypes) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{1}
+}
+
+func (m *DeviceTypes) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceTypes.Unmarshal(m, b)
+}
+func (m *DeviceTypes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceTypes.Marshal(b, m, deterministic)
+}
+func (m *DeviceTypes) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceTypes.Merge(m, src)
+}
+func (m *DeviceTypes) XXX_Size() int {
+	return xxx_messageInfo_DeviceTypes.Size(m)
+}
+func (m *DeviceTypes) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceTypes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceTypes proto.InternalMessageInfo
+
+func (m *DeviceTypes) GetItems() []*DeviceType {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+type PmConfig struct {
+	Name                 string          `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Type                 PmConfig_PmType `protobuf:"varint,2,opt,name=type,proto3,enum=voltha.PmConfig_PmType" json:"type,omitempty"`
+	Enabled              bool            `protobuf:"varint,3,opt,name=enabled,proto3" json:"enabled,omitempty"`
+	SampleFreq           uint32          `protobuf:"varint,4,opt,name=sample_freq,json=sampleFreq,proto3" json:"sample_freq,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *PmConfig) Reset()         { *m = PmConfig{} }
+func (m *PmConfig) String() string { return proto.CompactTextString(m) }
+func (*PmConfig) ProtoMessage()    {}
+func (*PmConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{2}
+}
+
+func (m *PmConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PmConfig.Unmarshal(m, b)
+}
+func (m *PmConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PmConfig.Marshal(b, m, deterministic)
+}
+func (m *PmConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PmConfig.Merge(m, src)
+}
+func (m *PmConfig) XXX_Size() int {
+	return xxx_messageInfo_PmConfig.Size(m)
+}
+func (m *PmConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_PmConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PmConfig proto.InternalMessageInfo
+
+func (m *PmConfig) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *PmConfig) GetType() PmConfig_PmType {
+	if m != nil {
+		return m.Type
+	}
+	return PmConfig_COUNTER
+}
+
+func (m *PmConfig) GetEnabled() bool {
+	if m != nil {
+		return m.Enabled
+	}
+	return false
+}
+
+func (m *PmConfig) GetSampleFreq() uint32 {
+	if m != nil {
+		return m.SampleFreq
+	}
+	return 0
+}
+
+type PmGroupConfig struct {
+	GroupName            string      `protobuf:"bytes,1,opt,name=group_name,json=groupName,proto3" json:"group_name,omitempty"`
+	GroupFreq            uint32      `protobuf:"varint,2,opt,name=group_freq,json=groupFreq,proto3" json:"group_freq,omitempty"`
+	Enabled              bool        `protobuf:"varint,3,opt,name=enabled,proto3" json:"enabled,omitempty"`
+	Metrics              []*PmConfig `protobuf:"bytes,4,rep,name=metrics,proto3" json:"metrics,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
+}
+
+func (m *PmGroupConfig) Reset()         { *m = PmGroupConfig{} }
+func (m *PmGroupConfig) String() string { return proto.CompactTextString(m) }
+func (*PmGroupConfig) ProtoMessage()    {}
+func (*PmGroupConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{3}
+}
+
+func (m *PmGroupConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PmGroupConfig.Unmarshal(m, b)
+}
+func (m *PmGroupConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PmGroupConfig.Marshal(b, m, deterministic)
+}
+func (m *PmGroupConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PmGroupConfig.Merge(m, src)
+}
+func (m *PmGroupConfig) XXX_Size() int {
+	return xxx_messageInfo_PmGroupConfig.Size(m)
+}
+func (m *PmGroupConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_PmGroupConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PmGroupConfig proto.InternalMessageInfo
+
+func (m *PmGroupConfig) GetGroupName() string {
+	if m != nil {
+		return m.GroupName
+	}
+	return ""
+}
+
+func (m *PmGroupConfig) GetGroupFreq() uint32 {
+	if m != nil {
+		return m.GroupFreq
+	}
+	return 0
+}
+
+func (m *PmGroupConfig) GetEnabled() bool {
+	if m != nil {
+		return m.Enabled
+	}
+	return false
+}
+
+func (m *PmGroupConfig) GetMetrics() []*PmConfig {
+	if m != nil {
+		return m.Metrics
+	}
+	return nil
+}
+
+type PmConfigs struct {
+	Id          string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	DefaultFreq uint32 `protobuf:"varint,2,opt,name=default_freq,json=defaultFreq,proto3" json:"default_freq,omitempty"`
+	// Forces group names and group semantics
+	Grouped bool `protobuf:"varint,3,opt,name=grouped,proto3" json:"grouped,omitempty"`
+	// Allows Pm to set an individual sample frequency
+	FreqOverride         bool             `protobuf:"varint,4,opt,name=freq_override,json=freqOverride,proto3" json:"freq_override,omitempty"`
+	Groups               []*PmGroupConfig `protobuf:"bytes,5,rep,name=groups,proto3" json:"groups,omitempty"`
+	Metrics              []*PmConfig      `protobuf:"bytes,6,rep,name=metrics,proto3" json:"metrics,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *PmConfigs) Reset()         { *m = PmConfigs{} }
+func (m *PmConfigs) String() string { return proto.CompactTextString(m) }
+func (*PmConfigs) ProtoMessage()    {}
+func (*PmConfigs) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{4}
+}
+
+func (m *PmConfigs) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PmConfigs.Unmarshal(m, b)
+}
+func (m *PmConfigs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PmConfigs.Marshal(b, m, deterministic)
+}
+func (m *PmConfigs) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PmConfigs.Merge(m, src)
+}
+func (m *PmConfigs) XXX_Size() int {
+	return xxx_messageInfo_PmConfigs.Size(m)
+}
+func (m *PmConfigs) XXX_DiscardUnknown() {
+	xxx_messageInfo_PmConfigs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PmConfigs proto.InternalMessageInfo
+
+func (m *PmConfigs) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *PmConfigs) GetDefaultFreq() uint32 {
+	if m != nil {
+		return m.DefaultFreq
+	}
+	return 0
+}
+
+func (m *PmConfigs) GetGrouped() bool {
+	if m != nil {
+		return m.Grouped
+	}
+	return false
+}
+
+func (m *PmConfigs) GetFreqOverride() bool {
+	if m != nil {
+		return m.FreqOverride
+	}
+	return false
+}
+
+func (m *PmConfigs) GetGroups() []*PmGroupConfig {
+	if m != nil {
+		return m.Groups
+	}
+	return nil
+}
+
+func (m *PmConfigs) GetMetrics() []*PmConfig {
+	if m != nil {
+		return m.Metrics
+	}
+	return nil
+}
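As a usage sketch for the PM messages above, the following assembles a grouped configuration; the group and metric names and the frequencies are invented for illustration, and the voltha package import is assumed:

pm := &voltha.PmConfigs{
	Id:          "dev-1", // device id (placeholder)
	DefaultFreq: 150,     // polling frequency; units are defined by the adapter
	Grouped:     true,    // group-level control instead of per-metric
	Groups: []*voltha.PmGroupConfig{{
		GroupName: "Ethernet",
		GroupFreq: 150,
		Enabled:   true,
		Metrics: []*voltha.PmConfig{{
			Name:    "rx_bytes",
			Type:    voltha.PmConfig_COUNTER,
			Enabled: true,
		}},
	}},
}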
+
+// Describes instance of software image on the device
+type Image struct {
+	Name            string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Version         string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+	Hash            string `protobuf:"bytes,3,opt,name=hash,proto3" json:"hash,omitempty"`
+	InstallDatetime string `protobuf:"bytes,4,opt,name=install_datetime,json=installDatetime,proto3" json:"install_datetime,omitempty"`
+	// The active software image is one that is currently loaded and executing
+	// in the ONU or circuit pack. Under normal operation, one software image
+	// is always active while the other is inactive. Under no circumstances are
+	// both software images allowed to be active at the same time.
+	IsActive bool `protobuf:"varint,5,opt,name=is_active,json=isActive,proto3" json:"is_active,omitempty"`
+	// The committed software image is loaded and executed upon reboot of the
+	// ONU and/or circuit pack. During normal operation, one software image is
+	// always committed, while the other is uncommitted.
+	IsCommitted bool `protobuf:"varint,6,opt,name=is_committed,json=isCommitted,proto3" json:"is_committed,omitempty"`
+	// A software image is valid if it has been verified to be an executable
+	// code image. The verification mechanism is not subject to standardization;
+	// however, it should include at least a data integrity (e.g., CRC) check of
+	// the entire code image.
+	IsValid              bool     `protobuf:"varint,7,opt,name=is_valid,json=isValid,proto3" json:"is_valid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Image) Reset()         { *m = Image{} }
+func (m *Image) String() string { return proto.CompactTextString(m) }
+func (*Image) ProtoMessage()    {}
+func (*Image) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{5}
+}
+
+func (m *Image) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Image.Unmarshal(m, b)
+}
+func (m *Image) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Image.Marshal(b, m, deterministic)
+}
+func (m *Image) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Image.Merge(m, src)
+}
+func (m *Image) XXX_Size() int {
+	return xxx_messageInfo_Image.Size(m)
+}
+func (m *Image) XXX_DiscardUnknown() {
+	xxx_messageInfo_Image.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Image proto.InternalMessageInfo
+
+func (m *Image) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *Image) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
+
+func (m *Image) GetHash() string {
+	if m != nil {
+		return m.Hash
+	}
+	return ""
+}
+
+func (m *Image) GetInstallDatetime() string {
+	if m != nil {
+		return m.InstallDatetime
+	}
+	return ""
+}
+
+func (m *Image) GetIsActive() bool {
+	if m != nil {
+		return m.IsActive
+	}
+	return false
+}
+
+func (m *Image) GetIsCommitted() bool {
+	if m != nil {
+		return m.IsCommitted
+	}
+	return false
+}
+
+func (m *Image) GetIsValid() bool {
+	if m != nil {
+		return m.IsValid
+	}
+	return false
+}
+
+// List of software on the device
+type Images struct {
+	Image                []*Image `protobuf:"bytes,1,rep,name=image,proto3" json:"image,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Images) Reset()         { *m = Images{} }
+func (m *Images) String() string { return proto.CompactTextString(m) }
+func (*Images) ProtoMessage()    {}
+func (*Images) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{6}
+}
+
+func (m *Images) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Images.Unmarshal(m, b)
+}
+func (m *Images) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Images.Marshal(b, m, deterministic)
+}
+func (m *Images) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Images.Merge(m, src)
+}
+func (m *Images) XXX_Size() int {
+	return xxx_messageInfo_Images.Size(m)
+}
+func (m *Images) XXX_DiscardUnknown() {
+	xxx_messageInfo_Images.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Images proto.InternalMessageInfo
+
+func (m *Images) GetImage() []*Image {
+	if m != nil {
+		return m.Image
+	}
+	return nil
+}
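Given the active/committed semantics documented on Image above, a small helper along these lines (hypothetical, not part of the generated API) picks out the image that is currently executing:

// activeImage returns the image marked active, or nil if none is executing.
func activeImage(imgs *voltha.Images) *voltha.Image {
	for _, img := range imgs.GetImage() {
		if img.GetIsActive() {
			return img
		}
	}
	return nil
}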
+
+type ImageDownload struct {
+	// Device Identifier
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// Image unique identifier
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	// URL where the image is available
+	// should include the username and password
+	Url string `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"`
+	// CRC of the image to be verified against
+	Crc uint32 `protobuf:"varint,4,opt,name=crc,proto3" json:"crc,omitempty"`
+	// Download state
+	DownloadState ImageDownload_ImageDownloadState `protobuf:"varint,5,opt,name=download_state,json=downloadState,proto3,enum=voltha.ImageDownload_ImageDownloadState" json:"download_state,omitempty"`
+	// Downloaded version
+	ImageVersion string `protobuf:"bytes,6,opt,name=image_version,json=imageVersion,proto3" json:"image_version,omitempty"`
+	// Bytes downloaded
+	DownloadedBytes uint32 `protobuf:"varint,7,opt,name=downloaded_bytes,json=downloadedBytes,proto3" json:"downloaded_bytes,omitempty"`
+	// Download failure reason
+	Reason ImageDownload_ImageDownloadFailureReason `protobuf:"varint,8,opt,name=reason,proto3,enum=voltha.ImageDownload_ImageDownloadFailureReason" json:"reason,omitempty"`
+	// Additional info
+	AdditionalInfo string `protobuf:"bytes,9,opt,name=additional_info,json=additionalInfo,proto3" json:"additional_info,omitempty"`
+	// Save current configuration
+	SaveConfig bool `protobuf:"varint,10,opt,name=save_config,json=saveConfig,proto3" json:"save_config,omitempty"`
+	// Image local location
+	LocalDir string `protobuf:"bytes,11,opt,name=local_dir,json=localDir,proto3" json:"local_dir,omitempty"`
+	// Image activation state
+	ImageState ImageDownload_ImageActivateState `protobuf:"varint,12,opt,name=image_state,json=imageState,proto3,enum=voltha.ImageDownload_ImageActivateState" json:"image_state,omitempty"`
+	// Image file size
+	FileSize             uint32   `protobuf:"varint,13,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ImageDownload) Reset()         { *m = ImageDownload{} }
+func (m *ImageDownload) String() string { return proto.CompactTextString(m) }
+func (*ImageDownload) ProtoMessage()    {}
+func (*ImageDownload) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{7}
+}
+
+func (m *ImageDownload) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ImageDownload.Unmarshal(m, b)
+}
+func (m *ImageDownload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ImageDownload.Marshal(b, m, deterministic)
+}
+func (m *ImageDownload) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageDownload.Merge(m, src)
+}
+func (m *ImageDownload) XXX_Size() int {
+	return xxx_messageInfo_ImageDownload.Size(m)
+}
+func (m *ImageDownload) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageDownload.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageDownload proto.InternalMessageInfo
+
+func (m *ImageDownload) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *ImageDownload) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *ImageDownload) GetUrl() string {
+	if m != nil {
+		return m.Url
+	}
+	return ""
+}
+
+func (m *ImageDownload) GetCrc() uint32 {
+	if m != nil {
+		return m.Crc
+	}
+	return 0
+}
+
+func (m *ImageDownload) GetDownloadState() ImageDownload_ImageDownloadState {
+	if m != nil {
+		return m.DownloadState
+	}
+	return ImageDownload_DOWNLOAD_UNKNOWN
+}
+
+func (m *ImageDownload) GetImageVersion() string {
+	if m != nil {
+		return m.ImageVersion
+	}
+	return ""
+}
+
+func (m *ImageDownload) GetDownloadedBytes() uint32 {
+	if m != nil {
+		return m.DownloadedBytes
+	}
+	return 0
+}
+
+func (m *ImageDownload) GetReason() ImageDownload_ImageDownloadFailureReason {
+	if m != nil {
+		return m.Reason
+	}
+	return ImageDownload_NO_ERROR
+}
+
+func (m *ImageDownload) GetAdditionalInfo() string {
+	if m != nil {
+		return m.AdditionalInfo
+	}
+	return ""
+}
+
+func (m *ImageDownload) GetSaveConfig() bool {
+	if m != nil {
+		return m.SaveConfig
+	}
+	return false
+}
+
+func (m *ImageDownload) GetLocalDir() string {
+	if m != nil {
+		return m.LocalDir
+	}
+	return ""
+}
+
+func (m *ImageDownload) GetImageState() ImageDownload_ImageActivateState {
+	if m != nil {
+		return m.ImageState
+	}
+	return ImageDownload_IMAGE_UNKNOWN
+}
+
+func (m *ImageDownload) GetFileSize() uint32 {
+	if m != nil {
+		return m.FileSize
+	}
+	return 0
+}
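A sketch of consuming the download-state and failure-reason enums defined above; the ImageDownload value is assumed to come from a core query, and fmt is assumed imported. The enum String() methods resolve names through the *_name maps registered earlier:

func reportDownload(d *voltha.ImageDownload) string {
	switch d.GetDownloadState() {
	case voltha.ImageDownload_DOWNLOAD_SUCCEEDED:
		return fmt.Sprintf("%s: %d bytes downloaded", d.GetName(), d.GetDownloadedBytes())
	case voltha.ImageDownload_DOWNLOAD_FAILED:
		return fmt.Sprintf("%s: failed (%s)", d.GetName(), d.GetReason())
	default:
		return fmt.Sprintf("%s: %s", d.GetName(), d.GetDownloadState())
	}
}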
+
+type ImageDownloads struct {
+	Items                []*ImageDownload `protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *ImageDownloads) Reset()         { *m = ImageDownloads{} }
+func (m *ImageDownloads) String() string { return proto.CompactTextString(m) }
+func (*ImageDownloads) ProtoMessage()    {}
+func (*ImageDownloads) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{8}
+}
+
+func (m *ImageDownloads) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ImageDownloads.Unmarshal(m, b)
+}
+func (m *ImageDownloads) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ImageDownloads.Marshal(b, m, deterministic)
+}
+func (m *ImageDownloads) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageDownloads.Merge(m, src)
+}
+func (m *ImageDownloads) XXX_Size() int {
+	return xxx_messageInfo_ImageDownloads.Size(m)
+}
+func (m *ImageDownloads) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageDownloads.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageDownloads proto.InternalMessageInfo
+
+func (m *ImageDownloads) GetItems() []*ImageDownload {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+type Port struct {
+	PortNo               uint32                  `protobuf:"varint,1,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	Label                string                  `protobuf:"bytes,2,opt,name=label,proto3" json:"label,omitempty"`
+	Type                 Port_PortType           `protobuf:"varint,3,opt,name=type,proto3,enum=voltha.Port_PortType" json:"type,omitempty"`
+	AdminState           common.AdminState_Types `protobuf:"varint,5,opt,name=admin_state,json=adminState,proto3,enum=common.AdminState_Types" json:"admin_state,omitempty"`
+	OperStatus           common.OperStatus_Types `protobuf:"varint,6,opt,name=oper_status,json=operStatus,proto3,enum=common.OperStatus_Types" json:"oper_status,omitempty"`
+	DeviceId             string                  `protobuf:"bytes,7,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	Peers                []*Port_PeerPort        `protobuf:"bytes,8,rep,name=peers,proto3" json:"peers,omitempty"`
+	RxPackets            uint64                  `protobuf:"fixed64,9,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"`
+	RxBytes              uint64                  `protobuf:"fixed64,10,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"`
+	RxErrors             uint64                  `protobuf:"fixed64,11,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"`
+	TxPackets            uint64                  `protobuf:"fixed64,12,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"`
+	TxBytes              uint64                  `protobuf:"fixed64,13,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"`
+	TxErrors             uint64                  `protobuf:"fixed64,14,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                `json:"-"`
+	XXX_unrecognized     []byte                  `json:"-"`
+	XXX_sizecache        int32                   `json:"-"`
+}
+
+func (m *Port) Reset()         { *m = Port{} }
+func (m *Port) String() string { return proto.CompactTextString(m) }
+func (*Port) ProtoMessage()    {}
+func (*Port) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{9}
+}
+
+func (m *Port) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Port.Unmarshal(m, b)
+}
+func (m *Port) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Port.Marshal(b, m, deterministic)
+}
+func (m *Port) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Port.Merge(m, src)
+}
+func (m *Port) XXX_Size() int {
+	return xxx_messageInfo_Port.Size(m)
+}
+func (m *Port) XXX_DiscardUnknown() {
+	xxx_messageInfo_Port.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Port proto.InternalMessageInfo
+
+func (m *Port) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *Port) GetLabel() string {
+	if m != nil {
+		return m.Label
+	}
+	return ""
+}
+
+func (m *Port) GetType() Port_PortType {
+	if m != nil {
+		return m.Type
+	}
+	return Port_UNKNOWN
+}
+
+func (m *Port) GetAdminState() common.AdminState_Types {
+	if m != nil {
+		return m.AdminState
+	}
+	return common.AdminState_UNKNOWN
+}
+
+func (m *Port) GetOperStatus() common.OperStatus_Types {
+	if m != nil {
+		return m.OperStatus
+	}
+	return common.OperStatus_UNKNOWN
+}
+
+func (m *Port) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *Port) GetPeers() []*Port_PeerPort {
+	if m != nil {
+		return m.Peers
+	}
+	return nil
+}
+
+func (m *Port) GetRxPackets() uint64 {
+	if m != nil {
+		return m.RxPackets
+	}
+	return 0
+}
+
+func (m *Port) GetRxBytes() uint64 {
+	if m != nil {
+		return m.RxBytes
+	}
+	return 0
+}
+
+func (m *Port) GetRxErrors() uint64 {
+	if m != nil {
+		return m.RxErrors
+	}
+	return 0
+}
+
+func (m *Port) GetTxPackets() uint64 {
+	if m != nil {
+		return m.TxPackets
+	}
+	return 0
+}
+
+func (m *Port) GetTxBytes() uint64 {
+	if m != nil {
+		return m.TxBytes
+	}
+	return 0
+}
+
+func (m *Port) GetTxErrors() uint64 {
+	if m != nil {
+		return m.TxErrors
+	}
+	return 0
+}
+
+type Port_PeerPort struct {
+	DeviceId             string   `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	PortNo               uint32   `protobuf:"varint,2,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Port_PeerPort) Reset()         { *m = Port_PeerPort{} }
+func (m *Port_PeerPort) String() string { return proto.CompactTextString(m) }
+func (*Port_PeerPort) ProtoMessage()    {}
+func (*Port_PeerPort) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{9, 0}
+}
+
+func (m *Port_PeerPort) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Port_PeerPort.Unmarshal(m, b)
+}
+func (m *Port_PeerPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Port_PeerPort.Marshal(b, m, deterministic)
+}
+func (m *Port_PeerPort) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Port_PeerPort.Merge(m, src)
+}
+func (m *Port_PeerPort) XXX_Size() int {
+	return xxx_messageInfo_Port_PeerPort.Size(m)
+}
+func (m *Port_PeerPort) XXX_DiscardUnknown() {
+	xxx_messageInfo_Port_PeerPort.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Port_PeerPort proto.InternalMessageInfo
+
+func (m *Port_PeerPort) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *Port_PeerPort) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+type Ports struct {
+	Items                []*Port  `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Ports) Reset()         { *m = Ports{} }
+func (m *Ports) String() string { return proto.CompactTextString(m) }
+func (*Ports) ProtoMessage()    {}
+func (*Ports) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{10}
+}
+
+func (m *Ports) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Ports.Unmarshal(m, b)
+}
+func (m *Ports) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Ports.Marshal(b, m, deterministic)
+}
+func (m *Ports) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Ports.Merge(m, src)
+}
+func (m *Ports) XXX_Size() int {
+	return xxx_messageInfo_Ports.Size(m)
+}
+func (m *Ports) XXX_DiscardUnknown() {
+	xxx_messageInfo_Ports.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Ports proto.InternalMessageInfo
+
+func (m *Ports) GetItems() []*Port {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+// A Physical Device instance
+type Device struct {
+	// Voltha's device identifier
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// Device type, refers to one of the registered device types
+	Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+	// Is this device a root device? Each logical switch has one root
+	// device that is associated with the logical flow switch.
+	Root bool `protobuf:"varint,3,opt,name=root,proto3" json:"root,omitempty"`
+	// Parent device id, in the device tree (for a root device, the parent_id
+	// is the logical_device.id)
+	ParentId     string `protobuf:"bytes,4,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
+	ParentPortNo uint32 `protobuf:"varint,20,opt,name=parent_port_no,json=parentPortNo,proto3" json:"parent_port_no,omitempty"`
+	// Vendor, version, serial number, etc.
+	Vendor          string `protobuf:"bytes,5,opt,name=vendor,proto3" json:"vendor,omitempty"`
+	Model           string `protobuf:"bytes,6,opt,name=model,proto3" json:"model,omitempty"`
+	HardwareVersion string `protobuf:"bytes,7,opt,name=hardware_version,json=hardwareVersion,proto3" json:"hardware_version,omitempty"`
+	FirmwareVersion string `protobuf:"bytes,8,opt,name=firmware_version,json=firmwareVersion,proto3" json:"firmware_version,omitempty"`
+	// List of software on the device
+	Images       *Images `protobuf:"bytes,9,opt,name=images,proto3" json:"images,omitempty"`
+	SerialNumber string  `protobuf:"bytes,10,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"`
+	VendorId     string  `protobuf:"bytes,24,opt,name=vendor_id,json=vendorId,proto3" json:"vendor_id,omitempty"`
+	// Adapter that takes care of the device
+	Adapter string `protobuf:"bytes,11,opt,name=adapter,proto3" json:"adapter,omitempty"`
+	// Device contact VLAN (if 0, no VLAN)
+	Vlan uint32 `protobuf:"varint,12,opt,name=vlan,proto3" json:"vlan,omitempty"`
+	// Device contact MAC address (format: "xx:xx:xx:xx:xx:xx")
+	MacAddress string `protobuf:"bytes,13,opt,name=mac_address,json=macAddress,proto3" json:"mac_address,omitempty"`
+	// Types that are valid to be assigned to Address:
+	//	*Device_Ipv4Address
+	//	*Device_Ipv6Address
+	//	*Device_HostAndPort
+	Address       isDevice_Address           `protobuf_oneof:"address"`
+	ExtraArgs     string                     `protobuf:"bytes,23,opt,name=extra_args,json=extraArgs,proto3" json:"extra_args,omitempty"`
+	ProxyAddress  *Device_ProxyAddress       `protobuf:"bytes,19,opt,name=proxy_address,json=proxyAddress,proto3" json:"proxy_address,omitempty"`
+	AdminState    common.AdminState_Types    `protobuf:"varint,16,opt,name=admin_state,json=adminState,proto3,enum=common.AdminState_Types" json:"admin_state,omitempty"`
+	OperStatus    common.OperStatus_Types    `protobuf:"varint,17,opt,name=oper_status,json=operStatus,proto3,enum=common.OperStatus_Types" json:"oper_status,omitempty"`
+	Reason        string                     `protobuf:"bytes,22,opt,name=reason,proto3" json:"reason,omitempty"`
+	ConnectStatus common.ConnectStatus_Types `protobuf:"varint,18,opt,name=connect_status,json=connectStatus,proto3,enum=common.ConnectStatus_Types" json:"connect_status,omitempty"`
+	// Device type specific attributes
+	Custom     *any.Any                `protobuf:"bytes,64,opt,name=custom,proto3" json:"custom,omitempty"`
+	Ports      []*Port                 `protobuf:"bytes,128,rep,name=ports,proto3" json:"ports,omitempty"`
+	Flows      *openflow_13.Flows      `protobuf:"bytes,129,opt,name=flows,proto3" json:"flows,omitempty"`
+	FlowGroups *openflow_13.FlowGroups `protobuf:"bytes,130,opt,name=flow_groups,json=flowGroups,proto3" json:"flow_groups,omitempty"`
+	// PmConfigs will eventually be converted to a child node of the
+	// device to facilitate callbacks and to simplify manipulation.
+	PmConfigs            *PmConfigs       `protobuf:"bytes,131,opt,name=pm_configs,json=pmConfigs,proto3" json:"pm_configs,omitempty"`
+	ImageDownloads       []*ImageDownload `protobuf:"bytes,133,rep,name=image_downloads,json=imageDownloads,proto3" json:"image_downloads,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *Device) Reset()         { *m = Device{} }
+func (m *Device) String() string { return proto.CompactTextString(m) }
+func (*Device) ProtoMessage()    {}
+func (*Device) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{11}
+}
+
+func (m *Device) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Device.Unmarshal(m, b)
+}
+func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Device.Marshal(b, m, deterministic)
+}
+func (m *Device) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Device.Merge(m, src)
+}
+func (m *Device) XXX_Size() int {
+	return xxx_messageInfo_Device.Size(m)
+}
+func (m *Device) XXX_DiscardUnknown() {
+	xxx_messageInfo_Device.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Device proto.InternalMessageInfo
+
+func (m *Device) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *Device) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *Device) GetRoot() bool {
+	if m != nil {
+		return m.Root
+	}
+	return false
+}
+
+func (m *Device) GetParentId() string {
+	if m != nil {
+		return m.ParentId
+	}
+	return ""
+}
+
+func (m *Device) GetParentPortNo() uint32 {
+	if m != nil {
+		return m.ParentPortNo
+	}
+	return 0
+}
+
+func (m *Device) GetVendor() string {
+	if m != nil {
+		return m.Vendor
+	}
+	return ""
+}
+
+func (m *Device) GetModel() string {
+	if m != nil {
+		return m.Model
+	}
+	return ""
+}
+
+func (m *Device) GetHardwareVersion() string {
+	if m != nil {
+		return m.HardwareVersion
+	}
+	return ""
+}
+
+func (m *Device) GetFirmwareVersion() string {
+	if m != nil {
+		return m.FirmwareVersion
+	}
+	return ""
+}
+
+func (m *Device) GetImages() *Images {
+	if m != nil {
+		return m.Images
+	}
+	return nil
+}
+
+func (m *Device) GetSerialNumber() string {
+	if m != nil {
+		return m.SerialNumber
+	}
+	return ""
+}
+
+func (m *Device) GetVendorId() string {
+	if m != nil {
+		return m.VendorId
+	}
+	return ""
+}
+
+func (m *Device) GetAdapter() string {
+	if m != nil {
+		return m.Adapter
+	}
+	return ""
+}
+
+func (m *Device) GetVlan() uint32 {
+	if m != nil {
+		return m.Vlan
+	}
+	return 0
+}
+
+func (m *Device) GetMacAddress() string {
+	if m != nil {
+		return m.MacAddress
+	}
+	return ""
+}
+
+type isDevice_Address interface {
+	isDevice_Address()
+}
+
+type Device_Ipv4Address struct {
+	Ipv4Address string `protobuf:"bytes,14,opt,name=ipv4_address,json=ipv4Address,proto3,oneof"`
+}
+
+type Device_Ipv6Address struct {
+	Ipv6Address string `protobuf:"bytes,15,opt,name=ipv6_address,json=ipv6Address,proto3,oneof"`
+}
+
+type Device_HostAndPort struct {
+	HostAndPort string `protobuf:"bytes,21,opt,name=host_and_port,json=hostAndPort,proto3,oneof"`
+}
+
+func (*Device_Ipv4Address) isDevice_Address() {}
+
+func (*Device_Ipv6Address) isDevice_Address() {}
+
+func (*Device_HostAndPort) isDevice_Address() {}
+
+func (m *Device) GetAddress() isDevice_Address {
+	if m != nil {
+		return m.Address
+	}
+	return nil
+}
+
+func (m *Device) GetIpv4Address() string {
+	if x, ok := m.GetAddress().(*Device_Ipv4Address); ok {
+		return x.Ipv4Address
+	}
+	return ""
+}
+
+func (m *Device) GetIpv6Address() string {
+	if x, ok := m.GetAddress().(*Device_Ipv6Address); ok {
+		return x.Ipv6Address
+	}
+	return ""
+}
+
+func (m *Device) GetHostAndPort() string {
+	if x, ok := m.GetAddress().(*Device_HostAndPort); ok {
+		return x.HostAndPort
+	}
+	return ""
+}
+
+func (m *Device) GetExtraArgs() string {
+	if m != nil {
+		return m.ExtraArgs
+	}
+	return ""
+}
+
+func (m *Device) GetProxyAddress() *Device_ProxyAddress {
+	if m != nil {
+		return m.ProxyAddress
+	}
+	return nil
+}
+
+func (m *Device) GetAdminState() common.AdminState_Types {
+	if m != nil {
+		return m.AdminState
+	}
+	return common.AdminState_UNKNOWN
+}
+
+func (m *Device) GetOperStatus() common.OperStatus_Types {
+	if m != nil {
+		return m.OperStatus
+	}
+	return common.OperStatus_UNKNOWN
+}
+
+func (m *Device) GetReason() string {
+	if m != nil {
+		return m.Reason
+	}
+	return ""
+}
+
+func (m *Device) GetConnectStatus() common.ConnectStatus_Types {
+	if m != nil {
+		return m.ConnectStatus
+	}
+	return common.ConnectStatus_UNKNOWN
+}
+
+func (m *Device) GetCustom() *any.Any {
+	if m != nil {
+		return m.Custom
+	}
+	return nil
+}
+
+func (m *Device) GetPorts() []*Port {
+	if m != nil {
+		return m.Ports
+	}
+	return nil
+}
+
+func (m *Device) GetFlows() *openflow_13.Flows {
+	if m != nil {
+		return m.Flows
+	}
+	return nil
+}
+
+func (m *Device) GetFlowGroups() *openflow_13.FlowGroups {
+	if m != nil {
+		return m.FlowGroups
+	}
+	return nil
+}
+
+func (m *Device) GetPmConfigs() *PmConfigs {
+	if m != nil {
+		return m.PmConfigs
+	}
+	return nil
+}
+
+func (m *Device) GetImageDownloads() []*ImageDownload {
+	if m != nil {
+		return m.ImageDownloads
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Device) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*Device_Ipv4Address)(nil),
+		(*Device_Ipv6Address)(nil),
+		(*Device_HostAndPort)(nil),
+	}
+}
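The address oneof is populated by choosing exactly one of the wrapper types above; a brief sketch with placeholder values:

dev := &voltha.Device{
	Id:      "dev-1",
	Address: &voltha.Device_Ipv4Address{Ipv4Address: "192.0.2.10"},
}
fmt.Println(dev.GetIpv4Address()) // "192.0.2.10"; GetIpv6Address() yields ""

// Assigning a different wrapper replaces the previous choice.
dev.Address = &voltha.Device_HostAndPort{HostAndPort: "device.example.com:9191"}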
+
+type Device_ProxyAddress struct {
+	DeviceId             string   `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	DeviceType           string   `protobuf:"bytes,2,opt,name=device_type,json=deviceType,proto3" json:"device_type,omitempty"`
+	ChannelId            uint32   `protobuf:"varint,3,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
+	ChannelGroupId       uint32   `protobuf:"varint,4,opt,name=channel_group_id,json=channelGroupId,proto3" json:"channel_group_id,omitempty"`
+	ChannelTermination   string   `protobuf:"bytes,5,opt,name=channel_termination,json=channelTermination,proto3" json:"channel_termination,omitempty"`
+	OnuId                uint32   `protobuf:"varint,6,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	OnuSessionId         uint32   `protobuf:"varint,7,opt,name=onu_session_id,json=onuSessionId,proto3" json:"onu_session_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Device_ProxyAddress) Reset()         { *m = Device_ProxyAddress{} }
+func (m *Device_ProxyAddress) String() string { return proto.CompactTextString(m) }
+func (*Device_ProxyAddress) ProtoMessage()    {}
+func (*Device_ProxyAddress) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{11, 0}
+}
+
+func (m *Device_ProxyAddress) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Device_ProxyAddress.Unmarshal(m, b)
+}
+func (m *Device_ProxyAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Device_ProxyAddress.Marshal(b, m, deterministic)
+}
+func (m *Device_ProxyAddress) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Device_ProxyAddress.Merge(m, src)
+}
+func (m *Device_ProxyAddress) XXX_Size() int {
+	return xxx_messageInfo_Device_ProxyAddress.Size(m)
+}
+func (m *Device_ProxyAddress) XXX_DiscardUnknown() {
+	xxx_messageInfo_Device_ProxyAddress.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Device_ProxyAddress proto.InternalMessageInfo
+
+func (m *Device_ProxyAddress) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *Device_ProxyAddress) GetDeviceType() string {
+	if m != nil {
+		return m.DeviceType
+	}
+	return ""
+}
+
+func (m *Device_ProxyAddress) GetChannelId() uint32 {
+	if m != nil {
+		return m.ChannelId
+	}
+	return 0
+}
+
+func (m *Device_ProxyAddress) GetChannelGroupId() uint32 {
+	if m != nil {
+		return m.ChannelGroupId
+	}
+	return 0
+}
+
+func (m *Device_ProxyAddress) GetChannelTermination() string {
+	if m != nil {
+		return m.ChannelTermination
+	}
+	return ""
+}
+
+func (m *Device_ProxyAddress) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *Device_ProxyAddress) GetOnuSessionId() uint32 {
+	if m != nil {
+		return m.OnuSessionId
+	}
+	return 0
+}
+
+type Devices struct {
+	Items                []*Device `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *Devices) Reset()         { *m = Devices{} }
+func (m *Devices) String() string { return proto.CompactTextString(m) }
+func (*Devices) ProtoMessage()    {}
+func (*Devices) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{12}
+}
+
+func (m *Devices) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Devices.Unmarshal(m, b)
+}
+func (m *Devices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Devices.Marshal(b, m, deterministic)
+}
+func (m *Devices) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Devices.Merge(m, src)
+}
+func (m *Devices) XXX_Size() int {
+	return xxx_messageInfo_Devices.Size(m)
+}
+func (m *Devices) XXX_DiscardUnknown() {
+	xxx_messageInfo_Devices.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Devices proto.InternalMessageInfo
+
+func (m *Devices) GetItems() []*Device {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+type SimulateAlarmRequest struct {
+	// Device Identifier
+	Id                   string                             `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Indicator            string                             `protobuf:"bytes,2,opt,name=indicator,proto3" json:"indicator,omitempty"`
+	IntfId               string                             `protobuf:"bytes,3,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	PortTypeName         string                             `protobuf:"bytes,4,opt,name=port_type_name,json=portTypeName,proto3" json:"port_type_name,omitempty"`
+	OnuDeviceId          string                             `protobuf:"bytes,5,opt,name=onu_device_id,json=onuDeviceId,proto3" json:"onu_device_id,omitempty"`
+	InverseBitErrorRate  int32                              `protobuf:"varint,6,opt,name=inverse_bit_error_rate,json=inverseBitErrorRate,proto3" json:"inverse_bit_error_rate,omitempty"`
+	Drift                int32                              `protobuf:"varint,7,opt,name=drift,proto3" json:"drift,omitempty"`
+	NewEqd               int32                              `protobuf:"varint,8,opt,name=new_eqd,json=newEqd,proto3" json:"new_eqd,omitempty"`
+	OnuSerialNumber      string                             `protobuf:"bytes,9,opt,name=onu_serial_number,json=onuSerialNumber,proto3" json:"onu_serial_number,omitempty"`
+	Operation            SimulateAlarmRequest_OperationType `protobuf:"varint,10,opt,name=operation,proto3,enum=voltha.SimulateAlarmRequest_OperationType" json:"operation,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                           `json:"-"`
+	XXX_unrecognized     []byte                             `json:"-"`
+	XXX_sizecache        int32                              `json:"-"`
+}
+
+func (m *SimulateAlarmRequest) Reset()         { *m = SimulateAlarmRequest{} }
+func (m *SimulateAlarmRequest) String() string { return proto.CompactTextString(m) }
+func (*SimulateAlarmRequest) ProtoMessage()    {}
+func (*SimulateAlarmRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{13}
+}
+
+func (m *SimulateAlarmRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SimulateAlarmRequest.Unmarshal(m, b)
+}
+func (m *SimulateAlarmRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SimulateAlarmRequest.Marshal(b, m, deterministic)
+}
+func (m *SimulateAlarmRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SimulateAlarmRequest.Merge(m, src)
+}
+func (m *SimulateAlarmRequest) XXX_Size() int {
+	return xxx_messageInfo_SimulateAlarmRequest.Size(m)
+}
+func (m *SimulateAlarmRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_SimulateAlarmRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SimulateAlarmRequest proto.InternalMessageInfo
+
+func (m *SimulateAlarmRequest) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *SimulateAlarmRequest) GetIndicator() string {
+	if m != nil {
+		return m.Indicator
+	}
+	return ""
+}
+
+func (m *SimulateAlarmRequest) GetIntfId() string {
+	if m != nil {
+		return m.IntfId
+	}
+	return ""
+}
+
+func (m *SimulateAlarmRequest) GetPortTypeName() string {
+	if m != nil {
+		return m.PortTypeName
+	}
+	return ""
+}
+
+func (m *SimulateAlarmRequest) GetOnuDeviceId() string {
+	if m != nil {
+		return m.OnuDeviceId
+	}
+	return ""
+}
+
+func (m *SimulateAlarmRequest) GetInverseBitErrorRate() int32 {
+	if m != nil {
+		return m.InverseBitErrorRate
+	}
+	return 0
+}
+
+func (m *SimulateAlarmRequest) GetDrift() int32 {
+	if m != nil {
+		return m.Drift
+	}
+	return 0
+}
+
+func (m *SimulateAlarmRequest) GetNewEqd() int32 {
+	if m != nil {
+		return m.NewEqd
+	}
+	return 0
+}
+
+func (m *SimulateAlarmRequest) GetOnuSerialNumber() string {
+	if m != nil {
+		return m.OnuSerialNumber
+	}
+	return ""
+}
+
+func (m *SimulateAlarmRequest) GetOperation() SimulateAlarmRequest_OperationType {
+	if m != nil {
+		return m.Operation
+	}
+	return SimulateAlarmRequest_RAISE
+}
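+
+// Sketch (field values assumed, for illustration only): raising a simulated
+// alarm; SimulateAlarmRequest_RAISE is the enum's zero value.
+//
+//	req := &SimulateAlarmRequest{
+//		Id:        "device-1",
+//		Indicator: "los",
+//		Operation: SimulateAlarmRequest_RAISE,
+//	}
+//	_ = req.GetOperation()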
+
+func init() {
+	proto.RegisterEnum("voltha.PmConfig_PmType", PmConfig_PmType_name, PmConfig_PmType_value)
+	proto.RegisterEnum("voltha.ImageDownload_ImageDownloadState", ImageDownload_ImageDownloadState_name, ImageDownload_ImageDownloadState_value)
+	proto.RegisterEnum("voltha.ImageDownload_ImageDownloadFailureReason", ImageDownload_ImageDownloadFailureReason_name, ImageDownload_ImageDownloadFailureReason_value)
+	proto.RegisterEnum("voltha.ImageDownload_ImageActivateState", ImageDownload_ImageActivateState_name, ImageDownload_ImageActivateState_value)
+	proto.RegisterEnum("voltha.Port_PortType", Port_PortType_name, Port_PortType_value)
+	proto.RegisterEnum("voltha.SimulateAlarmRequest_OperationType", SimulateAlarmRequest_OperationType_name, SimulateAlarmRequest_OperationType_value)
+	proto.RegisterType((*DeviceType)(nil), "voltha.DeviceType")
+	proto.RegisterType((*DeviceTypes)(nil), "voltha.DeviceTypes")
+	proto.RegisterType((*PmConfig)(nil), "voltha.PmConfig")
+	proto.RegisterType((*PmGroupConfig)(nil), "voltha.PmGroupConfig")
+	proto.RegisterType((*PmConfigs)(nil), "voltha.PmConfigs")
+	proto.RegisterType((*Image)(nil), "voltha.Image")
+	proto.RegisterType((*Images)(nil), "voltha.Images")
+	proto.RegisterType((*ImageDownload)(nil), "voltha.ImageDownload")
+	proto.RegisterType((*ImageDownloads)(nil), "voltha.ImageDownloads")
+	proto.RegisterType((*Port)(nil), "voltha.Port")
+	proto.RegisterType((*Port_PeerPort)(nil), "voltha.Port.PeerPort")
+	proto.RegisterType((*Ports)(nil), "voltha.Ports")
+	proto.RegisterType((*Device)(nil), "voltha.Device")
+	proto.RegisterType((*Device_ProxyAddress)(nil), "voltha.Device.ProxyAddress")
+	proto.RegisterType((*Devices)(nil), "voltha.Devices")
+	proto.RegisterType((*SimulateAlarmRequest)(nil), "voltha.SimulateAlarmRequest")
+}
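+
+// The registrations above make these enums and messages discoverable by full
+// name at runtime. A hedged sketch using the golang/protobuf v1 registry:
+//
+//	t := proto.MessageType("voltha.Device") // reflect.Type, or nil if unregistered
+//	_ = t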
+
+func init() { proto.RegisterFile("voltha_protos/device.proto", fileDescriptor_200940f73d155856) }
+
+var fileDescriptor_200940f73d155856 = []byte{
+	// 2359 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x58, 0x4d, 0x73, 0xdb, 0xc6,
+	0xf9, 0x37, 0x29, 0x01, 0x24, 0x1e, 0xbe, 0x08, 0x5e, 0xcb, 0x31, 0x6c, 0xfd, 0x35, 0xf6, 0x9f,
+	0x4e, 0xa7, 0x4a, 0x52, 0x4b, 0x6e, 0xdc, 0x49, 0xd2, 0x43, 0x67, 0x4c, 0x91, 0xb0, 0x8d, 0xa9,
+	0x4a, 0xa9, 0x4b, 0x52, 0x69, 0x7b, 0xc1, 0x40, 0xc4, 0x4a, 0xc2, 0x04, 0x2f, 0xf4, 0x02, 0xa4,
+	0xe5, 0x9c, 0xda, 0x66, 0xd2, 0x53, 0x6f, 0xfd, 0x12, 0xfd, 0x06, 0x3d, 0xb6, 0x33, 0x3d, 0x67,
+	0xfa, 0x1d, 0xda, 0x99, 0x4e, 0x3f, 0x41, 0xce, 0x9d, 0x7d, 0x76, 0x97, 0x04, 0x64, 0xd7, 0x69,
+	0x2f, 0x12, 0xf6, 0xf7, 0xbc, 0xec, 0xee, 0x6f, 0x77, 0x9f, 0x17, 0xc2, 0xbd, 0x65, 0x16, 0x17,
+	0x97, 0x81, 0x3f, 0xe7, 0x59, 0x91, 0xe5, 0x07, 0x21, 0x5b, 0x46, 0x33, 0xb6, 0x8f, 0x23, 0x62,
+	0x4a, 0xd9, 0xbd, 0xbb, 0x17, 0x59, 0x76, 0x11, 0xb3, 0x03, 0x44, 0xcf, 0x16, 0xe7, 0x07, 0x41,
+	0xfa, 0x5a, 0xaa, 0xdc, 0xbb, 0x66, 0x3e, 0xcb, 0x92, 0x24, 0x4b, 0x95, 0xcc, 0xa9, 0xca, 0x12,
+	0x56, 0x04, 0x4a, 0x72, 0xbf, 0x2a, 0xc9, 0xe6, 0x2c, 0x3d, 0x8f, 0xb3, 0x57, 0xfe, 0x0f, 0x9f,
+	0x48, 0x85, 0xde, 0x9f, 0xeb, 0x00, 0x43, 0x5c, 0xca, 0xe4, 0xf5, 0x9c, 0x91, 0x2e, 0xd4, 0xa3,
+	0xd0, 0xa9, 0x3d, 0xa8, 0xed, 0x59, 0xb4, 0x1e, 0x85, 0x64, 0x07, 0xac, 0x25, 0x4b, 0xc3, 0x8c,
+	0xfb, 0x51, 0xe8, 0x18, 0x08, 0x37, 0x25, 0xe0, 0x85, 0x64, 0x17, 0x60, 0x25, 0xcc, 0x1d, 0xf3,
+	0xc1, 0xc6, 0x9e, 0x45, 0x2d, 0x2d, 0xcd, 0x89, 0x03, 0x8d, 0x20, 0x0c, 0xe6, 0x05, 0xe3, 0x4e,
+	0x1d, 0x2d, 0xf5, 0x90, 0x7c, 0x0a, 0x4e, 0x30, 0x9b, 0xb1, 0x79, 0x91, 0xfb, 0x67, 0x8b, 0xf8,
+	0x0b, 0x1f, 0x97, 0xb4, 0x98, 0x87, 0x41, 0xc1, 0x9c, 0x8d, 0x07, 0xb5, 0xbd, 0x26, 0xbd, 0xad,
+	0xe4, 0x87, 0x8b, 0xf8, 0x8b, 0x67, 0x71, 0xf6, 0x6a, 0x8a, 0x42, 0x32, 0x84, 0xfb, 0xda, 0x30,
+	0x08, 0x43, 0x9f, 0xb3, 0x24, 0x5b, 0xb2, 0xb2, 0x79, 0xee, 0x6c, 0xa2, 0xfd, 0x8e, 0x52, 0xeb,
+	0x87, 0x21, 0x45, 0xa5, 0xb5, 0x93, 0x9c, 0x1c, 0xc1, 0x43, 0xed, 0x25, 0x8c, 0x38, 0x9b, 0x15,
+	0x7e, 0x9c, 0x5d, 0x44, 0xb3, 0x20, 0x46, 0x4f, 0xb9, 0x5e, 0x49, 0x03, 0x3d, 0xe9, 0x09, 0x87,
+	0xa8, 0x79, 0x24, 0x15, 0x85, 0xb7, 0x5c, 0xba, 0xeb, 0x7d, 0x0a, 0xad, 0x35, 0x81, 0x39, 0xd9,
+	0x03, 0x23, 0x2a, 0x58, 0x92, 0x3b, 0xb5, 0x07, 0x1b, 0x7b, 0xad, 0x8f, 0xc9, 0xbe, 0x3c, 0x81,
+	0xfd, 0xb5, 0x0e, 0x95, 0x0a, 0xbd, 0xbf, 0xd4, 0xa0, 0x79, 0x92, 0x0c, 0xb2, 0xf4, 0x3c, 0xba,
+	0x20, 0x04, 0x36, 0xd3, 0x20, 0x61, 0x8a, 0x7a, 0xfc, 0x26, 0x1f, 0xc1, 0x66, 0xf1, 0x7a, 0xce,
+	0x90, 0xbd, 0xee, 0xc7, 0x77, 0xb4, 0x27, 0x6d, 0xb3, 0x7f, 0x92, 0xa0, 0x3b, 0x54, 0x12, 0x6c,
+	0xb3, 0x34, 0x38, 0x8b, 0x59, 0xa8, 0x28, 0xd4, 0x43, 0x72, 0x1f, 0x5a, 0x79, 0x90, 0xcc, 0x63,
+	0xe6, 0x9f, 0x73, 0xf6, 0x12, 0x09, 0xea, 0x50, 0x90, 0xd0, 0x33, 0xce, 0x5e, 0xf6, 0x3e, 0x03,
+	0x53, 0xba, 0x22, 0x2d, 0x68, 0x0c, 0x8e, 0xa7, 0xa3, 0x89, 0x4b, 0xed, 0x1b, 0xc4, 0x02, 0xe3,
+	0x79, 0x7f, 0xfa, 0xdc, 0xb5, 0x6b, 0xe2, 0x73, 0x3c, 0xe9, 0x4f, 0x5c, 0xbb, 0x2e, 0x55, 0x46,
+	0x13, 0xf7, 0x17, 0x13, 0x7b, 0xa3, 0xf7, 0x87, 0x1a, 0x74, 0x4e, 0x92, 0xe7, 0x3c, 0x5b, 0xcc,
+	0xd5, 0x3e, 0x76, 0x01, 0x2e, 0xc4, 0xd0, 0x2f, 0xed, 0xc6, 0x42, 0x64, 0x24, 0xb6, 0xb4, 0x12,
+	0xe3, 0x52, 0xea, 0xb8, 0x14, 0x29, 0x16, 0x2b, 0x79, 0xc7, 0x26, 0x3e, 0x84, 0x46, 0xc2, 0x0a,
+	0x1e, 0xcd, 0xc4, 0x09, 0x0b, 0x62, 0xed, 0xeb, 0x74, 0x50, 0xad, 0xd0, 0xfb, 0x67, 0x0d, 0x2c,
+	0x8d, 0xe6, 0x6f, 0x5c, 0xe9, 0xff, 0x87, 0x76, 0xc8, 0xce, 0x83, 0x45, 0x5c, 0x94, 0x17, 0xd1,
+	0x52, 0x18, 0x2e, 0xe3, 0x3e, 0x34, 0x70, 0x4d, 0x7a, 0x19, 0x87, 0xc6, 0xbf, 0xbe, 0xfd, 0x66,
+	0xb7, 0x46, 0x35, 0x4a, 0x3e, 0x84, 0x8e, 0xb0, 0xf5, 0xb3, 0x25, 0xe3, 0x3c, 0x0a, 0x99, 0xbc,
+	0x75, 0x5a, 0xad, 0x2d, 0x64, 0xc7, 0x4a, 0x44, 0x1e, 0x81, 0x89, 0x66, 0xb9, 0x63, 0xe0, 0xc2,
+	0x6f, 0xaf, 0x17, 0x5e, 0x22, 0x8e, 0x2a, 0xa5, 0xf2, 0x46, 0xcd, 0xef, 0xda, 0xe8, 0xdf, 0x6a,
+	0x60, 0x78, 0x49, 0x70, 0xc1, 0xde, 0x7a, 0x7d, 0x1c, 0x68, 0x2c, 0x19, 0xcf, 0xa3, 0x2c, 0xd5,
+	0xef, 0x4f, 0x0d, 0x85, 0xf6, 0x65, 0x90, 0x5f, 0xe2, 0xe6, 0x2c, 0x8a, 0xdf, 0xe4, 0x03, 0xb0,
+	0xa3, 0x34, 0x2f, 0x82, 0x38, 0xf6, 0xc5, 0xb5, 0x2e, 0xa2, 0x44, 0xee, 0xca, 0xa2, 0x5b, 0x0a,
+	0x1f, 0x2a, 0x58, 0x04, 0x85, 0x28, 0xf7, 0x83, 0x59, 0x11, 0x2d, 0x19, 0x06, 0x85, 0x26, 0x6d,
+	0x46, 0x79, 0x1f, 0xc7, 0x82, 0xde, 0x28, 0xf7, 0x45, 0x78, 0x8a, 0x8a, 0x82, 0x85, 0x8e, 0x89,
+	0xf2, 0x56, 0x94, 0x0f, 0x34, 0x44, 0xee, 0x42, 0x33, 0xca, 0xfd, 0x65, 0x10, 0x47, 0xa1, 0x7a,
+	0x64, 0x8d, 0x28, 0x3f, 0x15, 0xc3, 0xde, 0x23, 0x30, 0x71, 0x43, 0x39, 0x79, 0x08, 0x46, 0x24,
+	0xbe, 0xd4, 0x3b, 0xea, 0x68, 0x16, 0x50, 0x4c, 0xa5, 0xac, 0xf7, 0x8f, 0x06, 0x74, 0x10, 0x18,
+	0x66, 0xaf, 0xd2, 0x38, 0x0b, 0xc2, 0x37, 0x4e, 0x5b, 0x13, 0x53, 0x2f, 0x11, 0x63, 0xc3, 0xc6,
+	0x82, 0xc7, 0x6a, 0xf7, 0xe2, 0x53, 0x20, 0x33, 0x3e, 0x53, 0x4f, 0x43, 0x7c, 0x92, 0x63, 0xe8,
+	0x86, 0xca, 0xa7, 0x9f, 0x17, 0x22, 0x1c, 0x18, 0xf8, 0x0a, 0xf7, 0x2a, 0xeb, 0xd0, 0xd3, 0x56,
+	0x47, 0x63, 0xa1, 0x4f, 0x3b, 0x61, 0x79, 0x48, 0x1e, 0x42, 0x07, 0xd7, 0xec, 0xeb, 0x33, 0x31,
+	0x71, 0xfa, 0x36, 0x82, 0xa7, 0xea, 0x60, 0x3e, 0x00, 0x5b, 0x5b, 0xb1, 0xd0, 0x3f, 0x7b, 0x2d,
+	0x02, 0x5a, 0x03, 0x17, 0xb5, 0xb5, 0xc6, 0x0f, 0x05, 0x4c, 0x5e, 0x80, 0xc9, 0x59, 0x90, 0x67,
+	0xa9, 0xd3, 0xc4, 0x85, 0x3d, 0xfe, 0x2f, 0x16, 0xf6, 0x2c, 0x88, 0xe2, 0x05, 0x67, 0x14, 0xed,
+	0xa8, 0xb2, 0x27, 0xdf, 0x87, 0xad, 0x20, 0x0c, 0xa3, 0x22, 0xca, 0xd2, 0x20, 0xf6, 0xa3, 0xf4,
+	0x3c, 0x73, 0x2c, 0x5c, 0x5b, 0x77, 0x0d, 0x7b, 0xe9, 0x79, 0x26, 0x03, 0xc9, 0x92, 0xf9, 0x33,
+	0xbc, 0x86, 0x0e, 0xe0, 0xd1, 0x81, 0x80, 0xd4, 0xe3, 0xdf, 0x01, 0x2b, 0xce, 0x44, 0x1c, 0x0d,
+	0x23, 0xee, 0xb4, 0x64, 0xb6, 0x40, 0x60, 0x18, 0x71, 0xe2, 0x41, 0x4b, 0x12, 0x20, 0xe9, 0x6c,
+	0x7f, 0x27, 0x9d, 0x78, 0xa1, 0x82, 0x82, 0x49, 0x3a, 0x01, 0x8d, 0x25, 0x97, 0x3b, 0x60, 0x9d,
+	0x47, 0x31, 0xf3, 0xf3, 0xe8, 0x4b, 0xe6, 0x74, 0x90, 0x9f, 0xa6, 0x00, 0xc6, 0xd1, 0x97, 0xac,
+	0xf7, 0xa7, 0x1a, 0x90, 0x37, 0x8f, 0x83, 0x6c, 0x83, 0x3d, 0x3c, 0xfe, 0x7c, 0x74, 0x74, 0xdc,
+	0x1f, 0xfa, 0xd3, 0xd1, 0x4f, 0x47, 0xc7, 0x9f, 0x8f, 0xec, 0x1b, 0xe4, 0x3d, 0x20, 0x2b, 0x74,
+	0x3c, 0x1d, 0x0c, 0x5c, 0x77, 0xe8, 0x0e, 0xed, 0x5a, 0x05, 0xa7, 0xee, 0xcf, 0xa7, 0xee, 0x78,
+	0xe2, 0x0e, 0xed, 0x7a, 0xc5, 0xcb, 0x78, 0xd2, 0xa7, 0x02, 0xdd, 0x20, 0xb7, 0x60, 0x6b, 0x85,
+	0x3e, 0xeb, 0x7b, 0x47, 0xee, 0xd0, 0xde, 0x24, 0x0e, 0x6c, 0x97, 0x26, 0x1c, 0x4f, 0x4f, 0x4e,
+	0x8e, 0x51, 0xdd, 0xa8, 0x38, 0x1f, 0xf4, 0x47, 0x03, 0xf7, 0x48, 0x58, 0x98, 0xbd, 0xdf, 0xd5,
+	0xe0, 0xde, 0x7f, 0x3e, 0x2f, 0xd2, 0x86, 0xe6, 0xe8, 0xd8, 0x77, 0x29, 0x3d, 0x16, 0xd1, 0x79,
+	0x0b, 0x5a, 0xde, 0xe8, 0xb4, 0x7f, 0xe4, 0x0d, 0xfd, 0x29, 0x3d, 0xb2, 0x6b, 0x02, 0x18, 0xba,
+	0xa7, 0xde, 0xc0, 0xf5, 0x0f, 0xa7, 0xe3, 0x5f, 0xda, 0x75, 0x31, 0x8d, 0x37, 0x1a, 0x4f, 0x9f,
+	0x3d, 0xf3, 0x06, 0x9e, 0x3b, 0x9a, 0xf8, 0xe3, 0x93, 0xfe, 0xc0, 0xb5, 0x37, 0xc8, 0x4d, 0xe8,
+	0x28, 0x02, 0x94, 0xb3, 0x4d, 0xd2, 0x01, 0x6b, 0xbd, 0x10, 0xa3, 0xf7, 0x7b, 0x4d, 0x61, 0xe5,
+	0x08, 0x84, 0xa1, 0xf7, 0xb3, 0xfe, 0x73, 0xb7, 0xc4, 0x1f, 0x81, 0xae, 0x84, 0xbc, 0x51, 0x7f,
+	0x30, 0xf1, 0x4e, 0x45, 0xb2, 0xd8, 0x06, 0x5b, 0x62, 0x88, 0xf4, 0x27, 0xde, 0xe8, 0xb9, 0x5d,
+	0x27, 0x36, 0xb4, 0x4b, 0xa8, 0x2b, 0x59, 0x93, 0x08, 0x75, 0x4f, 0x5d, 0x8a, 0x6a, 0x9b, 0x6b,
+	0x87, 0x12, 0xc4, 0xe5, 0xfc, 0x04, 0xba, 0x15, 0x5a, 0x72, 0xf2, 0x91, 0x4e, 0xb2, 0xf5, 0x6a,
+	0x48, 0xad, 0xa8, 0xe9, 0x3c, 0xfb, 0xb5, 0x01, 0x9b, 0x27, 0x19, 0x2f, 0xc8, 0x1d, 0x68, 0xcc,
+	0x33, 0x5e, 0xf8, 0x69, 0x86, 0x01, 0xa2, 0x43, 0x4d, 0x31, 0x1c, 0x65, 0x64, 0x1b, 0x8c, 0x38,
+	0x38, 0x63, 0xb1, 0x8a, 0x12, 0x72, 0x40, 0x3e, 0x50, 0xe9, 0x77, 0x03, 0x6f, 0xea, 0x3a, 0x6c,
+	0x67, 0xbc, 0xc0, 0x3f, 0xa5, 0xe4, 0xfb, 0x63, 0x68, 0x05, 0x61, 0x12, 0xa5, 0x95, 0x50, 0xe1,
+	0xec, 0xab, 0x22, 0xad, 0x2f, 0x44, 0x48, 0xe1, 0x3e, 0xd6, 0x08, 0x14, 0x82, 0x15, 0x22, 0x4c,
+	0xb3, 0x39, 0xe3, 0x68, 0xb9, 0xc8, 0x31, 0x2a, 0x94, 0x4c, 0x8f, 0xe7, 0x8c, 0x8f, 0x51, 0xa2,
+	0x4d, 0xb3, 0x15, 0x22, 0x9e, 0x81, 0xac, 0x22, 0x7d, 0x15, 0x48, 0x2d, 0xda, 0x94, 0x80, 0x17,
+	0x0a, 0x8a, 0xe6, 0x8c, 0xf1, 0xdc, 0x69, 0x5e, 0xcb, 0x3a, 0xb8, 0x7c, 0xc6, 0xb8, 0xf8, 0xa0,
+	0x52, 0x47, 0xa4, 0x65, 0x7e, 0xe5, 0xcf, 0x83, 0xd9, 0x17, 0xac, 0xc8, 0xf1, 0xf5, 0x9b, 0xd4,
+	0xe2, 0x57, 0x27, 0x12, 0x10, 0x01, 0x9b, 0x5f, 0xa9, 0x70, 0x04, 0x28, 0x6c, 0xf0, 0x2b, 0x19,
+	0x86, 0x76, 0xc0, 0xe2, 0x57, 0x3e, 0xe3, 0x3c, 0xe3, 0x39, 0x3e, 0x79, 0x93, 0x36, 0xf9, 0x95,
+	0x8b, 0x63, 0xe1, 0xb6, 0x58, 0xbb, 0x6d, 0x4b, 0xb7, 0x45, 0xd9, 0x6d, 0xa1, 0xdd, 0x76, 0xa4,
+	0xdb, 0x62, 0xed, 0xb6, 0x58, 0xb9, 0xed, 0x4a, 0xb7, 0x85, 0x72, 0x7b, 0xef, 0x29, 0x34, 0xf5,
+	0x06, 0xaa, 0x1c, 0xd4, 0xae, 0x71, 0x50, 0x3a, 0xf0, 0x7a, 0xf9, 0xc0, 0x7b, 0x39, 0x34, 0xf5,
+	0x09, 0x8a, 0x82, 0x66, 0x7d, 0x9f, 0x6d, 0x68, 0xbb, 0x93, 0x17, 0x2e, 0x1d, 0xb9, 0x13, 0x7f,
+	0x34, 0xf2, 0xec, 0x5a, 0x05, 0x99, 0x8e, 0x3c, 0x59, 0x01, 0x9d, 0x1c, 0x8f, 0xfc, 0xe3, 0xa3,
+	0x89, 0xbd, 0xb1, 0x1a, 0x8c, 0xa6, 0xf2, 0x19, 0x9d, 0xba, 0x42, 0x51, 0xc8, 0x8c, 0xd2, 0x70,
+	0x34, 0xb5, 0xcd, 0xde, 0x47, 0x60, 0x88, 0x49, 0x73, 0xd2, 0xab, 0x96, 0x88, 0xed, 0xf2, 0xd1,
+	0xe8, 0x4b, 0xfb, 0xd7, 0x36, 0x98, 0xb2, 0x64, 0x24, 0xb7, 0xd7, 0x29, 0x4d, 0x57, 0x18, 0x22,
+	0xb3, 0xdd, 0x2d, 0x55, 0x87, 0x2b, 0x81, 0xbc, 0x8e, 0x77, 0x61, 0x93, 0x67, 0x59, 0x51, 0x2d,
+	0x5e, 0x10, 0x22, 0x3d, 0xb0, 0xe6, 0x01, 0x67, 0x69, 0x21, 0xf8, 0xda, 0x2c, 0x9b, 0x36, 0x25,
+	0x8e, 0x57, 0xa7, 0xab, 0x74, 0x34, 0x7b, 0xdb, 0x82, 0xbd, 0x55, 0x79, 0x23, 0x85, 0x27, 0xf2,
+	0xed, 0xec, 0x82, 0x29, 0x4b, 0x7e, 0xd9, 0x1e, 0x68, 0x25, 0x05, 0x92, 0x1d, 0x30, 0x92, 0x2c,
+	0x64, 0xb1, 0x4c, 0x77, 0x5a, 0x2a, 0x31, 0xf2, 0x18, 0xec, 0xcb, 0x80, 0x87, 0xaf, 0x02, 0xbe,
+	0x4e, 0x8b, 0x8d, 0xb2, 0xde, 0x96, 0x16, 0xeb, 0x04, 0xf9, 0x18, 0xec, 0xf3, 0x88, 0x27, 0x15,
+	0x8b, 0x66, 0xc5, 0x42, 0x8b, 0xb5, 0xc5, 0x23, 0x30, 0x31, 0x73, 0xc8, 0x6b, 0xdd, 0xfa, 0xb8,
+	0x5b, 0x89, 0x15, 0xf9, 0x6a, 0xbd, 0x52, 0x49, 0x54, 0x76, 0x39, 0xe3, 0x51, 0x10, 0xfb, 0xe9,
+	0x22, 0x39, 0x63, 0x1c, 0xef, 0xfb, 0xca, 0x7b, 0x5b, 0xca, 0x46, 0x28, 0x12, 0x5c, 0xae, 0x9b,
+	0x23, 0xa7, 0xc2, 0xe5, 0xaa, 0x47, 0xba, 0xbf, 0x6e, 0x82, 0x5a, 0x65, 0x8d, 0x55, 0x2f, 0x44,
+	0x60, 0x73, 0x19, 0x07, 0x29, 0xbe, 0x8e, 0x0e, 0xc5, 0x6f, 0x91, 0x68, 0x93, 0x60, 0x26, 0x5a,
+	0x1c, 0xce, 0x72, 0xf9, 0x36, 0x2c, 0x0a, 0x49, 0x30, 0xeb, 0x4b, 0x84, 0x3c, 0x84, 0x76, 0x34,
+	0x5f, 0xfe, 0x68, 0xa5, 0x21, 0x5e, 0x88, 0xf5, 0xe2, 0x06, 0x6d, 0x09, 0xb4, 0xaa, 0xf4, 0xc9,
+	0x4a, 0x69, 0xab, 0xa4, 0xf4, 0x89, 0x56, 0x7a, 0x1f, 0x3a, 0x97, 0x59, 0x5e, 0xf8, 0x41, 0x1a,
+	0xe2, 0x69, 0x3b, 0xb7, 0xb5, 0x96, 0x80, 0xfb, 0x69, 0x88, 0xaf, 0x6c, 0x17, 0x80, 0x5d, 0x15,
+	0x3c, 0xf0, 0x03, 0x7e, 0x91, 0x3b, 0x77, 0x64, 0x55, 0x8f, 0x48, 0x9f, 0x5f, 0xe4, 0xe4, 0x29,
+	0x74, 0xe6, 0x3c, 0xbb, 0x7a, 0xbd, 0x9a, 0xea, 0x16, 0x52, 0xbd, 0x53, 0xed, 0x7d, 0xf6, 0x4f,
+	0x84, 0x8e, 0x9a, 0x98, 0xb6, 0xe7, 0xa5, 0xd1, 0xf5, 0x00, 0x6a, 0xff, 0x0f, 0x01, 0xf4, 0x69,
+	0x35, 0x80, 0xde, 0x7c, 0x77, 0x00, 0xd5, 0xfc, 0x97, 0xe3, 0xe8, 0xee, 0xaa, 0x94, 0x7a, 0xaf,
+	0x72, 0x85, 0x55, 0x7d, 0xe4, 0x41, 0x77, 0x96, 0xa5, 0xa9, 0xe8, 0x13, 0xd5, 0x1c, 0x04, 0xe7,
+	0xd8, 0xd1, 0x73, 0x0c, 0xa4, 0xf4, 0x6d, 0xd3, 0x74, 0x66, 0x65, 0x19, 0xf9, 0x01, 0x98, 0xb3,
+	0x45, 0x5e, 0x64, 0x89, 0xf3, 0x14, 0x19, 0xda, 0xde, 0x97, 0x0d, 0xff, 0xbe, 0x6e, 0xf8, 0xf7,
+	0xfb, 0xe9, 0x6b, 0xaa, 0x74, 0xc8, 0x13, 0x30, 0xc4, 0x91, 0xe4, 0xce, 0xaf, 0xdf, 0x12, 0x28,
+	0x0e, 0xbb, 0x7f, 0xff, 0xf6, 0x9b, 0x5d, 0x6b, 0x15, 0xe1, 0xa8, 0xd4, 0x25, 0x8f, 0xc1, 0xc0,
+	0x2e, 0xd6, 0xf9, 0x4d, 0x0d, 0xa7, 0x20, 0xfb, 0xe5, 0xa6, 0x1f, 0x1b, 0xd7, 0x43, 0x43, 0x98,
+	0xde, 0xa0, 0x52, 0x51, 0x10, 0x88, 0x62, 0xd5, 0xa5, 0xfc, 0x56, 0xda, 0xdd, 0x79, 0xc3, 0x0e,
+	0xbb, 0x95, 0x95, 0x31, 0x9c, 0xaf, 0x20, 0xf2, 0x19, 0xc0, 0x3c, 0x51, 0x65, 0x61, 0xee, 0x7c,
+	0x25, 0x1d, 0xdc, 0xbc, 0xde, 0xb7, 0xac, 0x4c, 0xad, 0xf9, 0xaa, 0x39, 0x3b, 0x82, 0x2d, 0x59,
+	0x14, 0xea, 0xf2, 0x36, 0x77, 0xbe, 0xae, 0xbd, 0x23, 0xa7, 0x1f, 0xb6, 0x84, 0x0b, 0x53, 0x16,
+	0xf5, 0xb4, 0x1b, 0x55, 0xca, 0x82, 0x7b, 0x5f, 0xd5, 0xa1, 0x5d, 0xbe, 0x64, 0xef, 0xce, 0x0e,
+	0xf7, 0xa1, 0xa5, 0x84, 0xeb, 0x38, 0x4a, 0x21, 0x5c, 0xff, 0x18, 0xb2, 0x0b, 0x30, 0xbb, 0x0c,
+	0xd2, 0x94, 0xc5, 0xc2, 0x7c, 0x43, 0x36, 0xab, 0x0a, 0xf1, 0x42, 0xb2, 0x07, 0xb6, 0x16, 0xcb,
+	0x9e, 0x56, 0x45, 0xd4, 0x0e, 0xed, 0x2a, 0x1c, 0xe9, 0xf1, 0x42, 0x72, 0x00, 0xb7, 0xb4, 0x66,
+	0xc1, 0x78, 0x12, 0xa5, 0x81, 0xa8, 0xaa, 0xd5, 0xef, 0x29, 0x44, 0x89, 0x26, 0x6b, 0x09, 0xb9,
+	0x0d, 0x66, 0x96, 0x2e, 0x84, 0x43, 0x13, 0x1d, 0x1a, 0x59, 0xba, 0xf0, 0x42, 0xf2, 0x3e, 0x74,
+	0x05, 0x9c, 0xb3, 0x5c, 0x84, 0x36, 0x9d, 0xf5, 0x3b, 0xb4, 0x9d, 0xa5, 0x8b, 0xb1, 0x04, 0xbd,
+	0xf0, 0xd0, 0x12, 0x21, 0x07, 0xf7, 0xdf, 0x3b, 0x80, 0x86, 0x7c, 0x7b, 0xe2, 0xa1, 0x57, 0x92,
+	0x4e, 0xb7, 0xfa, 0x36, 0x75, 0xda, 0xf9, 0xe3, 0x06, 0x6c, 0x8f, 0xa3, 0x64, 0x11, 0x07, 0x05,
+	0xeb, 0xc7, 0x01, 0x4f, 0x28, 0x7b, 0xb9, 0x60, 0x79, 0xf1, 0x46, 0x5f, 0xf5, 0x7f, 0x60, 0x45,
+	0x69, 0x18, 0xcd, 0x82, 0x22, 0xd3, 0x3f, 0xef, 0xac, 0x01, 0x91, 0x78, 0xa3, 0xb4, 0x38, 0xd7,
+	0xb4, 0x59, 0xd4, 0x14, 0x43, 0xb9, 0x03, 0xbc, 0xaf, 0x82, 0x71, 0xf9, 0x13, 0x81, 0xec, 0x31,
+	0xdb, 0x73, 0x95, 0x8e, 0xf1, 0x57, 0x82, 0x1e, 0x74, 0xc4, 0x3e, 0xd7, 0x47, 0x27, 0x99, 0x6a,
+	0x65, 0xe9, 0x62, 0xa8, 0x4f, 0xef, 0x09, 0xbc, 0x17, 0xa5, 0x22, 0x05, 0x30, 0xff, 0x2c, 0x2a,
+	0x64, 0xa9, 0xe0, 0x73, 0x11, 0x3c, 0x04, 0x65, 0x06, 0xbd, 0xa5, 0xa4, 0x87, 0x51, 0x81, 0x65,
+	0x03, 0x95, 0x4d, 0x80, 0x11, 0xf2, 0xe8, 0xbc, 0x40, 0xde, 0x0c, 0x2a, 0x07, 0x62, 0xb5, 0x29,
+	0x7b, 0xe5, 0xb3, 0x97, 0x21, 0xe6, 0x12, 0x83, 0x9a, 0x29, 0x7b, 0xe5, 0xbe, 0x14, 0x6d, 0xfe,
+	0x4d, 0xc9, 0x77, 0x39, 0x21, 0xc8, 0xde, 0x68, 0x0b, 0x29, 0x2f, 0x25, 0x83, 0x17, 0x60, 0x89,
+	0x90, 0x22, 0x4f, 0x16, 0x30, 0x40, 0x7c, 0xa8, 0x39, 0x7e, 0x1b, 0xa3, 0x18, 0x99, 0x50, 0x1b,
+	0xeb, 0xc8, 0xb5, 0x71, 0xef, 0x7b, 0xd0, 0xa9, 0xc8, 0x88, 0x05, 0x06, 0xed, 0x7b, 0x63, 0x57,
+	0xfe, 0x26, 0x33, 0x38, 0x72, 0xfb, 0xd4, 0xae, 0x1d, 0x8e, 0xe1, 0x56, 0xc6, 0x2f, 0xf0, 0x95,
+	0xce, 0x32, 0x1e, 0xaa, 0xb9, 0x0e, 0xdb, 0xa7, 0xf8, 0x5f, 0xf2, 0xf4, 0xab, 0xfd, 0x8b, 0xa8,
+	0xb8, 0x5c, 0x9c, 0x89, 0x48, 0x75, 0xa0, 0x35, 0x0f, 0xa4, 0xe6, 0x23, 0xf5, 0x9b, 0xe0, 0xf2,
+	0xc9, 0xc1, 0x45, 0xa6, 0xb0, 0x33, 0x13, 0xc1, 0x27, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x35,
+	0x16, 0x1c, 0x6d, 0xad, 0x14, 0x00, 0x00,
+}
diff --git a/vendor/github.com/opencord/voltha-protos/v3/go/voltha/events.pb.go b/vendor/github.com/opencord/voltha-protos/v3/go/voltha/events.pb.go
new file mode 100644
index 0000000..fd6eba6
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v3/go/voltha/events.pb.go
@@ -0,0 +1,1552 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/events.proto
+
+package voltha
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	timestamp "github.com/golang/protobuf/ptypes/timestamp"
+	_ "github.com/opencord/voltha-protos/v3/go/common"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type ConfigEventType_Types int32
+
+const (
+	ConfigEventType_add    ConfigEventType_Types = 0
+	ConfigEventType_remove ConfigEventType_Types = 1
+	ConfigEventType_update ConfigEventType_Types = 2
+)
+
+var ConfigEventType_Types_name = map[int32]string{
+	0: "add",
+	1: "remove",
+	2: "update",
+}
+
+var ConfigEventType_Types_value = map[string]int32{
+	"add":    0,
+	"remove": 1,
+	"update": 2,
+}
+
+func (x ConfigEventType_Types) String() string {
+	return proto.EnumName(ConfigEventType_Types_name, int32(x))
+}
+
+func (ConfigEventType_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{0, 0}
+}
+
+type KpiEventType_Types int32
+
+const (
+	KpiEventType_slice KpiEventType_Types = 0
+	KpiEventType_ts    KpiEventType_Types = 1
+)
+
+var KpiEventType_Types_name = map[int32]string{
+	0: "slice",
+	1: "ts",
+}
+
+var KpiEventType_Types_value = map[string]int32{
+	"slice": 0,
+	"ts":    1,
+}
+
+func (x KpiEventType_Types) String() string {
+	return proto.EnumName(KpiEventType_Types_name, int32(x))
+}
+
+func (KpiEventType_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{2, 0}
+}
+
+type AlarmEventType_Types int32
+
+const (
+	AlarmEventType_COMMUNICATION AlarmEventType_Types = 0
+	AlarmEventType_ENVIRONMENT   AlarmEventType_Types = 1
+	AlarmEventType_EQUIPMENT     AlarmEventType_Types = 2
+	AlarmEventType_SERVICE       AlarmEventType_Types = 3
+	AlarmEventType_PROCESSING    AlarmEventType_Types = 4
+	AlarmEventType_SECURITY      AlarmEventType_Types = 5
+)
+
+var AlarmEventType_Types_name = map[int32]string{
+	0: "COMMUNICATION",
+	1: "ENVIRONMENT",
+	2: "EQUIPMENT",
+	3: "SERVICE",
+	4: "PROCESSING",
+	5: "SECURITY",
+}
+
+var AlarmEventType_Types_value = map[string]int32{
+	"COMMUNICATION": 0,
+	"ENVIRONMENT":   1,
+	"EQUIPMENT":     2,
+	"SERVICE":       3,
+	"PROCESSING":    4,
+	"SECURITY":      5,
+}
+
+func (x AlarmEventType_Types) String() string {
+	return proto.EnumName(AlarmEventType_Types_name, int32(x))
+}
+
+func (AlarmEventType_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{8, 0}
+}
+
+type AlarmEventCategory_Types int32
+
+const (
+	AlarmEventCategory_PON AlarmEventCategory_Types = 0
+	AlarmEventCategory_OLT AlarmEventCategory_Types = 1
+	AlarmEventCategory_ONT AlarmEventCategory_Types = 2
+	AlarmEventCategory_ONU AlarmEventCategory_Types = 3
+	AlarmEventCategory_NNI AlarmEventCategory_Types = 4
+)
+
+var AlarmEventCategory_Types_name = map[int32]string{
+	0: "PON",
+	1: "OLT",
+	2: "ONT",
+	3: "ONU",
+	4: "NNI",
+}
+
+var AlarmEventCategory_Types_value = map[string]int32{
+	"PON": 0,
+	"OLT": 1,
+	"ONT": 2,
+	"ONU": 3,
+	"NNI": 4,
+}
+
+func (x AlarmEventCategory_Types) String() string {
+	return proto.EnumName(AlarmEventCategory_Types_name, int32(x))
+}
+
+func (AlarmEventCategory_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{9, 0}
+}
+
+type AlarmEventState_Types int32
+
+const (
+	AlarmEventState_RAISED  AlarmEventState_Types = 0
+	AlarmEventState_CLEARED AlarmEventState_Types = 1
+)
+
+var AlarmEventState_Types_name = map[int32]string{
+	0: "RAISED",
+	1: "CLEARED",
+}
+
+var AlarmEventState_Types_value = map[string]int32{
+	"RAISED":  0,
+	"CLEARED": 1,
+}
+
+func (x AlarmEventState_Types) String() string {
+	return proto.EnumName(AlarmEventState_Types_name, int32(x))
+}
+
+func (AlarmEventState_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{10, 0}
+}
+
+type AlarmEventSeverity_Types int32
+
+const (
+	AlarmEventSeverity_INDETERMINATE AlarmEventSeverity_Types = 0
+	AlarmEventSeverity_WARNING       AlarmEventSeverity_Types = 1
+	AlarmEventSeverity_MINOR         AlarmEventSeverity_Types = 2
+	AlarmEventSeverity_MAJOR         AlarmEventSeverity_Types = 3
+	AlarmEventSeverity_CRITICAL      AlarmEventSeverity_Types = 4
+)
+
+var AlarmEventSeverity_Types_name = map[int32]string{
+	0: "INDETERMINATE",
+	1: "WARNING",
+	2: "MINOR",
+	3: "MAJOR",
+	4: "CRITICAL",
+}
+
+var AlarmEventSeverity_Types_value = map[string]int32{
+	"INDETERMINATE": 0,
+	"WARNING":       1,
+	"MINOR":         2,
+	"MAJOR":         3,
+	"CRITICAL":      4,
+}
+
+func (x AlarmEventSeverity_Types) String() string {
+	return proto.EnumName(AlarmEventSeverity_Types_name, int32(x))
+}
+
+func (AlarmEventSeverity_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{11, 0}
+}
+
+type EventCategory_Types int32
+
+const (
+	EventCategory_COMMUNICATION EventCategory_Types = 0
+	EventCategory_ENVIRONMENT   EventCategory_Types = 1
+	EventCategory_EQUIPMENT     EventCategory_Types = 2
+	EventCategory_SERVICE       EventCategory_Types = 3
+	EventCategory_PROCESSING    EventCategory_Types = 4
+	EventCategory_SECURITY      EventCategory_Types = 5
+)
+
+var EventCategory_Types_name = map[int32]string{
+	0: "COMMUNICATION",
+	1: "ENVIRONMENT",
+	2: "EQUIPMENT",
+	3: "SERVICE",
+	4: "PROCESSING",
+	5: "SECURITY",
+}
+
+var EventCategory_Types_value = map[string]int32{
+	"COMMUNICATION": 0,
+	"ENVIRONMENT":   1,
+	"EQUIPMENT":     2,
+	"SERVICE":       3,
+	"PROCESSING":    4,
+	"SECURITY":      5,
+}
+
+func (x EventCategory_Types) String() string {
+	return proto.EnumName(EventCategory_Types_name, int32(x))
+}
+
+func (EventCategory_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{14, 0}
+}
+
+type EventSubCategory_Types int32
+
+const (
+	EventSubCategory_PON EventSubCategory_Types = 0
+	EventSubCategory_OLT EventSubCategory_Types = 1
+	EventSubCategory_ONT EventSubCategory_Types = 2
+	EventSubCategory_ONU EventSubCategory_Types = 3
+	EventSubCategory_NNI EventSubCategory_Types = 4
+)
+
+var EventSubCategory_Types_name = map[int32]string{
+	0: "PON",
+	1: "OLT",
+	2: "ONT",
+	3: "ONU",
+	4: "NNI",
+}
+
+var EventSubCategory_Types_value = map[string]int32{
+	"PON": 0,
+	"OLT": 1,
+	"ONT": 2,
+	"ONU": 3,
+	"NNI": 4,
+}
+
+func (x EventSubCategory_Types) String() string {
+	return proto.EnumName(EventSubCategory_Types_name, int32(x))
+}
+
+func (EventSubCategory_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{15, 0}
+}
+
+type EventType_Types int32
+
+const (
+	EventType_CONFIG_EVENT EventType_Types = 0
+	EventType_KPI_EVENT    EventType_Types = 1
+	EventType_KPI_EVENT2   EventType_Types = 2
+	EventType_DEVICE_EVENT EventType_Types = 3
+)
+
+var EventType_Types_name = map[int32]string{
+	0: "CONFIG_EVENT",
+	1: "KPI_EVENT",
+	2: "KPI_EVENT2",
+	3: "DEVICE_EVENT",
+}
+
+var EventType_Types_value = map[string]int32{
+	"CONFIG_EVENT": 0,
+	"KPI_EVENT":    1,
+	"KPI_EVENT2":   2,
+	"DEVICE_EVENT": 3,
+}
+
+func (x EventType_Types) String() string {
+	return proto.EnumName(EventType_Types_name, int32(x))
+}
+
+func (EventType_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{16, 0}
+}
+
+type ConfigEventType struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ConfigEventType) Reset()         { *m = ConfigEventType{} }
+func (m *ConfigEventType) String() string { return proto.CompactTextString(m) }
+func (*ConfigEventType) ProtoMessage()    {}
+func (*ConfigEventType) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{0}
+}
+
+func (m *ConfigEventType) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ConfigEventType.Unmarshal(m, b)
+}
+func (m *ConfigEventType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ConfigEventType.Marshal(b, m, deterministic)
+}
+func (m *ConfigEventType) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConfigEventType.Merge(m, src)
+}
+func (m *ConfigEventType) XXX_Size() int {
+	return xxx_messageInfo_ConfigEventType.Size(m)
+}
+func (m *ConfigEventType) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConfigEventType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfigEventType proto.InternalMessageInfo
+
+type ConfigEvent struct {
+	Type                 ConfigEventType_Types `protobuf:"varint,1,opt,name=type,proto3,enum=voltha.ConfigEventType_Types" json:"type,omitempty"`
+	Hash                 string                `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"`
+	Data                 string                `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *ConfigEvent) Reset()         { *m = ConfigEvent{} }
+func (m *ConfigEvent) String() string { return proto.CompactTextString(m) }
+func (*ConfigEvent) ProtoMessage()    {}
+func (*ConfigEvent) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{1}
+}
+
+func (m *ConfigEvent) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ConfigEvent.Unmarshal(m, b)
+}
+func (m *ConfigEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ConfigEvent.Marshal(b, m, deterministic)
+}
+func (m *ConfigEvent) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConfigEvent.Merge(m, src)
+}
+func (m *ConfigEvent) XXX_Size() int {
+	return xxx_messageInfo_ConfigEvent.Size(m)
+}
+func (m *ConfigEvent) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConfigEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfigEvent proto.InternalMessageInfo
+
+func (m *ConfigEvent) GetType() ConfigEventType_Types {
+	if m != nil {
+		return m.Type
+	}
+	return ConfigEventType_add
+}
+
+func (m *ConfigEvent) GetHash() string {
+	if m != nil {
+		return m.Hash
+	}
+	return ""
+}
+
+func (m *ConfigEvent) GetData() string {
+	if m != nil {
+		return m.Data
+	}
+	return ""
+}
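+
+// Sketch (values assumed): a ConfigEvent pairs an operation type with the
+// hash and serialized data of the configuration node it touched.
+//
+//	ev := &ConfigEvent{Type: ConfigEventType_add, Hash: "abc123", Data: "{}"}
+//	if ev.GetType() == ConfigEventType_add {
+//		// handle an added configuration node
+//	}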
+
+type KpiEventType struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *KpiEventType) Reset()         { *m = KpiEventType{} }
+func (m *KpiEventType) String() string { return proto.CompactTextString(m) }
+func (*KpiEventType) ProtoMessage()    {}
+func (*KpiEventType) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{2}
+}
+
+func (m *KpiEventType) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_KpiEventType.Unmarshal(m, b)
+}
+func (m *KpiEventType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_KpiEventType.Marshal(b, m, deterministic)
+}
+func (m *KpiEventType) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_KpiEventType.Merge(m, src)
+}
+func (m *KpiEventType) XXX_Size() int {
+	return xxx_messageInfo_KpiEventType.Size(m)
+}
+func (m *KpiEventType) XXX_DiscardUnknown() {
+	xxx_messageInfo_KpiEventType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KpiEventType proto.InternalMessageInfo
+
+//
+// Struct to convey a dictionary of metric metadata.
+type MetricMetaData struct {
+	Title           string  `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
+	Ts              float64 `protobuf:"fixed64,2,opt,name=ts,proto3" json:"ts,omitempty"`
+	LogicalDeviceId string  `protobuf:"bytes,3,opt,name=logical_device_id,json=logicalDeviceId,proto3" json:"logical_device_id,omitempty"`
+	// (equivalent to the DPID that ONOS has
+	// for the VOLTHA device, without the
+	// 'of:' prefix)
+	SerialNo             string            `protobuf:"bytes,4,opt,name=serial_no,json=serialNo,proto3" json:"serial_no,omitempty"`
+	DeviceId             string            `protobuf:"bytes,5,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	Context              map[string]string `protobuf:"bytes,6,rep,name=context,proto3" json:"context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Uuid                 string            `protobuf:"bytes,7,opt,name=uuid,proto3" json:"uuid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *MetricMetaData) Reset()         { *m = MetricMetaData{} }
+func (m *MetricMetaData) String() string { return proto.CompactTextString(m) }
+func (*MetricMetaData) ProtoMessage()    {}
+func (*MetricMetaData) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{3}
+}
+
+func (m *MetricMetaData) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MetricMetaData.Unmarshal(m, b)
+}
+func (m *MetricMetaData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MetricMetaData.Marshal(b, m, deterministic)
+}
+func (m *MetricMetaData) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MetricMetaData.Merge(m, src)
+}
+func (m *MetricMetaData) XXX_Size() int {
+	return xxx_messageInfo_MetricMetaData.Size(m)
+}
+func (m *MetricMetaData) XXX_DiscardUnknown() {
+	xxx_messageInfo_MetricMetaData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricMetaData proto.InternalMessageInfo
+
+func (m *MetricMetaData) GetTitle() string {
+	if m != nil {
+		return m.Title
+	}
+	return ""
+}
+
+func (m *MetricMetaData) GetTs() float64 {
+	if m != nil {
+		return m.Ts
+	}
+	return 0
+}
+
+func (m *MetricMetaData) GetLogicalDeviceId() string {
+	if m != nil {
+		return m.LogicalDeviceId
+	}
+	return ""
+}
+
+func (m *MetricMetaData) GetSerialNo() string {
+	if m != nil {
+		return m.SerialNo
+	}
+	return ""
+}
+
+func (m *MetricMetaData) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *MetricMetaData) GetContext() map[string]string {
+	if m != nil {
+		return m.Context
+	}
+	return nil
+}
+
+func (m *MetricMetaData) GetUuid() string {
+	if m != nil {
+		return m.Uuid
+	}
+	return ""
+}
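+
+// Sketch (identifiers assumed): metadata shared by a batch of metrics, with
+// free-form context keys.
+//
+//	md := &MetricMetaData{
+//		Title:    "Ethernet",
+//		Ts:       1577836800.0, // seconds since epoch
+//		DeviceId: "device-1",
+//		Context:  map[string]string{"intf_id": "0"},
+//	}
+//	_ = md.GetContext()["intf_id"]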
+
+//
+// Struct to convey a dictionary of metric->value pairs. Typically used in
+// pure shared-timestamp or shared-timestamp + shared object prefix situations.
+type MetricValuePairs struct {
+	// Metric / value pairs.
+	Metrics              map[string]float32 `protobuf:"bytes,1,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed32,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *MetricValuePairs) Reset()         { *m = MetricValuePairs{} }
+func (m *MetricValuePairs) String() string { return proto.CompactTextString(m) }
+func (*MetricValuePairs) ProtoMessage()    {}
+func (*MetricValuePairs) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{4}
+}
+
+func (m *MetricValuePairs) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MetricValuePairs.Unmarshal(m, b)
+}
+func (m *MetricValuePairs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MetricValuePairs.Marshal(b, m, deterministic)
+}
+func (m *MetricValuePairs) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MetricValuePairs.Merge(m, src)
+}
+func (m *MetricValuePairs) XXX_Size() int {
+	return xxx_messageInfo_MetricValuePairs.Size(m)
+}
+func (m *MetricValuePairs) XXX_DiscardUnknown() {
+	xxx_messageInfo_MetricValuePairs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricValuePairs proto.InternalMessageInfo
+
+func (m *MetricValuePairs) GetMetrics() map[string]float32 {
+	if m != nil {
+		return m.Metrics
+	}
+	return nil
+}
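+
+// Sketch: metric names mapped to float32 readings (the names are hypothetical).
+//
+//	mvp := &MetricValuePairs{Metrics: map[string]float32{"rx_bytes": 1024}}
+//	_ = mvp.GetMetrics()["rx_bytes"]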
+
+//
+// Struct to group metadata for a metric (or group of metrics) with the key-value
+// pairs of collected metrics
+type MetricInformation struct {
+	Metadata             *MetricMetaData    `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+	Metrics              map[string]float32 `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed32,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *MetricInformation) Reset()         { *m = MetricInformation{} }
+func (m *MetricInformation) String() string { return proto.CompactTextString(m) }
+func (*MetricInformation) ProtoMessage()    {}
+func (*MetricInformation) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{5}
+}
+
+func (m *MetricInformation) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MetricInformation.Unmarshal(m, b)
+}
+func (m *MetricInformation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MetricInformation.Marshal(b, m, deterministic)
+}
+func (m *MetricInformation) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MetricInformation.Merge(m, src)
+}
+func (m *MetricInformation) XXX_Size() int {
+	return xxx_messageInfo_MetricInformation.Size(m)
+}
+func (m *MetricInformation) XXX_DiscardUnknown() {
+	xxx_messageInfo_MetricInformation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricInformation proto.InternalMessageInfo
+
+func (m *MetricInformation) GetMetadata() *MetricMetaData {
+	if m != nil {
+		return m.Metadata
+	}
+	return nil
+}
+
+func (m *MetricInformation) GetMetrics() map[string]float32 {
+	if m != nil {
+		return m.Metrics
+	}
+	return nil
+}
+
+//
+// Legacy KPI Event structure. In mid-August, the KPI event format was updated
+// to a more easily parsable format. See VOL-1140 for more information.
+type KpiEvent struct {
+	Type                 KpiEventType_Types           `protobuf:"varint,1,opt,name=type,proto3,enum=voltha.KpiEventType_Types" json:"type,omitempty"`
+	Ts                   float32                      `protobuf:"fixed32,2,opt,name=ts,proto3" json:"ts,omitempty"`
+	Prefixes             map[string]*MetricValuePairs `protobuf:"bytes,3,rep,name=prefixes,proto3" json:"prefixes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}                     `json:"-"`
+	XXX_unrecognized     []byte                       `json:"-"`
+	XXX_sizecache        int32                        `json:"-"`
+}
+
+func (m *KpiEvent) Reset()         { *m = KpiEvent{} }
+func (m *KpiEvent) String() string { return proto.CompactTextString(m) }
+func (*KpiEvent) ProtoMessage()    {}
+func (*KpiEvent) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{6}
+}
+
+func (m *KpiEvent) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_KpiEvent.Unmarshal(m, b)
+}
+func (m *KpiEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_KpiEvent.Marshal(b, m, deterministic)
+}
+func (m *KpiEvent) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_KpiEvent.Merge(m, src)
+}
+func (m *KpiEvent) XXX_Size() int {
+	return xxx_messageInfo_KpiEvent.Size(m)
+}
+func (m *KpiEvent) XXX_DiscardUnknown() {
+	xxx_messageInfo_KpiEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KpiEvent proto.InternalMessageInfo
+
+func (m *KpiEvent) GetType() KpiEventType_Types {
+	if m != nil {
+		return m.Type
+	}
+	return KpiEventType_slice
+}
+
+func (m *KpiEvent) GetTs() float32 {
+	if m != nil {
+		return m.Ts
+	}
+	return 0
+}
+
+func (m *KpiEvent) GetPrefixes() map[string]*MetricValuePairs {
+	if m != nil {
+		return m.Prefixes
+	}
+	return nil
+}
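+
+// Sketch of the legacy shape (the prefix key below is hypothetical):
+// per-prefix metric maps keyed by an object path string.
+//
+//	ev := &KpiEvent{
+//		Type: KpiEventType_slice,
+//		Ts:   1577836800,
+//		Prefixes: map[string]*MetricValuePairs{
+//			"voltha.device-1.nni": {Metrics: map[string]float32{"rx_bytes": 1024}},
+//		},
+//	}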
+
+type KpiEvent2 struct {
+	// Type of KPI Event
+	Type KpiEventType_Types `protobuf:"varint,1,opt,name=type,proto3,enum=voltha.KpiEventType_Types" json:"type,omitempty"`
+	// Fields used when the type is slice:
+	Ts                   float64              `protobuf:"fixed64,2,opt,name=ts,proto3" json:"ts,omitempty"`
+	SliceData            []*MetricInformation `protobuf:"bytes,3,rep,name=slice_data,json=sliceData,proto3" json:"slice_data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *KpiEvent2) Reset()         { *m = KpiEvent2{} }
+func (m *KpiEvent2) String() string { return proto.CompactTextString(m) }
+func (*KpiEvent2) ProtoMessage()    {}
+func (*KpiEvent2) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{7}
+}
+
+func (m *KpiEvent2) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_KpiEvent2.Unmarshal(m, b)
+}
+func (m *KpiEvent2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_KpiEvent2.Marshal(b, m, deterministic)
+}
+func (m *KpiEvent2) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_KpiEvent2.Merge(m, src)
+}
+func (m *KpiEvent2) XXX_Size() int {
+	return xxx_messageInfo_KpiEvent2.Size(m)
+}
+func (m *KpiEvent2) XXX_DiscardUnknown() {
+	xxx_messageInfo_KpiEvent2.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KpiEvent2 proto.InternalMessageInfo
+
+func (m *KpiEvent2) GetType() KpiEventType_Types {
+	if m != nil {
+		return m.Type
+	}
+	return KpiEventType_slice
+}
+
+func (m *KpiEvent2) GetTs() float64 {
+	if m != nil {
+		return m.Ts
+	}
+	return 0
+}
+
+func (m *KpiEvent2) GetSliceData() []*MetricInformation {
+	if m != nil {
+		return m.SliceData
+	}
+	return nil
+}
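+
+// Sketch: the newer format carries a slice of MetricInformation entries, each
+// pairing shared metadata with its metric readings (values assumed).
+//
+//	ev2 := &KpiEvent2{
+//		Type: KpiEventType_slice,
+//		Ts:   1577836800.0,
+//		SliceData: []*MetricInformation{{
+//			Metadata: &MetricMetaData{Title: "Ethernet", DeviceId: "device-1"},
+//			Metrics:  map[string]float32{"rx_bytes": 1024},
+//		}},
+//	}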
+
+//
+// Identifies the area of the system impacted by the alarm.
+// To be deprecated once the Python version of the OpenOLT adapter
+// moves to the new event definition for device alarms.
+type AlarmEventType struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AlarmEventType) Reset()         { *m = AlarmEventType{} }
+func (m *AlarmEventType) String() string { return proto.CompactTextString(m) }
+func (*AlarmEventType) ProtoMessage()    {}
+func (*AlarmEventType) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{8}
+}
+
+func (m *AlarmEventType) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmEventType.Unmarshal(m, b)
+}
+func (m *AlarmEventType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmEventType.Marshal(b, m, deterministic)
+}
+func (m *AlarmEventType) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmEventType.Merge(m, src)
+}
+func (m *AlarmEventType) XXX_Size() int {
+	return xxx_messageInfo_AlarmEventType.Size(m)
+}
+func (m *AlarmEventType) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmEventType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmEventType proto.InternalMessageInfo
+
+//
+// Identifies the functional category originating the alarm.
+// To be deprecated once the Python versions of the OpenOLT and
+// OpenONU adapters move to the new event definition for device
+// alarms.
+type AlarmEventCategory struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AlarmEventCategory) Reset()         { *m = AlarmEventCategory{} }
+func (m *AlarmEventCategory) String() string { return proto.CompactTextString(m) }
+func (*AlarmEventCategory) ProtoMessage()    {}
+func (*AlarmEventCategory) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{9}
+}
+
+func (m *AlarmEventCategory) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmEventCategory.Unmarshal(m, b)
+}
+func (m *AlarmEventCategory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmEventCategory.Marshal(b, m, deterministic)
+}
+func (m *AlarmEventCategory) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmEventCategory.Merge(m, src)
+}
+func (m *AlarmEventCategory) XXX_Size() int {
+	return xxx_messageInfo_AlarmEventCategory.Size(m)
+}
+func (m *AlarmEventCategory) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmEventCategory.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmEventCategory proto.InternalMessageInfo
+
+//
+// Active state of the alarm
+// To be deprecated once the Python versions of the OpenOLT and
+// OpenONU adapters move to the new event definition for device
+// alarms.
+type AlarmEventState struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AlarmEventState) Reset()         { *m = AlarmEventState{} }
+func (m *AlarmEventState) String() string { return proto.CompactTextString(m) }
+func (*AlarmEventState) ProtoMessage()    {}
+func (*AlarmEventState) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{10}
+}
+
+func (m *AlarmEventState) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmEventState.Unmarshal(m, b)
+}
+func (m *AlarmEventState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmEventState.Marshal(b, m, deterministic)
+}
+func (m *AlarmEventState) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmEventState.Merge(m, src)
+}
+func (m *AlarmEventState) XXX_Size() int {
+	return xxx_messageInfo_AlarmEventState.Size(m)
+}
+func (m *AlarmEventState) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmEventState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmEventState proto.InternalMessageInfo
+
+//
+// Identifies the overall impact of the alarm on the system.
+// To be deprecated once the Python versions of the OpenOLT and
+// OpenONU adapters move to the new event definition for device
+// alarms.
+type AlarmEventSeverity struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AlarmEventSeverity) Reset()         { *m = AlarmEventSeverity{} }
+func (m *AlarmEventSeverity) String() string { return proto.CompactTextString(m) }
+func (*AlarmEventSeverity) ProtoMessage()    {}
+func (*AlarmEventSeverity) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{11}
+}
+
+func (m *AlarmEventSeverity) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmEventSeverity.Unmarshal(m, b)
+}
+func (m *AlarmEventSeverity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmEventSeverity.Marshal(b, m, deterministic)
+}
+func (m *AlarmEventSeverity) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmEventSeverity.Merge(m, src)
+}
+func (m *AlarmEventSeverity) XXX_Size() int {
+	return xxx_messageInfo_AlarmEventSeverity.Size(m)
+}
+func (m *AlarmEventSeverity) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmEventSeverity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmEventSeverity proto.InternalMessageInfo
+
+//
+// To be deprecated once the Python versions of the OpenOLT and
+// OpenONU adapters move to the new event definition for device
+// alarms.
+type AlarmEvent struct {
+	// Unique ID for this alarm.  e.g. voltha.some_olt.1234
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// Refers to the area of the system impacted by the alarm
+	Type AlarmEventType_Types `protobuf:"varint,2,opt,name=type,proto3,enum=voltha.AlarmEventType_Types" json:"type,omitempty"`
+	// Refers to the functional category of the alarm
+	Category AlarmEventCategory_Types `protobuf:"varint,3,opt,name=category,proto3,enum=voltha.AlarmEventCategory_Types" json:"category,omitempty"`
+	// Current active state of the alarm
+	State AlarmEventState_Types `protobuf:"varint,4,opt,name=state,proto3,enum=voltha.AlarmEventState_Types" json:"state,omitempty"`
+	// Overall impact of the alarm on the system
+	Severity AlarmEventSeverity_Types `protobuf:"varint,5,opt,name=severity,proto3,enum=voltha.AlarmEventSeverity_Types" json:"severity,omitempty"`
+	// Timestamp at which the alarm was first raised
+	// TODO: Is this obsolete? EventHeader already has a raised_ts
+	RaisedTs *timestamp.Timestamp `protobuf:"bytes,6,opt,name=raised_ts,json=raisedTs,proto3" json:"raised_ts,omitempty"`
+	// Timestamp at which the alarm was reported
+	// TODO: Is this obsolete? EventHeader already has a reported_ts
+	ReportedTs *timestamp.Timestamp `protobuf:"bytes,7,opt,name=reported_ts,json=reportedTs,proto3" json:"reported_ts,omitempty"`
+	// Timestamp at which the alarm has changed since it was raised
+	ChangedTs *timestamp.Timestamp `protobuf:"bytes,8,opt,name=changed_ts,json=changedTs,proto3" json:"changed_ts,omitempty"`
+	// Identifier of the originating resource of the alarm
+	ResourceId string `protobuf:"bytes,9,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"`
+	// Textual explanation of the alarm
+	Description string `protobuf:"bytes,10,opt,name=description,proto3" json:"description,omitempty"`
+	// Key/Value storage for extra information that may give context to the alarm
+	Context map[string]string `protobuf:"bytes,11,rep,name=context,proto3" json:"context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// logical device id
+	LogicalDeviceId string `protobuf:"bytes,12,opt,name=logical_device_id,json=logicalDeviceId,proto3" json:"logical_device_id,omitempty"`
+	// alarm_type_name clearly indicates the name of the alarm
+	AlarmTypeName        string   `protobuf:"bytes,13,opt,name=alarm_type_name,json=alarmTypeName,proto3" json:"alarm_type_name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AlarmEvent) Reset()         { *m = AlarmEvent{} }
+func (m *AlarmEvent) String() string { return proto.CompactTextString(m) }
+func (*AlarmEvent) ProtoMessage()    {}
+func (*AlarmEvent) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{12}
+}
+
+func (m *AlarmEvent) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmEvent.Unmarshal(m, b)
+}
+func (m *AlarmEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmEvent.Marshal(b, m, deterministic)
+}
+func (m *AlarmEvent) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmEvent.Merge(m, src)
+}
+func (m *AlarmEvent) XXX_Size() int {
+	return xxx_messageInfo_AlarmEvent.Size(m)
+}
+func (m *AlarmEvent) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmEvent proto.InternalMessageInfo
+
+func (m *AlarmEvent) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *AlarmEvent) GetType() AlarmEventType_Types {
+	if m != nil {
+		return m.Type
+	}
+	return AlarmEventType_COMMUNICATION
+}
+
+func (m *AlarmEvent) GetCategory() AlarmEventCategory_Types {
+	if m != nil {
+		return m.Category
+	}
+	return AlarmEventCategory_PON
+}
+
+func (m *AlarmEvent) GetState() AlarmEventState_Types {
+	if m != nil {
+		return m.State
+	}
+	return AlarmEventState_RAISED
+}
+
+func (m *AlarmEvent) GetSeverity() AlarmEventSeverity_Types {
+	if m != nil {
+		return m.Severity
+	}
+	return AlarmEventSeverity_INDETERMINATE
+}
+
+func (m *AlarmEvent) GetRaisedTs() *timestamp.Timestamp {
+	if m != nil {
+		return m.RaisedTs
+	}
+	return nil
+}
+
+func (m *AlarmEvent) GetReportedTs() *timestamp.Timestamp {
+	if m != nil {
+		return m.ReportedTs
+	}
+	return nil
+}
+
+func (m *AlarmEvent) GetChangedTs() *timestamp.Timestamp {
+	if m != nil {
+		return m.ChangedTs
+	}
+	return nil
+}
+
+func (m *AlarmEvent) GetResourceId() string {
+	if m != nil {
+		return m.ResourceId
+	}
+	return ""
+}
+
+func (m *AlarmEvent) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *AlarmEvent) GetContext() map[string]string {
+	if m != nil {
+		return m.Context
+	}
+	return nil
+}
+
+func (m *AlarmEvent) GetLogicalDeviceId() string {
+	if m != nil {
+		return m.LogicalDeviceId
+	}
+	return ""
+}
+
+func (m *AlarmEvent) GetAlarmTypeName() string {
+	if m != nil {
+		return m.AlarmTypeName
+	}
+	return ""
+}
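+
+// Sketch (values assumed; assumes time and github.com/golang/protobuf/ptypes
+// are imported alongside the timestamp package used above):
+//
+//	raised, _ := ptypes.TimestampProto(time.Now())
+//	alarm := &AlarmEvent{
+//		Id:       "voltha.some_olt.1234",
+//		Type:     AlarmEventType_COMMUNICATION,
+//		Category: AlarmEventCategory_PON,
+//		State:    AlarmEventState_RAISED,
+//		Severity: AlarmEventSeverity_MAJOR,
+//		RaisedTs: raised,
+//	}
+//	_ = alarm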
+
+//
+// Describes the events specific to device
+type DeviceEvent struct {
+	// Identifier of the originating resource of the event, for ex: device_id
+	ResourceId string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"`
+	// device_event_name clearly indicates the name of the device event
+	DeviceEventName string `protobuf:"bytes,2,opt,name=device_event_name,json=deviceEventName,proto3" json:"device_event_name,omitempty"`
+	// Textual explanation of the device event
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+	// Key/Value storage for extra information that may give context to the event
+	Context              map[string]string `protobuf:"bytes,4,rep,name=context,proto3" json:"context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *DeviceEvent) Reset()         { *m = DeviceEvent{} }
+func (m *DeviceEvent) String() string { return proto.CompactTextString(m) }
+func (*DeviceEvent) ProtoMessage()    {}
+func (*DeviceEvent) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{13}
+}
+
+func (m *DeviceEvent) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceEvent.Unmarshal(m, b)
+}
+func (m *DeviceEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceEvent.Marshal(b, m, deterministic)
+}
+func (m *DeviceEvent) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceEvent.Merge(m, src)
+}
+func (m *DeviceEvent) XXX_Size() int {
+	return xxx_messageInfo_DeviceEvent.Size(m)
+}
+func (m *DeviceEvent) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceEvent proto.InternalMessageInfo
+
+func (m *DeviceEvent) GetResourceId() string {
+	if m != nil {
+		return m.ResourceId
+	}
+	return ""
+}
+
+func (m *DeviceEvent) GetDeviceEventName() string {
+	if m != nil {
+		return m.DeviceEventName
+	}
+	return ""
+}
+
+func (m *DeviceEvent) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *DeviceEvent) GetContext() map[string]string {
+	if m != nil {
+		return m.Context
+	}
+	return nil
+}
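+
+// Sketch (the event name below is hypothetical): a device event keyed by the
+// originating resource, with free-form context.
+//
+//	de := &DeviceEvent{
+//		ResourceId:      "device-1",
+//		DeviceEventName: "onu_activated_raise_event",
+//		Context:         map[string]string{"onu_id": "1"},
+//	}
+//	_ = de.GetDeviceEventName()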
+
+//
+// Identifies the area of the system impacted by the event.
+type EventCategory struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EventCategory) Reset()         { *m = EventCategory{} }
+func (m *EventCategory) String() string { return proto.CompactTextString(m) }
+func (*EventCategory) ProtoMessage()    {}
+func (*EventCategory) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{14}
+}
+
+func (m *EventCategory) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EventCategory.Unmarshal(m, b)
+}
+func (m *EventCategory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EventCategory.Marshal(b, m, deterministic)
+}
+func (m *EventCategory) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventCategory.Merge(m, src)
+}
+func (m *EventCategory) XXX_Size() int {
+	return xxx_messageInfo_EventCategory.Size(m)
+}
+func (m *EventCategory) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventCategory.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventCategory proto.InternalMessageInfo
+
+//
+// Identifies the functional category originating the event.
+type EventSubCategory struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EventSubCategory) Reset()         { *m = EventSubCategory{} }
+func (m *EventSubCategory) String() string { return proto.CompactTextString(m) }
+func (*EventSubCategory) ProtoMessage()    {}
+func (*EventSubCategory) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{15}
+}
+
+func (m *EventSubCategory) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EventSubCategory.Unmarshal(m, b)
+}
+func (m *EventSubCategory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EventSubCategory.Marshal(b, m, deterministic)
+}
+func (m *EventSubCategory) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventSubCategory.Merge(m, src)
+}
+func (m *EventSubCategory) XXX_Size() int {
+	return xxx_messageInfo_EventSubCategory.Size(m)
+}
+func (m *EventSubCategory) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventSubCategory.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventSubCategory proto.InternalMessageInfo
+
+//
+// Identify the type of event
+type EventType struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EventType) Reset()         { *m = EventType{} }
+func (m *EventType) String() string { return proto.CompactTextString(m) }
+func (*EventType) ProtoMessage()    {}
+func (*EventType) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{16}
+}
+
+func (m *EventType) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EventType.Unmarshal(m, b)
+}
+func (m *EventType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EventType.Marshal(b, m, deterministic)
+}
+func (m *EventType) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventType.Merge(m, src)
+}
+func (m *EventType) XXX_Size() int {
+	return xxx_messageInfo_EventType.Size(m)
+}
+func (m *EventType) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventType proto.InternalMessageInfo
+
+//
+// Common event header carried by all events.
+type EventHeader struct {
+	// Unique ID for this event.  e.g. voltha.some_olt.1234
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// Refers to the functional area affected by the event
+	Category EventCategory_Types `protobuf:"varint,2,opt,name=category,proto3,enum=voltha.EventCategory_Types" json:"category,omitempty"`
+	// Refers to functional category of the event
+	SubCategory EventSubCategory_Types `protobuf:"varint,3,opt,name=sub_category,json=subCategory,proto3,enum=voltha.EventSubCategory_Types" json:"sub_category,omitempty"`
+	// Refers to the type of the event
+	Type EventType_Types `protobuf:"varint,4,opt,name=type,proto3,enum=voltha.EventType_Types" json:"type,omitempty"`
+	// The version identifier for this event type, allowing each event
+	// type to evolve independently. The version should be in
+	// “MAJOR.MINOR” format, and minor changes must only be additive and
+	// non-breaking.
+	TypeVersion string `protobuf:"bytes,5,opt,name=type_version,json=typeVersion,proto3" json:"type_version,omitempty"`
+	// Timestamp at which the event was first raised.
+	// This represents the UTC timestamp since epoch (in seconds) at which
+	// the event was first raised by the source entity.
+	// If the source entity doesn't send the raised_ts, this shall be set
+	// to the timestamp at which the event was received.
+	RaisedTs *timestamp.Timestamp `protobuf:"bytes,6,opt,name=raised_ts,json=raisedTs,proto3" json:"raised_ts,omitempty"`
+	// Timestamp at which the event was reported.
+	// This represents the UTC timestamp since epoch (in seconds) at which
+	// the event was reported (this timestamp is >= raised_ts).
+	// If the source entity that reported this event doesn't send the
+	// reported_ts, this shall be set to the same value as raised_ts.
+	ReportedTs           *timestamp.Timestamp `protobuf:"bytes,7,opt,name=reported_ts,json=reportedTs,proto3" json:"reported_ts,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *EventHeader) Reset()         { *m = EventHeader{} }
+func (m *EventHeader) String() string { return proto.CompactTextString(m) }
+func (*EventHeader) ProtoMessage()    {}
+func (*EventHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{17}
+}
+
+func (m *EventHeader) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EventHeader.Unmarshal(m, b)
+}
+func (m *EventHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EventHeader.Marshal(b, m, deterministic)
+}
+func (m *EventHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventHeader.Merge(m, src)
+}
+func (m *EventHeader) XXX_Size() int {
+	return xxx_messageInfo_EventHeader.Size(m)
+}
+func (m *EventHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventHeader proto.InternalMessageInfo
+
+func (m *EventHeader) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *EventHeader) GetCategory() EventCategory_Types {
+	if m != nil {
+		return m.Category
+	}
+	return EventCategory_COMMUNICATION
+}
+
+func (m *EventHeader) GetSubCategory() EventSubCategory_Types {
+	if m != nil {
+		return m.SubCategory
+	}
+	return EventSubCategory_PON
+}
+
+func (m *EventHeader) GetType() EventType_Types {
+	if m != nil {
+		return m.Type
+	}
+	return EventType_CONFIG_EVENT
+}
+
+func (m *EventHeader) GetTypeVersion() string {
+	if m != nil {
+		return m.TypeVersion
+	}
+	return ""
+}
+
+func (m *EventHeader) GetRaisedTs() *timestamp.Timestamp {
+	if m != nil {
+		return m.RaisedTs
+	}
+	return nil
+}
+
+func (m *EventHeader) GetReportedTs() *timestamp.Timestamp {
+	if m != nil {
+		return m.ReportedTs
+	}
+	return nil
+}
+
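A short sketch of constructing an EventHeader with the timestamp semantics described in the field comments above. ptypes.TimestampNow comes from the same golang/protobuf library this patch vendors; the ID string and enum choices are simply the examples and defaults already visible in this file:

	package main

	import (
		"github.com/golang/protobuf/ptypes"
		"github.com/opencord/voltha-protos/v3/go/voltha"
	)

	func main() {
		// When the source reports an event immediately, raised_ts and
		// reported_ts carry the same value, per the field comments above.
		now := ptypes.TimestampNow()
		hdr := &voltha.EventHeader{
			Id:          "voltha.some_olt.1234",
			Category:    voltha.EventCategory_COMMUNICATION,
			SubCategory: voltha.EventSubCategory_PON,
			Type:        voltha.EventType_CONFIG_EVENT,
			TypeVersion: "0.1",
			RaisedTs:    now,
			ReportedTs:  now,
		}
		_ = hdr
	}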
+//
+// Event Structure
+type Event struct {
+	// event header
+	Header *EventHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// oneof event types referred to by EventType.
+	//
+	// Types that are valid to be assigned to EventType:
+	//	*Event_ConfigEvent
+	//	*Event_KpiEvent
+	//	*Event_KpiEvent2
+	//	*Event_DeviceEvent
+	EventType            isEvent_EventType `protobuf_oneof:"event_type"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *Event) Reset()         { *m = Event{} }
+func (m *Event) String() string { return proto.CompactTextString(m) }
+func (*Event) ProtoMessage()    {}
+func (*Event) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{18}
+}
+
+func (m *Event) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Event.Unmarshal(m, b)
+}
+func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Event.Marshal(b, m, deterministic)
+}
+func (m *Event) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Event.Merge(m, src)
+}
+func (m *Event) XXX_Size() int {
+	return xxx_messageInfo_Event.Size(m)
+}
+func (m *Event) XXX_DiscardUnknown() {
+	xxx_messageInfo_Event.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Event proto.InternalMessageInfo
+
+func (m *Event) GetHeader() *EventHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type isEvent_EventType interface {
+	isEvent_EventType()
+}
+
+type Event_ConfigEvent struct {
+	ConfigEvent *ConfigEvent `protobuf:"bytes,2,opt,name=config_event,json=configEvent,proto3,oneof"`
+}
+
+type Event_KpiEvent struct {
+	KpiEvent *KpiEvent `protobuf:"bytes,3,opt,name=kpi_event,json=kpiEvent,proto3,oneof"`
+}
+
+type Event_KpiEvent2 struct {
+	KpiEvent2 *KpiEvent2 `protobuf:"bytes,4,opt,name=kpi_event2,json=kpiEvent2,proto3,oneof"`
+}
+
+type Event_DeviceEvent struct {
+	DeviceEvent *DeviceEvent `protobuf:"bytes,5,opt,name=device_event,json=deviceEvent,proto3,oneof"`
+}
+
+func (*Event_ConfigEvent) isEvent_EventType() {}
+
+func (*Event_KpiEvent) isEvent_EventType() {}
+
+func (*Event_KpiEvent2) isEvent_EventType() {}
+
+func (*Event_DeviceEvent) isEvent_EventType() {}
+
+func (m *Event) GetEventType() isEvent_EventType {
+	if m != nil {
+		return m.EventType
+	}
+	return nil
+}
+
+func (m *Event) GetConfigEvent() *ConfigEvent {
+	if x, ok := m.GetEventType().(*Event_ConfigEvent); ok {
+		return x.ConfigEvent
+	}
+	return nil
+}
+
+func (m *Event) GetKpiEvent() *KpiEvent {
+	if x, ok := m.GetEventType().(*Event_KpiEvent); ok {
+		return x.KpiEvent
+	}
+	return nil
+}
+
+func (m *Event) GetKpiEvent2() *KpiEvent2 {
+	if x, ok := m.GetEventType().(*Event_KpiEvent2); ok {
+		return x.KpiEvent2
+	}
+	return nil
+}
+
+func (m *Event) GetDeviceEvent() *DeviceEvent {
+	if x, ok := m.GetEventType().(*Event_DeviceEvent); ok {
+		return x.DeviceEvent
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Event) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*Event_ConfigEvent)(nil),
+		(*Event_KpiEvent)(nil),
+		(*Event_KpiEvent2)(nil),
+		(*Event_DeviceEvent)(nil),
+	}
+}
+
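Since exactly one of the oneof wrappers above may be set at a time, dispatching on an incoming Event is a type switch over GetEventType(). A minimal sketch, using only types defined in this file:

	package main

	import (
		"fmt"

		"github.com/opencord/voltha-protos/v3/go/voltha"
	)

	func main() {
		// Wrap a DeviceEvent in the Event oneof; assigning a different
		// wrapper would replace it, since only one variant can be set.
		e := &voltha.Event{
			Header:    &voltha.EventHeader{Id: "voltha.some_olt.1234"},
			EventType: &voltha.Event_DeviceEvent{DeviceEvent: &voltha.DeviceEvent{ResourceId: "olt-0"}},
		}
		// Type-switch on the wrapper to handle whichever variant is populated.
		switch et := e.GetEventType().(type) {
		case *voltha.Event_DeviceEvent:
			fmt.Println("device event for", et.DeviceEvent.GetResourceId())
		case *voltha.Event_KpiEvent2:
			fmt.Println("kpi event")
		}
	}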
+func init() {
+	proto.RegisterEnum("voltha.ConfigEventType_Types", ConfigEventType_Types_name, ConfigEventType_Types_value)
+	proto.RegisterEnum("voltha.KpiEventType_Types", KpiEventType_Types_name, KpiEventType_Types_value)
+	proto.RegisterEnum("voltha.AlarmEventType_Types", AlarmEventType_Types_name, AlarmEventType_Types_value)
+	proto.RegisterEnum("voltha.AlarmEventCategory_Types", AlarmEventCategory_Types_name, AlarmEventCategory_Types_value)
+	proto.RegisterEnum("voltha.AlarmEventState_Types", AlarmEventState_Types_name, AlarmEventState_Types_value)
+	proto.RegisterEnum("voltha.AlarmEventSeverity_Types", AlarmEventSeverity_Types_name, AlarmEventSeverity_Types_value)
+	proto.RegisterEnum("voltha.EventCategory_Types", EventCategory_Types_name, EventCategory_Types_value)
+	proto.RegisterEnum("voltha.EventSubCategory_Types", EventSubCategory_Types_name, EventSubCategory_Types_value)
+	proto.RegisterEnum("voltha.EventType_Types", EventType_Types_name, EventType_Types_value)
+	proto.RegisterType((*ConfigEventType)(nil), "voltha.ConfigEventType")
+	proto.RegisterType((*ConfigEvent)(nil), "voltha.ConfigEvent")
+	proto.RegisterType((*KpiEventType)(nil), "voltha.KpiEventType")
+	proto.RegisterType((*MetricMetaData)(nil), "voltha.MetricMetaData")
+	proto.RegisterMapType((map[string]string)(nil), "voltha.MetricMetaData.ContextEntry")
+	proto.RegisterType((*MetricValuePairs)(nil), "voltha.MetricValuePairs")
+	proto.RegisterMapType((map[string]float32)(nil), "voltha.MetricValuePairs.MetricsEntry")
+	proto.RegisterType((*MetricInformation)(nil), "voltha.MetricInformation")
+	proto.RegisterMapType((map[string]float32)(nil), "voltha.MetricInformation.MetricsEntry")
+	proto.RegisterType((*KpiEvent)(nil), "voltha.KpiEvent")
+	proto.RegisterMapType((map[string]*MetricValuePairs)(nil), "voltha.KpiEvent.PrefixesEntry")
+	proto.RegisterType((*KpiEvent2)(nil), "voltha.KpiEvent2")
+	proto.RegisterType((*AlarmEventType)(nil), "voltha.AlarmEventType")
+	proto.RegisterType((*AlarmEventCategory)(nil), "voltha.AlarmEventCategory")
+	proto.RegisterType((*AlarmEventState)(nil), "voltha.AlarmEventState")
+	proto.RegisterType((*AlarmEventSeverity)(nil), "voltha.AlarmEventSeverity")
+	proto.RegisterType((*AlarmEvent)(nil), "voltha.AlarmEvent")
+	proto.RegisterMapType((map[string]string)(nil), "voltha.AlarmEvent.ContextEntry")
+	proto.RegisterType((*DeviceEvent)(nil), "voltha.DeviceEvent")
+	proto.RegisterMapType((map[string]string)(nil), "voltha.DeviceEvent.ContextEntry")
+	proto.RegisterType((*EventCategory)(nil), "voltha.EventCategory")
+	proto.RegisterType((*EventSubCategory)(nil), "voltha.EventSubCategory")
+	proto.RegisterType((*EventType)(nil), "voltha.EventType")
+	proto.RegisterType((*EventHeader)(nil), "voltha.EventHeader")
+	proto.RegisterType((*Event)(nil), "voltha.Event")
+}
+
+func init() { proto.RegisterFile("voltha_protos/events.proto", fileDescriptor_e63e6c07044fd2c4) }
+
+var fileDescriptor_e63e6c07044fd2c4 = []byte{
+	// 1388 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0x5d, 0x6e, 0xdb, 0xc6,
+	0x16, 0x16, 0xa9, 0xff, 0x43, 0xd9, 0xa6, 0x27, 0x17, 0xf7, 0xea, 0x2a, 0xb9, 0x89, 0x2f, 0x8b,
+	0x06, 0x46, 0x82, 0x4a, 0xad, 0x5c, 0x20, 0x8e, 0xd3, 0xa2, 0x55, 0x64, 0x36, 0x66, 0x12, 0x51,
+	0x2e, 0x25, 0x3b, 0x68, 0x5f, 0x84, 0xb1, 0x38, 0x96, 0x08, 0x4b, 0xa2, 0x40, 0x8e, 0xd4, 0x78,
+	0x01, 0x7d, 0xee, 0x02, 0xba, 0x84, 0xee, 0xa1, 0x6f, 0x5d, 0x46, 0xd1, 0x4d, 0x74, 0x01, 0xc5,
+	0xfc, 0x50, 0x24, 0x65, 0xa5, 0x79, 0x30, 0xda, 0x3e, 0x69, 0x78, 0xe6, 0x7c, 0xe7, 0xe7, 0x3b,
+	0x73, 0xce, 0x8c, 0xa0, 0xb6, 0xf4, 0x27, 0x74, 0x8c, 0x07, 0xf3, 0xc0, 0xa7, 0x7e, 0xd8, 0x20,
+	0x4b, 0x32, 0xa3, 0x61, 0x9d, 0x7f, 0xa1, 0x82, 0xd8, 0xab, 0x55, 0xd3, 0x3a, 0x53, 0x42, 0xb1,
+	0xd0, 0xa8, 0xdd, 0x1b, 0xf9, 0xfe, 0x68, 0x42, 0x1a, 0x78, 0xee, 0x35, 0xf0, 0x6c, 0xe6, 0x53,
+	0x4c, 0x3d, 0x7f, 0x26, 0xf1, 0xb5, 0x07, 0x72, 0x97, 0x7f, 0x5d, 0x2c, 0x2e, 0x1b, 0xd4, 0x9b,
+	0x92, 0x90, 0xe2, 0xe9, 0x5c, 0x28, 0x18, 0xcf, 0x60, 0xa7, 0xed, 0xcf, 0x2e, 0xbd, 0x91, 0xc9,
+	0xdc, 0xf6, 0xaf, 0xe7, 0xc4, 0xd8, 0x87, 0x3c, 0xfb, 0x0d, 0x51, 0x11, 0xb2, 0xd8, 0x75, 0xf5,
+	0x0c, 0x02, 0x28, 0x04, 0x64, 0xea, 0x2f, 0x89, 0xae, 0xb0, 0xf5, 0x62, 0xee, 0x62, 0x4a, 0x74,
+	0xd5, 0x18, 0x83, 0x96, 0x00, 0xa3, 0x4f, 0x20, 0x47, 0xaf, 0xe7, 0xa4, 0xaa, 0xec, 0x29, 0xfb,
+	0xdb, 0xcd, 0xff, 0xd5, 0x45, 0xcc, 0xf5, 0x35, 0xfb, 0x75, 0x6e, 0xdc, 0xe1, 0xaa, 0x08, 0x41,
+	0x6e, 0x8c, 0xc3, 0x71, 0x55, 0xdd, 0x53, 0xf6, 0xcb, 0x0e, 0x5f, 0x33, 0x99, 0x8b, 0x29, 0xae,
+	0x66, 0x85, 0x8c, 0xad, 0x8d, 0x47, 0x50, 0x79, 0x35, 0xf7, 0xe2, 0x18, 0x6b, 0x51, 0x8c, 0x65,
+	0xc8, 0x87, 0x13, 0x6f, 0x48, 0xf4, 0x0c, 0x2a, 0x80, 0x4a, 0x43, 0x5d, 0x31, 0x7e, 0x52, 0x61,
+	0xbb, 0x43, 0x68, 0xe0, 0x0d, 0x3b, 0x84, 0xe2, 0x63, 0x4c, 0x31, 0xfa, 0x17, 0xe4, 0xa9, 0x47,
+	0x27, 0x22, 0xb4, 0xb2, 0x23, 0x3e, 0xd0, 0x36, 0x03, 0x70, 0xd7, 0x8a, 0xa3, 0xd2, 0x10, 0x3d,
+	0x82, 0xdd, 0x89, 0x3f, 0xf2, 0x86, 0x78, 0x32, 0x70, 0xc9, 0xd2, 0x1b, 0x92, 0x81, 0xe7, 0xca,
+	0x28, 0x76, 0xe4, 0xc6, 0x31, 0x97, 0x5b, 0x2e, 0xba, 0x0b, 0xe5, 0x90, 0x04, 0x1e, 0x9e, 0x0c,
+	0x66, 0x7e, 0x35, 0xc7, 0x75, 0x4a, 0x42, 0x60, 0xfb, 0x6c, 0x33, 0x36, 0x90, 0x17, 0x9b, 0x6e,
+	0x84, 0xfc, 0x1c, 0x8a, 0x43, 0x7f, 0x46, 0xc9, 0x5b, 0x5a, 0x2d, 0xec, 0x65, 0xf7, 0xb5, 0xe6,
+	0x07, 0x11, 0x51, 0xe9, 0xa0, 0x19, 0x6f, 0x4c, 0xcb, 0x9c, 0xd1, 0xe0, 0xda, 0x89, 0x30, 0x8c,
+	0x9d, 0xc5, 0xc2, 0x73, 0xab, 0x45, 0xc1, 0x0e, 0x5b, 0xd7, 0x8e, 0xa0, 0x92, 0x54, 0x46, 0x3a,
+	0x64, 0xaf, 0xc8, 0xb5, 0x4c, 0x96, 0x2d, 0x19, 0x01, 0x4b, 0x3c, 0x59, 0x10, 0x49, 0xb4, 0xf8,
+	0x38, 0x52, 0x0f, 0x15, 0xe3, 0x07, 0x05, 0x74, 0xe1, 0xf8, 0x9c, 0xc9, 0x4e, 0xb1, 0x17, 0x84,
+	0xe8, 0x0b, 0x28, 0x4e, 0xb9, 0x2c, 0xac, 0x2a, 0x3c, 0xc6, 0x0f, 0xd3, 0x31, 0xc6, 0xaa, 0x52,
+	0x10, 0xca, 0x28, 0x25, 0x8a, 0x45, 0x94, 0xdc, 0x78, 0x5f, 0x44, 0x6a, 0x32, 0xa2, 0x5f, 0x14,
+	0xd8, 0x15, 0x60, 0x6b, 0x76, 0xe9, 0x07, 0x53, 0x7e, 0xa0, 0x51, 0x13, 0x4a, 0xec, 0xd4, 0xf3,
+	0x93, 0xc1, 0xcc, 0x68, 0xcd, 0x7f, 0x6f, 0xe6, 0xcd, 0x59, 0xe9, 0xa1, 0x2f, 0xe3, 0x34, 0x54,
+	0x9e, 0xc6, 0xc3, 0x34, 0x24, 0x61, 0xff, 0x2f, 0xc8, 0xe3, 0x57, 0x05, 0x4a, 0xd1, 0xa1, 0x45,
+	0xf5, 0x54, 0x6f, 0xd4, 0xa2, 0x38, 0x92, 0x87, 0x3a, 0xd5, 0x18, 0xf1, 0xd9, 0x54, 0xf9, 0xd9,
+	0x3c, 0x82, 0xd2, 0x3c, 0x20, 0x97, 0xde, 0x5b, 0x12, 0x56, 0xb3, 0x3c, 0x97, 0xfb, 0xeb, 0x36,
+	0xea, 0xa7, 0x52, 0x41, 0xe4, 0xb0, 0xd2, 0xaf, 0x9d, 0xc1, 0x56, 0x6a, 0x6b, 0x43, 0x16, 0xf5,
+	0x64, 0x16, 0x5a, 0xb3, 0xfa, 0xae, 0x72, 0x27, 0xf3, 0xfb, 0x5e, 0x81, 0x72, 0xe4, 0xbb, 0x79,
+	0x8b, 0x04, 0x45, 0xf3, 0x1d, 0x02, 0xf0, 0x46, 0x1e, 0xc8, 0xde, 0x67, 0x29, 0xfe, 0xf7, 0x9d,
+	0xe5, 0x72, 0xca, 0x5c, 0x99, 0xd5, 0xdb, 0xf8, 0x0e, 0xb6, 0x5b, 0x13, 0x1c, 0x4c, 0xe3, 0xe9,
+	0x40, 0xa2, 0xe9, 0xb0, 0x0b, 0x5b, 0xed, 0x6e, 0xa7, 0x73, 0x66, 0x5b, 0xed, 0x56, 0xdf, 0xea,
+	0xda, 0x7a, 0x06, 0xed, 0x80, 0x66, 0xda, 0xe7, 0x96, 0xd3, 0xb5, 0x3b, 0xa6, 0xdd, 0xd7, 0x15,
+	0xb4, 0x05, 0x65, 0xf3, 0xeb, 0x33, 0xeb, 0x94, 0x7f, 0xaa, 0x48, 0x83, 0x62, 0xcf, 0x74, 0xce,
+	0xad, 0xb6, 0xa9, 0x67, 0xd1, 0x36, 0xc0, 0xa9, 0xd3, 0x6d, 0x9b, 0xbd, 0x9e, 0x65, 0xbf, 0xd0,
+	0x73, 0xa8, 0x02, 0xa5, 0x9e, 0xd9, 0x3e, 0x73, 0xac, 0xfe, 0x37, 0x7a, 0xde, 0x78, 0x09, 0x28,
+	0x76, 0xdc, 0xc6, 0x94, 0x8c, 0xfc, 0xe0, 0xda, 0xf8, 0x34, 0x31, 0x3e, 0x4f, 0xb9, 0xcb, 0x22,
+	0x64, 0xbb, 0xaf, 0x99, 0x2b, 0xb6, 0xe0, 0x4e, 0xf8, 0xe2, 0x4c, 0xcf, 0xb2, 0x85, 0x6d, 0x5b,
+	0x7a, 0xce, 0x38, 0x80, 0x9d, 0xd8, 0x56, 0x8f, 0x62, 0x4a, 0x8c, 0xbd, 0xc8, 0x10, 0x40, 0xc1,
+	0x69, 0x59, 0x3d, 0xf3, 0x58, 0xcf, 0xb0, 0xf0, 0xda, 0xaf, 0xcd, 0x96, 0x63, 0x1e, 0xeb, 0x8a,
+	0x81, 0x93, 0x01, 0xf4, 0xc8, 0x92, 0x04, 0x1e, 0xbd, 0x36, 0x5e, 0x25, 0xb2, 0xb7, 0xec, 0x63,
+	0xb3, 0x6f, 0x3a, 0x1d, 0xcb, 0x6e, 0xf5, 0x4d, 0x01, 0x7f, 0xd3, 0x72, 0x6c, 0x96, 0x8d, 0xc2,
+	0x66, 0x67, 0xc7, 0xb2, 0xbb, 0x8e, 0xae, 0xf2, 0x65, 0xeb, 0x65, 0xd7, 0xd1, 0xb3, 0x2c, 0xc7,
+	0xb6, 0x63, 0xf5, 0xad, 0x76, 0xeb, 0xb5, 0x9e, 0x33, 0x7e, 0xce, 0x03, 0xc4, 0x3e, 0x58, 0xd5,
+	0x3c, 0x57, 0x1e, 0x1c, 0xd5, 0x73, 0xd1, 0xc7, 0xb2, 0xea, 0x2a, 0xaf, 0xfa, 0xbd, 0xa8, 0x5e,
+	0xe9, 0x7a, 0xa4, 0xea, 0xfe, 0x19, 0x94, 0x86, 0x92, 0x2a, 0x3e, 0x5b, 0xb7, 0x9b, 0x7b, 0x37,
+	0x51, 0x11, 0x99, 0x12, 0xb9, 0x42, 0xa0, 0x03, 0xc8, 0x87, 0x8c, 0x1c, 0x3e, 0x72, 0x13, 0x77,
+	0xcc, 0x1a, 0x77, 0x12, 0x27, 0x74, 0x99, 0xcb, 0x50, 0x92, 0xc3, 0xa7, 0xf1, 0x46, 0x97, 0x11,
+	0x7d, 0x91, 0xcb, 0x08, 0x81, 0x9e, 0x40, 0x39, 0xc0, 0x5e, 0x48, 0xdc, 0x01, 0x0d, 0xab, 0x05,
+	0xde, 0x1e, 0xb5, 0xba, 0xb8, 0x56, 0xeb, 0xd1, 0xb5, 0x5a, 0xef, 0x47, 0xd7, 0xaa, 0x53, 0x12,
+	0xca, 0xfd, 0x10, 0x3d, 0x03, 0x2d, 0x20, 0x73, 0x3f, 0xa0, 0x02, 0x5a, 0x7c, 0x2f, 0x14, 0x22,
+	0xf5, 0x7e, 0x88, 0x9e, 0x02, 0x0c, 0xc7, 0x78, 0x36, 0x12, 0xd8, 0xd2, 0x7b, 0xb1, 0x65, 0xa9,
+	0xdd, 0x0f, 0xd1, 0x03, 0xe6, 0x37, 0xf4, 0x17, 0x81, 0xb8, 0x7f, 0xca, 0xbc, 0x58, 0x10, 0x89,
+	0x2c, 0x17, 0xed, 0x81, 0xe6, 0x92, 0x70, 0x18, 0x78, 0x73, 0xd6, 0x4a, 0x55, 0xe0, 0x0a, 0x49,
+	0x11, 0x7a, 0x1a, 0xdf, 0x51, 0x1a, 0xef, 0xc4, 0x07, 0x37, 0x09, 0x7b, 0xc7, 0xfd, 0xb4, 0xf1,
+	0x12, 0xad, 0x6c, 0xbe, 0x44, 0x1f, 0xc2, 0x0e, 0x66, 0xf6, 0x06, 0xec, 0x64, 0x0c, 0x66, 0x78,
+	0x4a, 0xaa, 0x5b, 0x5c, 0x73, 0x8b, 0x8b, 0x59, 0x25, 0x6c, 0x3c, 0x25, 0xb7, 0xba, 0xdf, 0x7e,
+	0x57, 0x40, 0x13, 0x0e, 0xc5, 0x09, 0x5e, 0x63, 0x47, 0xb9, 0xc1, 0xce, 0x23, 0xd8, 0x95, 0x81,
+	0xf3, 0x97, 0x98, 0x08, 0x4b, 0x98, 0xdd, 0x71, 0x63, 0x43, 0x2c, 0xb0, 0x75, 0x26, 0xb3, 0x37,
+	0x99, 0x3c, 0x8a, 0x99, 0xcc, 0x71, 0x26, 0x57, 0x47, 0x2f, 0x11, 0xd4, 0x66, 0x2a, 0x6f, 0x95,
+	0xf6, 0x12, 0xb6, 0xd2, 0x63, 0xe9, 0x6f, 0x9a, 0x89, 0x27, 0xa0, 0x8b, 0x76, 0x5a, 0x5c, 0xdc,
+	0x72, 0x22, 0xbe, 0x81, 0x72, 0x3c, 0xd1, 0x5f, 0x46, 0x26, 0x74, 0xa8, 0xb4, 0xbb, 0xf6, 0x57,
+	0xd6, 0x8b, 0x81, 0x79, 0xce, 0x82, 0xcb, 0xb0, 0x58, 0x5f, 0x9d, 0x5a, 0xf2, 0x53, 0x61, 0xe1,
+	0xad, 0x3e, 0x9b, 0xba, 0xca, 0x00, 0xc7, 0x26, 0x0b, 0x5d, 0x6a, 0x64, 0x8d, 0xdf, 0x54, 0xd0,
+	0xb8, 0xe5, 0x13, 0x82, 0x5d, 0x12, 0xdc, 0x98, 0x69, 0x4f, 0x12, 0x13, 0x4a, 0xcc, 0xb5, 0xbb,
+	0x51, 0xcd, 0xfe, 0x7c, 0x38, 0xb5, 0xa0, 0x12, 0x2e, 0x2e, 0x06, 0x6b, 0xe3, 0xed, 0x7e, 0x0a,
+	0x9c, 0xe0, 0x45, 0xe2, 0xb5, 0x30, 0x16, 0xa1, 0xc7, 0x72, 0x9e, 0x8a, 0xf1, 0xf6, 0x9f, 0x14,
+	0xf4, 0xc6, 0x28, 0xfd, 0x3f, 0x54, 0x78, 0xe3, 0x2c, 0x49, 0x10, 0xb2, 0xe3, 0x27, 0x5e, 0x9a,
+	0x1a, 0x93, 0x9d, 0x0b, 0xd1, 0x3f, 0x33, 0xbc, 0x8c, 0x1f, 0x55, 0xc8, 0x8b, 0x6e, 0x7b, 0x0c,
+	0x85, 0x31, 0x67, 0x59, 0xbe, 0xd9, 0xee, 0xa4, 0x32, 0x12, 0x05, 0x70, 0xa4, 0x0a, 0x3a, 0x84,
+	0xca, 0x90, 0xff, 0x57, 0x10, 0x9d, 0x27, 0xdf, 0x22, 0x77, 0x36, 0xfc, 0x8f, 0x38, 0xc9, 0x38,
+	0xda, 0x30, 0xf1, 0xcf, 0xa3, 0x01, 0xe5, 0xab, 0xb9, 0x27, 0x61, 0x59, 0x0e, 0xd3, 0xd7, 0x5f,
+	0x20, 0x27, 0x19, 0xa7, 0x74, 0x15, 0x3d, 0xc7, 0x9a, 0x00, 0x2b, 0x40, 0x93, 0xb3, 0xad, 0x35,
+	0x77, 0xd7, 0x11, 0xcd, 0x93, 0x8c, 0x53, 0xbe, 0x5a, 0xbd, 0x70, 0x0e, 0xa1, 0x92, 0x1c, 0x0c,
+	0x9c, 0xee, 0x44, 0x78, 0x89, 0x7e, 0x66, 0xe1, 0x25, 0x46, 0xc5, 0xf3, 0x0a, 0x80, 0x98, 0x25,
+	0xac, 0x34, 0xcf, 0x4d, 0xb8, 0xe3, 0x07, 0xa3, 0xba, 0x3f, 0x27, 0xb3, 0xa1, 0x1f, 0xb8, 0x12,
+	0xff, 0x6d, 0x7d, 0xe4, 0xd1, 0xf1, 0xe2, 0xa2, 0x3e, 0xf4, 0xa7, 0x8d, 0x68, 0xaf, 0x21, 0xf6,
+	0x3e, 0x92, 0x7f, 0xfb, 0x96, 0x07, 0x8d, 0x91, 0x2f, 0x65, 0x17, 0x05, 0x2e, 0x3c, 0xf8, 0x23,
+	0x00, 0x00, 0xff, 0xff, 0xe5, 0xa6, 0xf9, 0x0e, 0x3f, 0x0e, 0x00, 0x00,
+}
diff --git a/vendor/github.com/opencord/voltha-protos/v3/go/voltha/health.pb.go b/vendor/github.com/opencord/voltha-protos/v3/go/voltha/health.pb.go
new file mode 100644
index 0000000..42b0541
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v3/go/voltha/health.pb.go
@@ -0,0 +1,200 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/health.proto
+
+package voltha
+
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	empty "github.com/golang/protobuf/ptypes/empty"
+	_ "github.com/opencord/voltha-protos/v3/go/common"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	grpc "google.golang.org/grpc"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// Health states
+type HealthStatus_HealthState int32
+
+const (
+	HealthStatus_HEALTHY    HealthStatus_HealthState = 0
+	HealthStatus_OVERLOADED HealthStatus_HealthState = 1
+	HealthStatus_DYING      HealthStatus_HealthState = 2
+)
+
+var HealthStatus_HealthState_name = map[int32]string{
+	0: "HEALTHY",
+	1: "OVERLOADED",
+	2: "DYING",
+}
+
+var HealthStatus_HealthState_value = map[string]int32{
+	"HEALTHY":    0,
+	"OVERLOADED": 1,
+	"DYING":      2,
+}
+
+func (x HealthStatus_HealthState) String() string {
+	return proto.EnumName(HealthStatus_HealthState_name, int32(x))
+}
+
+func (HealthStatus_HealthState) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_dd1fc2b2d96d69b8, []int{0, 0}
+}
+
+// Encode health status of a Voltha instance
+type HealthStatus struct {
+	// Current state of health of this Voltha instance
+	State                HealthStatus_HealthState `protobuf:"varint,1,opt,name=state,proto3,enum=voltha.HealthStatus_HealthState" json:"state,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
+	XXX_unrecognized     []byte                   `json:"-"`
+	XXX_sizecache        int32                    `json:"-"`
+}
+
+func (m *HealthStatus) Reset()         { *m = HealthStatus{} }
+func (m *HealthStatus) String() string { return proto.CompactTextString(m) }
+func (*HealthStatus) ProtoMessage()    {}
+func (*HealthStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_dd1fc2b2d96d69b8, []int{0}
+}
+
+func (m *HealthStatus) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_HealthStatus.Unmarshal(m, b)
+}
+func (m *HealthStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_HealthStatus.Marshal(b, m, deterministic)
+}
+func (m *HealthStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HealthStatus.Merge(m, src)
+}
+func (m *HealthStatus) XXX_Size() int {
+	return xxx_messageInfo_HealthStatus.Size(m)
+}
+func (m *HealthStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_HealthStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HealthStatus proto.InternalMessageInfo
+
+func (m *HealthStatus) GetState() HealthStatus_HealthState {
+	if m != nil {
+		return m.State
+	}
+	return HealthStatus_HEALTHY
+}
+
+func init() {
+	proto.RegisterEnum("voltha.HealthStatus_HealthState", HealthStatus_HealthState_name, HealthStatus_HealthState_value)
+	proto.RegisterType((*HealthStatus)(nil), "voltha.HealthStatus")
+}
+
+func init() { proto.RegisterFile("voltha_protos/health.proto", fileDescriptor_dd1fc2b2d96d69b8) }
+
+var fileDescriptor_dd1fc2b2d96d69b8 = []byte{
+	// 302 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2a, 0xcb, 0xcf, 0x29,
+	0xc9, 0x48, 0x8c, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x2f, 0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9,
+	0xd0, 0x03, 0xf3, 0x84, 0xd8, 0x20, 0x72, 0x52, 0x32, 0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9, 0xfa,
+	0x89, 0x05, 0x99, 0xfa, 0x89, 0x79, 0x79, 0xf9, 0x25, 0x89, 0x25, 0x99, 0xf9, 0x79, 0xc5, 0x10,
+	0x55, 0x52, 0xd2, 0x50, 0x59, 0x30, 0x2f, 0xa9, 0x34, 0x4d, 0x3f, 0x35, 0xb7, 0xa0, 0xa4, 0x12,
+	0x2a, 0x29, 0x81, 0x6a, 0x7c, 0x6e, 0x6a, 0x49, 0x22, 0x44, 0x46, 0xa9, 0x85, 0x91, 0x8b, 0xc7,
+	0x03, 0x6c, 0x5b, 0x70, 0x49, 0x62, 0x49, 0x69, 0xb1, 0x90, 0x2d, 0x17, 0x6b, 0x71, 0x49, 0x62,
+	0x49, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x9f, 0x91, 0x82, 0x1e, 0x44, 0xab, 0x1e, 0xb2, 0x22,
+	0x24, 0x4e, 0xaa, 0x13, 0xeb, 0x8b, 0x6f, 0x67, 0x65, 0x19, 0x83, 0x20, 0xba, 0x94, 0x4c, 0xb9,
+	0xb8, 0x91, 0x24, 0x85, 0xb8, 0xb9, 0xd8, 0x3d, 0x5c, 0x1d, 0x7d, 0x42, 0x3c, 0x22, 0x05, 0x18,
+	0x84, 0xf8, 0xb8, 0xb8, 0xfc, 0xc3, 0x5c, 0x83, 0x7c, 0xfc, 0x1d, 0x5d, 0x5c, 0x5d, 0x04, 0x18,
+	0x85, 0x38, 0xb9, 0x58, 0x5d, 0x22, 0x3d, 0xfd, 0xdc, 0x05, 0x98, 0x8c, 0x12, 0xb9, 0x78, 0xa1,
+	0xda, 0x52, 0x8b, 0xca, 0x32, 0x93, 0x53, 0x85, 0x02, 0xb8, 0xf8, 0xdd, 0x53, 0x4b, 0x50, 0x5c,
+	0x26, 0xa6, 0x07, 0xf1, 0xa2, 0x1e, 0xcc, 0x8b, 0x7a, 0xae, 0x20, 0x2f, 0x4a, 0x89, 0x60, 0x73,
+	0xa2, 0x12, 0x7f, 0xd3, 0xe5, 0x27, 0x93, 0x99, 0x38, 0x85, 0xd8, 0xa1, 0x81, 0xe9, 0xe4, 0xca,
+	0x25, 0x9c, 0x5f, 0x94, 0xae, 0x97, 0x5f, 0x90, 0x9a, 0x97, 0x9c, 0x5f, 0x94, 0x02, 0xd5, 0x14,
+	0xa5, 0x97, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x0f, 0x93, 0xd3, 0x87,
+	0xc8, 0xe9, 0x42, 0x83, 0xab, 0xcc, 0x58, 0x3f, 0x3d, 0x1f, 0x2a, 0x96, 0xc4, 0x06, 0x16, 0x34,
+	0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x39, 0x87, 0x64, 0xd7, 0xb2, 0x01, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// HealthServiceClient is the client API for HealthService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type HealthServiceClient interface {
+	// Return current health status of a Voltha instance
+	GetHealthStatus(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*HealthStatus, error)
+}
+
+type healthServiceClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewHealthServiceClient(cc *grpc.ClientConn) HealthServiceClient {
+	return &healthServiceClient{cc}
+}
+
+func (c *healthServiceClient) GetHealthStatus(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*HealthStatus, error) {
+	out := new(HealthStatus)
+	err := c.cc.Invoke(ctx, "/voltha.HealthService/GetHealthStatus", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// HealthServiceServer is the server API for HealthService service.
+type HealthServiceServer interface {
+	// Return current health status of a Voltha instance
+	GetHealthStatus(context.Context, *empty.Empty) (*HealthStatus, error)
+}
+
+func RegisterHealthServiceServer(s *grpc.Server, srv HealthServiceServer) {
+	s.RegisterService(&_HealthService_serviceDesc, srv)
+}
+
+func _HealthService_GetHealthStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(HealthServiceServer).GetHealthStatus(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.HealthService/GetHealthStatus",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(HealthServiceServer).GetHealthStatus(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _HealthService_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "voltha.HealthService",
+	HandlerType: (*HealthServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "GetHealthStatus",
+			Handler:    _HealthService_GetHealthStatus_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "voltha_protos/health.proto",
+}
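A usage sketch for the generated HealthService client above. The endpoint address is a placeholder to be replaced with the real core address, and grpc.WithInsecure matches the vendored grpc version's dial API:

	package main

	import (
		"context"
		"log"

		"github.com/golang/protobuf/ptypes/empty"
		"github.com/opencord/voltha-protos/v3/go/voltha"
		"google.golang.org/grpc"
	)

	func main() {
		// "localhost:50057" is a placeholder endpoint for this sketch.
		conn, err := grpc.Dial("localhost:50057", grpc.WithInsecure())
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()

		client := voltha.NewHealthServiceClient(conn)
		status, err := client.GetHealthStatus(context.Background(), &empty.Empty{})
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("health: %v", status.GetState()) // HEALTHY, OVERLOADED, or DYING
	}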
diff --git a/vendor/github.com/opencord/voltha-protos/v3/go/voltha/logical_device.pb.go b/vendor/github.com/opencord/voltha-protos/v3/go/voltha/logical_device.pb.go
new file mode 100644
index 0000000..8ebaa02
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v3/go/voltha/logical_device.pb.go
@@ -0,0 +1,391 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/logical_device.proto
+
+package voltha
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	_ "github.com/opencord/voltha-protos/v3/go/common"
+	openflow_13 "github.com/opencord/voltha-protos/v3/go/openflow_13"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type LogicalPortId struct {
+	// unique id of logical device
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// id of the port on the logical device
+	PortId               string   `protobuf:"bytes,2,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LogicalPortId) Reset()         { *m = LogicalPortId{} }
+func (m *LogicalPortId) String() string { return proto.CompactTextString(m) }
+func (*LogicalPortId) ProtoMessage()    {}
+func (*LogicalPortId) Descriptor() ([]byte, []int) {
+	return fileDescriptor_caf139ab3abc8240, []int{0}
+}
+
+func (m *LogicalPortId) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LogicalPortId.Unmarshal(m, b)
+}
+func (m *LogicalPortId) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LogicalPortId.Marshal(b, m, deterministic)
+}
+func (m *LogicalPortId) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LogicalPortId.Merge(m, src)
+}
+func (m *LogicalPortId) XXX_Size() int {
+	return xxx_messageInfo_LogicalPortId.Size(m)
+}
+func (m *LogicalPortId) XXX_DiscardUnknown() {
+	xxx_messageInfo_LogicalPortId.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogicalPortId proto.InternalMessageInfo
+
+func (m *LogicalPortId) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *LogicalPortId) GetPortId() string {
+	if m != nil {
+		return m.PortId
+	}
+	return ""
+}
+
+type LogicalPort struct {
+	Id                   string                    `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	OfpPort              *openflow_13.OfpPort      `protobuf:"bytes,2,opt,name=ofp_port,json=ofpPort,proto3" json:"ofp_port,omitempty"`
+	DeviceId             string                    `protobuf:"bytes,3,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	DevicePortNo         uint32                    `protobuf:"varint,4,opt,name=device_port_no,json=devicePortNo,proto3" json:"device_port_no,omitempty"`
+	RootPort             bool                      `protobuf:"varint,5,opt,name=root_port,json=rootPort,proto3" json:"root_port,omitempty"`
+	OfpPortStats         *openflow_13.OfpPortStats `protobuf:"bytes,6,opt,name=ofp_port_stats,json=ofpPortStats,proto3" json:"ofp_port_stats,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
+	XXX_unrecognized     []byte                    `json:"-"`
+	XXX_sizecache        int32                     `json:"-"`
+}
+
+func (m *LogicalPort) Reset()         { *m = LogicalPort{} }
+func (m *LogicalPort) String() string { return proto.CompactTextString(m) }
+func (*LogicalPort) ProtoMessage()    {}
+func (*LogicalPort) Descriptor() ([]byte, []int) {
+	return fileDescriptor_caf139ab3abc8240, []int{1}
+}
+
+func (m *LogicalPort) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LogicalPort.Unmarshal(m, b)
+}
+func (m *LogicalPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LogicalPort.Marshal(b, m, deterministic)
+}
+func (m *LogicalPort) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LogicalPort.Merge(m, src)
+}
+func (m *LogicalPort) XXX_Size() int {
+	return xxx_messageInfo_LogicalPort.Size(m)
+}
+func (m *LogicalPort) XXX_DiscardUnknown() {
+	xxx_messageInfo_LogicalPort.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogicalPort proto.InternalMessageInfo
+
+func (m *LogicalPort) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *LogicalPort) GetOfpPort() *openflow_13.OfpPort {
+	if m != nil {
+		return m.OfpPort
+	}
+	return nil
+}
+
+func (m *LogicalPort) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *LogicalPort) GetDevicePortNo() uint32 {
+	if m != nil {
+		return m.DevicePortNo
+	}
+	return 0
+}
+
+func (m *LogicalPort) GetRootPort() bool {
+	if m != nil {
+		return m.RootPort
+	}
+	return false
+}
+
+func (m *LogicalPort) GetOfpPortStats() *openflow_13.OfpPortStats {
+	if m != nil {
+		return m.OfpPortStats
+	}
+	return nil
+}
+
+type LogicalPorts struct {
+	Items                []*LogicalPort `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
+}
+
+func (m *LogicalPorts) Reset()         { *m = LogicalPorts{} }
+func (m *LogicalPorts) String() string { return proto.CompactTextString(m) }
+func (*LogicalPorts) ProtoMessage()    {}
+func (*LogicalPorts) Descriptor() ([]byte, []int) {
+	return fileDescriptor_caf139ab3abc8240, []int{2}
+}
+
+func (m *LogicalPorts) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LogicalPorts.Unmarshal(m, b)
+}
+func (m *LogicalPorts) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LogicalPorts.Marshal(b, m, deterministic)
+}
+func (m *LogicalPorts) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LogicalPorts.Merge(m, src)
+}
+func (m *LogicalPorts) XXX_Size() int {
+	return xxx_messageInfo_LogicalPorts.Size(m)
+}
+func (m *LogicalPorts) XXX_DiscardUnknown() {
+	xxx_messageInfo_LogicalPorts.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogicalPorts proto.InternalMessageInfo
+
+func (m *LogicalPorts) GetItems() []*LogicalPort {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+type LogicalDevice struct {
+	// unique id of logical device
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// unique datapath id for the logical device (used by the SDN controller)
+	DatapathId uint64 `protobuf:"varint,2,opt,name=datapath_id,json=datapathId,proto3" json:"datapath_id,omitempty"`
+	// device description
+	Desc *openflow_13.OfpDesc `protobuf:"bytes,3,opt,name=desc,proto3" json:"desc,omitempty"`
+	// device features
+	SwitchFeatures *openflow_13.OfpSwitchFeatures `protobuf:"bytes,4,opt,name=switch_features,json=switchFeatures,proto3" json:"switch_features,omitempty"`
+	// id of the root device anchoring the logical device
+	RootDeviceId string `protobuf:"bytes,5,opt,name=root_device_id,json=rootDeviceId,proto3" json:"root_device_id,omitempty"`
+	// logical device ports
+	Ports []*LogicalPort `protobuf:"bytes,128,rep,name=ports,proto3" json:"ports,omitempty"`
+	// flows configured on the logical device
+	Flows *openflow_13.Flows `protobuf:"bytes,129,opt,name=flows,proto3" json:"flows,omitempty"`
+	// flow groups configured on the logical device
+	FlowGroups *openflow_13.FlowGroups `protobuf:"bytes,130,opt,name=flow_groups,json=flowGroups,proto3" json:"flow_groups,omitempty"`
+	// meters configured on the logical device
+	Meters               *openflow_13.Meters `protobuf:"bytes,131,opt,name=meters,proto3" json:"meters,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *LogicalDevice) Reset()         { *m = LogicalDevice{} }
+func (m *LogicalDevice) String() string { return proto.CompactTextString(m) }
+func (*LogicalDevice) ProtoMessage()    {}
+func (*LogicalDevice) Descriptor() ([]byte, []int) {
+	return fileDescriptor_caf139ab3abc8240, []int{3}
+}
+
+func (m *LogicalDevice) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LogicalDevice.Unmarshal(m, b)
+}
+func (m *LogicalDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LogicalDevice.Marshal(b, m, deterministic)
+}
+func (m *LogicalDevice) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LogicalDevice.Merge(m, src)
+}
+func (m *LogicalDevice) XXX_Size() int {
+	return xxx_messageInfo_LogicalDevice.Size(m)
+}
+func (m *LogicalDevice) XXX_DiscardUnknown() {
+	xxx_messageInfo_LogicalDevice.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogicalDevice proto.InternalMessageInfo
+
+func (m *LogicalDevice) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *LogicalDevice) GetDatapathId() uint64 {
+	if m != nil {
+		return m.DatapathId
+	}
+	return 0
+}
+
+func (m *LogicalDevice) GetDesc() *openflow_13.OfpDesc {
+	if m != nil {
+		return m.Desc
+	}
+	return nil
+}
+
+func (m *LogicalDevice) GetSwitchFeatures() *openflow_13.OfpSwitchFeatures {
+	if m != nil {
+		return m.SwitchFeatures
+	}
+	return nil
+}
+
+func (m *LogicalDevice) GetRootDeviceId() string {
+	if m != nil {
+		return m.RootDeviceId
+	}
+	return ""
+}
+
+func (m *LogicalDevice) GetPorts() []*LogicalPort {
+	if m != nil {
+		return m.Ports
+	}
+	return nil
+}
+
+func (m *LogicalDevice) GetFlows() *openflow_13.Flows {
+	if m != nil {
+		return m.Flows
+	}
+	return nil
+}
+
+func (m *LogicalDevice) GetFlowGroups() *openflow_13.FlowGroups {
+	if m != nil {
+		return m.FlowGroups
+	}
+	return nil
+}
+
+func (m *LogicalDevice) GetMeters() *openflow_13.Meters {
+	if m != nil {
+		return m.Meters
+	}
+	return nil
+}
+
+type LogicalDevices struct {
+	Items                []*LogicalDevice `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *LogicalDevices) Reset()         { *m = LogicalDevices{} }
+func (m *LogicalDevices) String() string { return proto.CompactTextString(m) }
+func (*LogicalDevices) ProtoMessage()    {}
+func (*LogicalDevices) Descriptor() ([]byte, []int) {
+	return fileDescriptor_caf139ab3abc8240, []int{4}
+}
+
+func (m *LogicalDevices) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LogicalDevices.Unmarshal(m, b)
+}
+func (m *LogicalDevices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LogicalDevices.Marshal(b, m, deterministic)
+}
+func (m *LogicalDevices) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LogicalDevices.Merge(m, src)
+}
+func (m *LogicalDevices) XXX_Size() int {
+	return xxx_messageInfo_LogicalDevices.Size(m)
+}
+func (m *LogicalDevices) XXX_DiscardUnknown() {
+	xxx_messageInfo_LogicalDevices.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogicalDevices proto.InternalMessageInfo
+
+func (m *LogicalDevices) GetItems() []*LogicalDevice {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
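As an illustration of the nil-safe getters generated above, a small helper that walks a LogicalDevices list; the helper name and print format are invented for this sketch:

	package main

	import (
		"fmt"

		"github.com/opencord/voltha-protos/v3/go/voltha"
	)

	// dumpLogicalDevices walks a LogicalDevices list. The generated getters
	// are nil-safe, so a missing sub-message simply yields an empty range.
	func dumpLogicalDevices(lds *voltha.LogicalDevices) {
		for _, ld := range lds.GetItems() {
			fmt.Printf("logical device %s (datapath %d)\n", ld.GetId(), ld.GetDatapathId())
			for _, p := range ld.GetPorts() {
				fmt.Printf("  port %s on device %s (root=%v)\n", p.GetId(), p.GetDeviceId(), p.GetRootPort())
			}
		}
	}

	func main() {
		dumpLogicalDevices(&voltha.LogicalDevices{})
	}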
+func init() {
+	proto.RegisterType((*LogicalPortId)(nil), "voltha.LogicalPortId")
+	proto.RegisterType((*LogicalPort)(nil), "voltha.LogicalPort")
+	proto.RegisterType((*LogicalPorts)(nil), "voltha.LogicalPorts")
+	proto.RegisterType((*LogicalDevice)(nil), "voltha.LogicalDevice")
+	proto.RegisterType((*LogicalDevices)(nil), "voltha.LogicalDevices")
+}
+
+func init() { proto.RegisterFile("voltha_protos/logical_device.proto", fileDescriptor_caf139ab3abc8240) }
+
+var fileDescriptor_caf139ab3abc8240 = []byte{
+	// 550 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x93, 0xcf, 0x6a, 0xdb, 0x40,
+	0x10, 0xc6, 0xab, 0xd8, 0x72, 0xec, 0x91, 0xe3, 0xc0, 0x9a, 0x10, 0x91, 0xb4, 0xc4, 0x88, 0x1e,
+	0x1c, 0x4a, 0xa5, 0xd4, 0xa6, 0xd0, 0x1e, 0x0a, 0xad, 0x09, 0x2e, 0x86, 0xfe, 0x63, 0x0b, 0x3d,
+	0xf4, 0x22, 0x36, 0xd2, 0x5a, 0x16, 0xd8, 0x1a, 0xa1, 0x5d, 0x3b, 0xd7, 0xfe, 0x79, 0xad, 0xbe,
+	0x42, 0x5f, 0xa2, 0x8f, 0xd0, 0x43, 0xcf, 0x65, 0x77, 0xa5, 0xd4, 0x8a, 0xd3, 0xa3, 0xbe, 0xf9,
+	0x7d, 0x33, 0xb3, 0xdf, 0x20, 0xf0, 0x36, 0xb8, 0x94, 0x0b, 0x16, 0xe6, 0x05, 0x4a, 0x14, 0xc1,
+	0x12, 0x93, 0x34, 0x62, 0xcb, 0x30, 0xe6, 0x9b, 0x34, 0xe2, 0xbe, 0x56, 0x49, 0xcb, 0x30, 0x27,
+	0xf7, 0x13, 0xc4, 0x64, 0xc9, 0x03, 0x96, 0xa7, 0x01, 0xcb, 0x32, 0x94, 0x4c, 0xa6, 0x98, 0x09,
+	0x43, 0x9d, 0xb8, 0xf5, 0x4e, 0x2b, 0x2e, 0x59, 0x59, 0x39, 0xab, 0x57, 0x30, 0xe7, 0xd9, 0x7c,
+	0x89, 0xd7, 0xe1, 0x93, 0xb1, 0x01, 0xbc, 0x67, 0x70, 0xf0, 0xc6, 0x0c, 0xfe, 0x80, 0x85, 0x9c,
+	0xc5, 0xa4, 0x07, 0x7b, 0x69, 0xec, 0x5a, 0x03, 0x6b, 0xd8, 0xa1, 0x7b, 0x69, 0x4c, 0x8e, 0x61,
+	0x3f, 0xc7, 0x42, 0x86, 0x69, 0xec, 0xee, 0x69, 0xb1, 0x95, 0x6b, 0xd0, 0xfb, 0x6d, 0x81, 0xb3,
+	0x65, 0xdd, 0x31, 0x5e, 0x40, 0x1b, 0xe7, 0x79, 0xa8, 0x68, 0xed, 0x74, 0x46, 0x47, 0xfe, 0xf6,
+	0xfc, 0xaa, 0x48, 0xf7, 0x71, 0x9e, 0xeb, 0x0e, 0xa7, 0xd0, 0x31, 0x8f, 0x57, 0xc3, 0x1a, 0xba,
+	0x51, 0xdb, 0x08, 0xb3, 0x98, 0x3c, 0x84, 0x5e, 0x59, 0xd4, 0xeb, 0x64, 0xe8, 0x36, 0x07, 0xd6,
+	0xf0, 0x80, 0x76, 0x8d, 0xaa, 0x1a, 0xbc, 0x43, 0xd5, 0xa2, 0x40, 0x94, 0x66, 0xaa, 0x3d, 0xb0,
+	0x86, 0x6d, 0xda, 0x56, 0x82, 0xee, 0xff, 0x0a, 0x7a, 0xd5, 0xd0, 0x50, 0x48, 0x26, 0x85, 0xdb,
+	0xd2, 0x7b, 0x9d, 0xde, 0xb9, 0x97, 0x41, 0x68, 0xb7, 0xdc, 0xee, 0xa3, 0xfa, 0xf2, 0x9e, 0x43,
+	0x77, 0xeb, 0xcd, 0x82, 0x9c, 0x83, 0x9d, 0x4a, 0xbe, 0x12, 0xae, 0x35, 0x68, 0x0c, 0x9d, 0x51,
+	0xdf, 0x37, 0x79, 0xfb, 0x5b, 0x10, 0x35, 0x84, 0xf7, 0xa3, 0x71, 0x13, 0xf5, 0xa5, 0x5e, 0x79,
+	0x27, 0xb1, 0x33, 0x70, 0x62, 0x26, 0x59, 0xce, 0xe4, 0xa2, 0x8a, 0xbb, 0x49, 0xa1, 0x92, 0x66,
+	0x31, 0x39, 0x87, 0x66, 0xcc, 0x45, 0xa4, 0xb3, 0xb9, 0x2b, 0x4e, 0x55, 0xa4, 0x1a, 0x21, 0x33,
+	0x38, 0x14, 0xd7, 0xa9, 0x8c, 0x16, 0xe1, 0x9c, 0x33, 0xb9, 0x2e, 0xb8, 0xd0, 0x79, 0x39, 0xa3,
+	0xc1, 0x8e, 0xeb, 0x16, 0x47, 0x7b, 0x46, 0x98, 0x96, 0xdf, 0x2a, 0x79, 0x9d, 0xe9, 0xbf, 0xdb,
+	0xd8, 0x7a, 0xe5, 0xae, 0x52, 0x2f, 0xab, 0xfb, 0x3c, 0x05, 0x5b, 0xa5, 0x26, 0xdc, 0x2f, 0xff,
+	0x8f, 0x62, 0xd2, 0xf9, 0xf5, 0xe7, 0xe7, 0x83, 0xa6, 0x7a, 0x36, 0x35, 0x34, 0xb9, 0x00, 0x5b,
+	0xed, 0x22, 0xdc, 0xaf, 0x96, 0x5e, 0x8f, 0xd4, 0xd6, 0x9b, 0xaa, 0xd2, 0xc4, 0x56, 0xae, 0x7b,
+	0xd4, 0x80, 0xe4, 0x25, 0x38, 0xba, 0x9c, 0x14, 0xb8, 0xce, 0x85, 0xfb, 0xcd, 0xf8, 0x8e, 0x77,
+	0x7c, 0xaf, 0x75, 0xbd, 0x32, 0xc3, 0xfc, 0x46, 0x22, 0x63, 0x68, 0xad, 0xb8, 0xe4, 0x85, 0x70,
+	0xbf, 0x1b, 0x73, 0xbf, 0x66, 0x7e, 0xab, 0x6b, 0x95, 0xb1, 0x44, 0xbd, 0x17, 0xd0, 0xab, 0x5d,
+	0x4f, 0x90, 0x47, 0xf5, 0xdb, 0x1f, 0xdd, 0x7a, 0xb0, 0xc1, 0xca, 0xeb, 0x4f, 0x3e, 0x41, 0x1f,
+	0x8b, 0x44, 0xcf, 0x89, 0xb0, 0x88, 0x4b, 0x76, 0x72, 0xf8, 0x7e, 0x5a, 0xc3, 0x3f, 0xfb, 0x49,
+	0x2a, 0x17, 0xeb, 0x2b, 0x3f, 0xc2, 0x55, 0x50, 0xc1, 0x81, 0x81, 0x1f, 0x97, 0x3f, 0xf1, 0x66,
+	0x1c, 0x24, 0x58, 0x6a, 0x57, 0x2d, 0x2d, 0x8e, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xdd, 0xad,
+	0x36, 0xdf, 0x4d, 0x04, 0x00, 0x00,
+}
diff --git a/vendor/github.com/opencord/voltha-protos/v3/go/voltha/ponsim.pb.go b/vendor/github.com/opencord/voltha-protos/v3/go/voltha/ponsim.pb.go
new file mode 100644
index 0000000..1dc6e94
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v3/go/voltha/ponsim.pb.go
@@ -0,0 +1,686 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/ponsim.proto
+
+package voltha
+
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	empty "github.com/golang/protobuf/ptypes/empty"
+	openflow_13 "github.com/opencord/voltha-protos/v3/go/openflow_13"
+	grpc "google.golang.org/grpc"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type PonSimOnuDeviceInfo struct {
+	UniPort              int32    `protobuf:"varint,1,opt,name=uni_port,json=uniPort,proto3" json:"uni_port,omitempty"`
+	SerialNumber         string   `protobuf:"bytes,2,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *PonSimOnuDeviceInfo) Reset()         { *m = PonSimOnuDeviceInfo{} }
+func (m *PonSimOnuDeviceInfo) String() string { return proto.CompactTextString(m) }
+func (*PonSimOnuDeviceInfo) ProtoMessage()    {}
+func (*PonSimOnuDeviceInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_352253851b8ea7c0, []int{0}
+}
+
+func (m *PonSimOnuDeviceInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PonSimOnuDeviceInfo.Unmarshal(m, b)
+}
+func (m *PonSimOnuDeviceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PonSimOnuDeviceInfo.Marshal(b, m, deterministic)
+}
+func (m *PonSimOnuDeviceInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PonSimOnuDeviceInfo.Merge(m, src)
+}
+func (m *PonSimOnuDeviceInfo) XXX_Size() int {
+	return xxx_messageInfo_PonSimOnuDeviceInfo.Size(m)
+}
+func (m *PonSimOnuDeviceInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_PonSimOnuDeviceInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PonSimOnuDeviceInfo proto.InternalMessageInfo
+
+func (m *PonSimOnuDeviceInfo) GetUniPort() int32 {
+	if m != nil {
+		return m.UniPort
+	}
+	return 0
+}
+
+func (m *PonSimOnuDeviceInfo) GetSerialNumber() string {
+	if m != nil {
+		return m.SerialNumber
+	}
+	return ""
+}
+
+type PonSimDeviceInfo struct {
+	NniPort              int32                  `protobuf:"varint,1,opt,name=nni_port,json=nniPort,proto3" json:"nni_port,omitempty"`
+	Onus                 []*PonSimOnuDeviceInfo `protobuf:"bytes,2,rep,name=onus,proto3" json:"onus,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
+	XXX_unrecognized     []byte                 `json:"-"`
+	XXX_sizecache        int32                  `json:"-"`
+}
+
+func (m *PonSimDeviceInfo) Reset()         { *m = PonSimDeviceInfo{} }
+func (m *PonSimDeviceInfo) String() string { return proto.CompactTextString(m) }
+func (*PonSimDeviceInfo) ProtoMessage()    {}
+func (*PonSimDeviceInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_352253851b8ea7c0, []int{1}
+}
+
+func (m *PonSimDeviceInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PonSimDeviceInfo.Unmarshal(m, b)
+}
+func (m *PonSimDeviceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PonSimDeviceInfo.Marshal(b, m, deterministic)
+}
+func (m *PonSimDeviceInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PonSimDeviceInfo.Merge(m, src)
+}
+func (m *PonSimDeviceInfo) XXX_Size() int {
+	return xxx_messageInfo_PonSimDeviceInfo.Size(m)
+}
+func (m *PonSimDeviceInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_PonSimDeviceInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PonSimDeviceInfo proto.InternalMessageInfo
+
+func (m *PonSimDeviceInfo) GetNniPort() int32 {
+	if m != nil {
+		return m.NniPort
+	}
+	return 0
+}
+
+func (m *PonSimDeviceInfo) GetOnus() []*PonSimOnuDeviceInfo {
+	if m != nil {
+		return m.Onus
+	}
+	return nil
+}
+
+type FlowTable struct {
+	Port                 int32                       `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"`
+	Flows                []*openflow_13.OfpFlowStats `protobuf:"bytes,2,rep,name=flows,proto3" json:"flows,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                    `json:"-"`
+	XXX_unrecognized     []byte                      `json:"-"`
+	XXX_sizecache        int32                       `json:"-"`
+}
+
+func (m *FlowTable) Reset()         { *m = FlowTable{} }
+func (m *FlowTable) String() string { return proto.CompactTextString(m) }
+func (*FlowTable) ProtoMessage()    {}
+func (*FlowTable) Descriptor() ([]byte, []int) {
+	return fileDescriptor_352253851b8ea7c0, []int{2}
+}
+
+func (m *FlowTable) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FlowTable.Unmarshal(m, b)
+}
+func (m *FlowTable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FlowTable.Marshal(b, m, deterministic)
+}
+func (m *FlowTable) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FlowTable.Merge(m, src)
+}
+func (m *FlowTable) XXX_Size() int {
+	return xxx_messageInfo_FlowTable.Size(m)
+}
+func (m *FlowTable) XXX_DiscardUnknown() {
+	xxx_messageInfo_FlowTable.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlowTable proto.InternalMessageInfo
+
+func (m *FlowTable) GetPort() int32 {
+	if m != nil {
+		return m.Port
+	}
+	return 0
+}
+
+func (m *FlowTable) GetFlows() []*openflow_13.OfpFlowStats {
+	if m != nil {
+		return m.Flows
+	}
+	return nil
+}
+
+type PonSimFrame struct {
+	Id                   string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Payload              []byte   `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"`
+	OutPort              int32    `protobuf:"varint,3,opt,name=out_port,json=outPort,proto3" json:"out_port,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *PonSimFrame) Reset()         { *m = PonSimFrame{} }
+func (m *PonSimFrame) String() string { return proto.CompactTextString(m) }
+func (*PonSimFrame) ProtoMessage()    {}
+func (*PonSimFrame) Descriptor() ([]byte, []int) {
+	return fileDescriptor_352253851b8ea7c0, []int{3}
+}
+
+func (m *PonSimFrame) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PonSimFrame.Unmarshal(m, b)
+}
+func (m *PonSimFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PonSimFrame.Marshal(b, m, deterministic)
+}
+func (m *PonSimFrame) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PonSimFrame.Merge(m, src)
+}
+func (m *PonSimFrame) XXX_Size() int {
+	return xxx_messageInfo_PonSimFrame.Size(m)
+}
+func (m *PonSimFrame) XXX_DiscardUnknown() {
+	xxx_messageInfo_PonSimFrame.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PonSimFrame proto.InternalMessageInfo
+
+func (m *PonSimFrame) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *PonSimFrame) GetPayload() []byte {
+	if m != nil {
+		return m.Payload
+	}
+	return nil
+}
+
+func (m *PonSimFrame) GetOutPort() int32 {
+	if m != nil {
+		return m.OutPort
+	}
+	return 0
+}
+
+type PonSimPacketCounter struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Value                int64    `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *PonSimPacketCounter) Reset()         { *m = PonSimPacketCounter{} }
+func (m *PonSimPacketCounter) String() string { return proto.CompactTextString(m) }
+func (*PonSimPacketCounter) ProtoMessage()    {}
+func (*PonSimPacketCounter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_352253851b8ea7c0, []int{4}
+}
+
+func (m *PonSimPacketCounter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PonSimPacketCounter.Unmarshal(m, b)
+}
+func (m *PonSimPacketCounter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PonSimPacketCounter.Marshal(b, m, deterministic)
+}
+func (m *PonSimPacketCounter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PonSimPacketCounter.Merge(m, src)
+}
+func (m *PonSimPacketCounter) XXX_Size() int {
+	return xxx_messageInfo_PonSimPacketCounter.Size(m)
+}
+func (m *PonSimPacketCounter) XXX_DiscardUnknown() {
+	xxx_messageInfo_PonSimPacketCounter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PonSimPacketCounter proto.InternalMessageInfo
+
+func (m *PonSimPacketCounter) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *PonSimPacketCounter) GetValue() int64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+type PonSimPortMetrics struct {
+	PortName             string                 `protobuf:"bytes,1,opt,name=port_name,json=portName,proto3" json:"port_name,omitempty"`
+	Packets              []*PonSimPacketCounter `protobuf:"bytes,2,rep,name=packets,proto3" json:"packets,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
+	XXX_unrecognized     []byte                 `json:"-"`
+	XXX_sizecache        int32                  `json:"-"`
+}
+
+func (m *PonSimPortMetrics) Reset()         { *m = PonSimPortMetrics{} }
+func (m *PonSimPortMetrics) String() string { return proto.CompactTextString(m) }
+func (*PonSimPortMetrics) ProtoMessage()    {}
+func (*PonSimPortMetrics) Descriptor() ([]byte, []int) {
+	return fileDescriptor_352253851b8ea7c0, []int{5}
+}
+
+func (m *PonSimPortMetrics) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PonSimPortMetrics.Unmarshal(m, b)
+}
+func (m *PonSimPortMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PonSimPortMetrics.Marshal(b, m, deterministic)
+}
+func (m *PonSimPortMetrics) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PonSimPortMetrics.Merge(m, src)
+}
+func (m *PonSimPortMetrics) XXX_Size() int {
+	return xxx_messageInfo_PonSimPortMetrics.Size(m)
+}
+func (m *PonSimPortMetrics) XXX_DiscardUnknown() {
+	xxx_messageInfo_PonSimPortMetrics.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PonSimPortMetrics proto.InternalMessageInfo
+
+func (m *PonSimPortMetrics) GetPortName() string {
+	if m != nil {
+		return m.PortName
+	}
+	return ""
+}
+
+func (m *PonSimPortMetrics) GetPackets() []*PonSimPacketCounter {
+	if m != nil {
+		return m.Packets
+	}
+	return nil
+}
+
+type PonSimMetrics struct {
+	Device               string               `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"`
+	Metrics              []*PonSimPortMetrics `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *PonSimMetrics) Reset()         { *m = PonSimMetrics{} }
+func (m *PonSimMetrics) String() string { return proto.CompactTextString(m) }
+func (*PonSimMetrics) ProtoMessage()    {}
+func (*PonSimMetrics) Descriptor() ([]byte, []int) {
+	return fileDescriptor_352253851b8ea7c0, []int{6}
+}
+
+func (m *PonSimMetrics) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PonSimMetrics.Unmarshal(m, b)
+}
+func (m *PonSimMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PonSimMetrics.Marshal(b, m, deterministic)
+}
+func (m *PonSimMetrics) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PonSimMetrics.Merge(m, src)
+}
+func (m *PonSimMetrics) XXX_Size() int {
+	return xxx_messageInfo_PonSimMetrics.Size(m)
+}
+func (m *PonSimMetrics) XXX_DiscardUnknown() {
+	xxx_messageInfo_PonSimMetrics.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PonSimMetrics proto.InternalMessageInfo
+
+func (m *PonSimMetrics) GetDevice() string {
+	if m != nil {
+		return m.Device
+	}
+	return ""
+}
+
+func (m *PonSimMetrics) GetMetrics() []*PonSimPortMetrics {
+	if m != nil {
+		return m.Metrics
+	}
+	return nil
+}
+
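To show how the PonSim metrics messages compose, a small fold over PonSimMetrics that totals the per-port packet counters; the device name and function name are placeholders for this sketch:

	package main

	import (
		"fmt"

		"github.com/opencord/voltha-protos/v3/go/voltha"
	)

	// totalPackets folds the per-port counters of a PonSimMetrics sample
	// into a single map keyed by counter name.
	func totalPackets(m *voltha.PonSimMetrics) map[string]int64 {
		totals := make(map[string]int64)
		for _, port := range m.GetMetrics() {
			for _, c := range port.GetPackets() {
				totals[c.GetName()] += c.GetValue()
			}
		}
		return totals
	}

	func main() {
		fmt.Println(totalPackets(&voltha.PonSimMetrics{Device: "ponsim-0"}))
	}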
+type PonSimMetricsRequest struct {
+	Port                 int32    `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *PonSimMetricsRequest) Reset()         { *m = PonSimMetricsRequest{} }
+func (m *PonSimMetricsRequest) String() string { return proto.CompactTextString(m) }
+func (*PonSimMetricsRequest) ProtoMessage()    {}
+func (*PonSimMetricsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_352253851b8ea7c0, []int{7}
+}
+
+func (m *PonSimMetricsRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PonSimMetricsRequest.Unmarshal(m, b)
+}
+func (m *PonSimMetricsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PonSimMetricsRequest.Marshal(b, m, deterministic)
+}
+func (m *PonSimMetricsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PonSimMetricsRequest.Merge(m, src)
+}
+func (m *PonSimMetricsRequest) XXX_Size() int {
+	return xxx_messageInfo_PonSimMetricsRequest.Size(m)
+}
+func (m *PonSimMetricsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_PonSimMetricsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PonSimMetricsRequest proto.InternalMessageInfo
+
+func (m *PonSimMetricsRequest) GetPort() int32 {
+	if m != nil {
+		return m.Port
+	}
+	return 0
+}
+
+func init() {
+	proto.RegisterType((*PonSimOnuDeviceInfo)(nil), "voltha.PonSimOnuDeviceInfo")
+	proto.RegisterType((*PonSimDeviceInfo)(nil), "voltha.PonSimDeviceInfo")
+	proto.RegisterType((*FlowTable)(nil), "voltha.FlowTable")
+	proto.RegisterType((*PonSimFrame)(nil), "voltha.PonSimFrame")
+	proto.RegisterType((*PonSimPacketCounter)(nil), "voltha.PonSimPacketCounter")
+	proto.RegisterType((*PonSimPortMetrics)(nil), "voltha.PonSimPortMetrics")
+	proto.RegisterType((*PonSimMetrics)(nil), "voltha.PonSimMetrics")
+	proto.RegisterType((*PonSimMetricsRequest)(nil), "voltha.PonSimMetricsRequest")
+}
+
+func init() { proto.RegisterFile("voltha_protos/ponsim.proto", fileDescriptor_352253851b8ea7c0) }
+
+var fileDescriptor_352253851b8ea7c0 = []byte{
+	// 563 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x94, 0x5f, 0x6f, 0xd3, 0x30,
+	0x14, 0xc5, 0xdb, 0x6e, 0xeb, 0x9f, 0xbb, 0x15, 0x98, 0x3b, 0xa6, 0xae, 0x7d, 0xa0, 0x32, 0x2f,
+	0x15, 0x12, 0x09, 0x5b, 0xc5, 0x0b, 0x48, 0x80, 0x18, 0xdb, 0xc4, 0x03, 0xa3, 0x72, 0xd9, 0x0b,
+	0x42, 0x44, 0x69, 0xe2, 0x66, 0x11, 0x89, 0x6f, 0x48, 0xec, 0x4e, 0xfb, 0x86, 0x7c, 0x2c, 0x14,
+	0x3b, 0xa1, 0xcd, 0xd4, 0xf2, 0x66, 0x5f, 0x9f, 0xfe, 0xce, 0x3d, 0x37, 0x57, 0x85, 0xc1, 0x12,
+	0x23, 0x79, 0xeb, 0x3a, 0x49, 0x8a, 0x12, 0x33, 0x3b, 0x41, 0x91, 0x85, 0xb1, 0xa5, 0x6f, 0xa4,
+	0x69, 0xde, 0x06, 0xc3, 0x00, 0x31, 0x88, 0xb8, 0xad, 0xab, 0x73, 0xb5, 0xb0, 0x79, 0x9c, 0xc8,
+	0x7b, 0x23, 0x1a, 0x3c, 0xab, 0x02, 0x30, 0xe1, 0x62, 0x11, 0xe1, 0x9d, 0x73, 0x3a, 0x31, 0x02,
+	0x7a, 0x03, 0xbd, 0x29, 0x8a, 0x59, 0x18, 0x7f, 0x15, 0xea, 0x13, 0x5f, 0x86, 0x1e, 0xff, 0x2c,
+	0x16, 0x48, 0x4e, 0xa0, 0xad, 0x44, 0xe8, 0x24, 0x98, 0xca, 0x7e, 0x7d, 0x54, 0x1f, 0xef, 0xb1,
+	0x96, 0x12, 0xe1, 0x14, 0x53, 0x49, 0x9e, 0x43, 0x37, 0xe3, 0x69, 0xe8, 0x46, 0x8e, 0x50, 0xf1,
+	0x9c, 0xa7, 0xfd, 0xc6, 0xa8, 0x3e, 0xee, 0xb0, 0x03, 0x53, 0xbc, 0xd6, 0x35, 0xfa, 0x13, 0x9e,
+	0x18, 0x6c, 0x95, 0x29, 0x1e, 0x30, 0x45, 0xc1, 0xb4, 0x61, 0x17, 0x85, 0xca, 0xfa, 0x8d, 0xd1,
+	0xce, 0x78, 0xff, 0x6c, 0x68, 0x99, 0xae, 0xad, 0x0d, 0x9d, 0x31, 0x2d, 0xa4, 0x0c, 0x3a, 0x97,
+	0x11, 0xde, 0x7d, 0x73, 0xe7, 0x11, 0x27, 0x04, 0x76, 0xd7, 0xa0, 0xfa, 0x4c, 0x4e, 0x61, 0x2f,
+	0x0f, 0xba, 0x42, 0xae, 0x47, 0xc7, 0x45, 0xe2, 0xe8, 0x73, 0x26, 0x5d, 0x99, 0x31, 0xa3, 0xa4,
+	0x0c, 0xf6, 0x8d, 0xe1, 0x65, 0xea, 0xc6, 0x9c, 0x3c, 0x82, 0x46, 0xe8, 0x6b, 0x66, 0x87, 0x35,
+	0x42, 0x9f, 0xf4, 0xa1, 0x95, 0xb8, 0xf7, 0x11, 0xba, 0xbe, 0x4e, 0x7c, 0xc0, 0xca, 0x6b, 0x1e,
+	0x0c, 0x95, 0x34, 0xc1, 0x76, 0x4c, 0x30, 0x54, 0x32, 0x0f, 0x46, 0xdf, 0x97, 0xe3, 0x9d, 0xba,
+	0xde, 0x2f, 0x2e, 0xcf, 0x51, 0x09, 0xc9, 0xd3, 0xbc, 0x63, 0xe1, 0xc6, 0xbc, 0xa0, 0xeb, 0x33,
+	0x39, 0x82, 0xbd, 0xa5, 0x1b, 0x29, 0xae, 0xe9, 0x3b, 0xcc, 0x5c, 0x68, 0x00, 0x87, 0x05, 0x00,
+	0x53, 0xf9, 0x85, 0xcb, 0x34, 0xf4, 0x32, 0x32, 0x84, 0x4e, 0x6e, 0xe6, 0xac, 0x31, 0xda, 0x79,
+	0xe1, 0x3a, 0xe7, 0xbc, 0xce, 0xfb, 0xcc, 0xcd, 0xb6, 0x8c, 0xb3, 0xd2, 0x09, 0x2b, 0xb5, 0xf4,
+	0x07, 0x74, 0xcd, 0x7b, 0x69, 0x72, 0x0c, 0x4d, 0x5f, 0x8f, 0xbd, 0x70, 0x28, 0x6e, 0x64, 0x02,
+	0xad, 0xd8, 0x48, 0x0a, 0xfe, 0xc9, 0x03, 0xfe, 0xaa, 0x51, 0x56, 0x2a, 0xe9, 0x0b, 0x38, 0xaa,
+	0xd0, 0x19, 0xff, 0xad, 0x78, 0x26, 0x37, 0x7d, 0xba, 0xb3, 0x3f, 0x0d, 0x68, 0x1a, 0x31, 0x79,
+	0x03, 0x9d, 0x19, 0x17, 0xbe, 0xf9, 0x20, 0xbd, 0xaa, 0x8f, 0x2e, 0x0e, 0x8e, 0x2d, 0xb3, 0xfe,
+	0x56, 0xb9, 0xfe, 0xd6, 0x45, 0xbe, 0xfe, 0xb4, 0x46, 0x3e, 0x40, 0x97, 0x71, 0x8f, 0x87, 0x4b,
+	0xae, 0x95, 0x19, 0xd9, 0x22, 0x1d, 0x6c, 0xe2, 0xd2, 0xda, 0xab, 0x3a, 0x39, 0x87, 0xee, 0x15,
+	0x97, 0x6b, 0x1b, 0xbc, 0x8d, 0xd0, 0xaf, 0x12, 0x56, 0xbf, 0xa0, 0x35, 0xf2, 0x0e, 0x1e, 0xdf,
+	0x24, 0xbe, 0x2b, 0xf9, 0x6a, 0x5f, 0x0f, 0x4b, 0xf9, 0xbf, 0xd2, 0x7f, 0x62, 0xbc, 0x85, 0xf6,
+	0x15, 0x97, 0xb3, 0x7c, 0x51, 0xb7, 0xfa, 0x3f, 0xad, 0xfa, 0x17, 0x33, 0xa6, 0xb5, 0x8f, 0x17,
+	0xd0, 0xc3, 0x34, 0xd0, 0xbb, 0xef, 0x61, 0xea, 0x17, 0xb2, 0xef, 0x56, 0x10, 0xca, 0x5b, 0x35,
+	0xb7, 0x3c, 0x8c, 0xed, 0xf2, 0xcd, 0x36, 0x6f, 0x2f, 0x8b, 0x7f, 0x8a, 0xe5, 0xc4, 0x0e, 0xb0,
+	0xa8, 0xcd, 0x9b, 0xba, 0x38, 0xf9, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xb2, 0x72, 0x9e, 0xcb, 0x8f,
+	0x04, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// PonSimClient is the client API for PonSim service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type PonSimClient interface {
+	SendFrame(ctx context.Context, in *PonSimFrame, opts ...grpc.CallOption) (*empty.Empty, error)
+	ReceiveFrames(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (PonSim_ReceiveFramesClient, error)
+	GetDeviceInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*PonSimDeviceInfo, error)
+	UpdateFlowTable(ctx context.Context, in *FlowTable, opts ...grpc.CallOption) (*empty.Empty, error)
+	GetStats(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*PonSimMetrics, error)
+}
+
+type ponSimClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewPonSimClient(cc *grpc.ClientConn) PonSimClient {
+	return &ponSimClient{cc}
+}
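+
+// NOTE: a minimal usage sketch, not part of the generated API. The dial
+// target "localhost:50060" and the error handling are illustrative
+// assumptions only:
+//
+//	conn, err := grpc.Dial("localhost:50060", grpc.WithInsecure())
+//	if err != nil {
+//		log.Fatalf("dial: %v", err)
+//	}
+//	defer conn.Close()
+//	client := NewPonSimClient(conn)
+//	info, err := client.GetDeviceInfo(context.Background(), &empty.Empty{})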
+
+func (c *ponSimClient) SendFrame(ctx context.Context, in *PonSimFrame, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.PonSim/SendFrame", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *ponSimClient) ReceiveFrames(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (PonSim_ReceiveFramesClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_PonSim_serviceDesc.Streams[0], "/voltha.PonSim/ReceiveFrames", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &ponSimReceiveFramesClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type PonSim_ReceiveFramesClient interface {
+	Recv() (*PonSimFrame, error)
+	grpc.ClientStream
+}
+
+type ponSimReceiveFramesClient struct {
+	grpc.ClientStream
+}
+
+func (x *ponSimReceiveFramesClient) Recv() (*PonSimFrame, error) {
+	m := new(PonSimFrame)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
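+
+// NOTE: a hedged sketch of draining a ReceiveFrames stream (illustration
+// only). It assumes the generated GetPayload accessor on PonSimFrame and a
+// caller-supplied process func; a server-side close surfaces as io.EOF:
+//
+//	stream, err := client.ReceiveFrames(ctx, &empty.Empty{})
+//	if err != nil {
+//		return err
+//	}
+//	for {
+//		frame, err := stream.Recv()
+//		if err == io.EOF {
+//			break // server finished the stream
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		process(frame.GetPayload())
+//	}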
+
+func (c *ponSimClient) GetDeviceInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*PonSimDeviceInfo, error) {
+	out := new(PonSimDeviceInfo)
+	err := c.cc.Invoke(ctx, "/voltha.PonSim/GetDeviceInfo", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *ponSimClient) UpdateFlowTable(ctx context.Context, in *FlowTable, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.PonSim/UpdateFlowTable", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *ponSimClient) GetStats(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*PonSimMetrics, error) {
+	out := new(PonSimMetrics)
+	err := c.cc.Invoke(ctx, "/voltha.PonSim/GetStats", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// PonSimServer is the server API for PonSim service.
+type PonSimServer interface {
+	SendFrame(context.Context, *PonSimFrame) (*empty.Empty, error)
+	ReceiveFrames(*empty.Empty, PonSim_ReceiveFramesServer) error
+	GetDeviceInfo(context.Context, *empty.Empty) (*PonSimDeviceInfo, error)
+	UpdateFlowTable(context.Context, *FlowTable) (*empty.Empty, error)
+	GetStats(context.Context, *empty.Empty) (*PonSimMetrics, error)
+}
+
+func RegisterPonSimServer(s *grpc.Server, srv PonSimServer) {
+	s.RegisterService(&_PonSim_serviceDesc, srv)
+}
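+
+// NOTE: a minimal server wiring sketch under stated assumptions: the listen
+// address and the ponSimServer type implementing PonSimServer are
+// placeholders, not part of this package:
+//
+//	lis, err := net.Listen("tcp", ":50060")
+//	if err != nil {
+//		log.Fatalf("listen: %v", err)
+//	}
+//	s := grpc.NewServer()
+//	RegisterPonSimServer(s, &ponSimServer{})
+//	log.Fatal(s.Serve(lis))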
+
+func _PonSim_SendFrame_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(PonSimFrame)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(PonSimServer).SendFrame(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.PonSim/SendFrame",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(PonSimServer).SendFrame(ctx, req.(*PonSimFrame))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _PonSim_ReceiveFrames_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(empty.Empty)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(PonSimServer).ReceiveFrames(m, &ponSimReceiveFramesServer{stream})
+}
+
+type PonSim_ReceiveFramesServer interface {
+	Send(*PonSimFrame) error
+	grpc.ServerStream
+}
+
+type ponSimReceiveFramesServer struct {
+	grpc.ServerStream
+}
+
+func (x *ponSimReceiveFramesServer) Send(m *PonSimFrame) error {
+	return x.ServerStream.SendMsg(m)
+}
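+
+// NOTE: a sketch of a server-side ReceiveFrames implementation pushing
+// frames with Send. The frames channel is an assumed field on the
+// illustrative ponSimServer type, not part of the generated code:
+//
+//	func (s *ponSimServer) ReceiveFrames(_ *empty.Empty, stream PonSim_ReceiveFramesServer) error {
+//		for frame := range s.frames { // chan *PonSimFrame
+//			if err := stream.Send(frame); err != nil {
+//				return err // client went away; stop streaming
+//			}
+//		}
+//		return nil
+//	}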
+
+func _PonSim_GetDeviceInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(PonSimServer).GetDeviceInfo(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.PonSim/GetDeviceInfo",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(PonSimServer).GetDeviceInfo(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _PonSim_UpdateFlowTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(FlowTable)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(PonSimServer).UpdateFlowTable(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.PonSim/UpdateFlowTable",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(PonSimServer).UpdateFlowTable(ctx, req.(*FlowTable))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _PonSim_GetStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(PonSimServer).GetStats(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.PonSim/GetStats",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(PonSimServer).GetStats(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _PonSim_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "voltha.PonSim",
+	HandlerType: (*PonSimServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "SendFrame",
+			Handler:    _PonSim_SendFrame_Handler,
+		},
+		{
+			MethodName: "GetDeviceInfo",
+			Handler:    _PonSim_GetDeviceInfo_Handler,
+		},
+		{
+			MethodName: "UpdateFlowTable",
+			Handler:    _PonSim_UpdateFlowTable_Handler,
+		},
+		{
+			MethodName: "GetStats",
+			Handler:    _PonSim_GetStats_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "ReceiveFrames",
+			Handler:       _PonSim_ReceiveFrames_Handler,
+			ServerStreams: true,
+		},
+	},
+	Metadata: "voltha_protos/ponsim.proto",
+}
diff --git a/vendor/github.com/opencord/voltha-protos/v3/go/voltha/voltha.pb.go b/vendor/github.com/opencord/voltha-protos/v3/go/voltha/voltha.pb.go
new file mode 100644
index 0000000..83fb298
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v3/go/voltha/voltha.pb.go
@@ -0,0 +1,4508 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/voltha.proto
+
+package voltha
+
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	empty "github.com/golang/protobuf/ptypes/empty"
+	common "github.com/opencord/voltha-protos/v3/go/common"
+	omci "github.com/opencord/voltha-protos/v3/go/omci"
+	openflow_13 "github.com/opencord/voltha-protos/v3/go/openflow_13"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	grpc "google.golang.org/grpc"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// ChildNode from public import voltha_protos/meta.proto
+type ChildNode = common.ChildNode
+
+// Access from public import voltha_protos/meta.proto
+type Access = common.Access
+
+var Access_name = common.Access_name
+var Access_value = common.Access_value
+
+const Access_CONFIG = Access(common.Access_CONFIG)
+const Access_READ_ONLY = Access(common.Access_READ_ONLY)
+const Access_REAL_TIME = Access(common.Access_REAL_TIME)
+
+var E_ChildNode = common.E_ChildNode
+
+var E_Access = common.E_Access
+
+// ID from public import voltha_protos/common.proto
+type ID = common.ID
+
+// IDs from public import voltha_protos/common.proto
+type IDs = common.IDs
+
+// AdminState from public import voltha_protos/common.proto
+type AdminState = common.AdminState
+
+// OperStatus from public import voltha_protos/common.proto
+type OperStatus = common.OperStatus
+
+// ConnectStatus from public import voltha_protos/common.proto
+type ConnectStatus = common.ConnectStatus
+
+// OperationResp from public import voltha_protos/common.proto
+type OperationResp = common.OperationResp
+
+// TestModeKeys from public import voltha_protos/common.proto
+type TestModeKeys = common.TestModeKeys
+
+var TestModeKeys_name = common.TestModeKeys_name
+var TestModeKeys_value = common.TestModeKeys_value
+
+const TestModeKeys_api_test = TestModeKeys(common.TestModeKeys_api_test)
+
+// AdminState_Types from public import voltha_protos/common.proto
+type AdminState_Types = common.AdminState_Types
+
+var AdminState_Types_name = common.AdminState_Types_name
+var AdminState_Types_value = common.AdminState_Types_value
+
+const AdminState_UNKNOWN = AdminState_Types(common.AdminState_UNKNOWN)
+const AdminState_PREPROVISIONED = AdminState_Types(common.AdminState_PREPROVISIONED)
+const AdminState_ENABLED = AdminState_Types(common.AdminState_ENABLED)
+const AdminState_DISABLED = AdminState_Types(common.AdminState_DISABLED)
+const AdminState_DOWNLOADING_IMAGE = AdminState_Types(common.AdminState_DOWNLOADING_IMAGE)
+const AdminState_DELETED = AdminState_Types(common.AdminState_DELETED)
+
+// OperStatus_Types from public import voltha_protos/common.proto
+type OperStatus_Types = common.OperStatus_Types
+
+var OperStatus_Types_name = common.OperStatus_Types_name
+var OperStatus_Types_value = common.OperStatus_Types_value
+
+const OperStatus_UNKNOWN = OperStatus_Types(common.OperStatus_UNKNOWN)
+const OperStatus_DISCOVERED = OperStatus_Types(common.OperStatus_DISCOVERED)
+const OperStatus_ACTIVATING = OperStatus_Types(common.OperStatus_ACTIVATING)
+const OperStatus_TESTING = OperStatus_Types(common.OperStatus_TESTING)
+const OperStatus_ACTIVE = OperStatus_Types(common.OperStatus_ACTIVE)
+const OperStatus_FAILED = OperStatus_Types(common.OperStatus_FAILED)
+
+// ConnectStatus_Types from public import voltha_protos/common.proto
+type ConnectStatus_Types = common.ConnectStatus_Types
+
+var ConnectStatus_Types_name = common.ConnectStatus_Types_name
+var ConnectStatus_Types_value = common.ConnectStatus_Types_value
+
+const ConnectStatus_UNKNOWN = ConnectStatus_Types(common.ConnectStatus_UNKNOWN)
+const ConnectStatus_UNREACHABLE = ConnectStatus_Types(common.ConnectStatus_UNREACHABLE)
+const ConnectStatus_REACHABLE = ConnectStatus_Types(common.ConnectStatus_REACHABLE)
+
+// OperationResp_OperationReturnCode from public import voltha_protos/common.proto
+type OperationResp_OperationReturnCode = common.OperationResp_OperationReturnCode
+
+var OperationResp_OperationReturnCode_name = common.OperationResp_OperationReturnCode_name
+var OperationResp_OperationReturnCode_value = common.OperationResp_OperationReturnCode_value
+
+const OperationResp_OPERATION_SUCCESS = OperationResp_OperationReturnCode(common.OperationResp_OPERATION_SUCCESS)
+const OperationResp_OPERATION_FAILURE = OperationResp_OperationReturnCode(common.OperationResp_OPERATION_FAILURE)
+const OperationResp_OPERATION_UNSUPPORTED = OperationResp_OperationReturnCode(common.OperationResp_OPERATION_UNSUPPORTED)
+
+// OfpHeader from public import voltha_protos/openflow_13.proto
+type OfpHeader = openflow_13.OfpHeader
+
+// OfpHelloElemHeader from public import voltha_protos/openflow_13.proto
+type OfpHelloElemHeader = openflow_13.OfpHelloElemHeader
+type OfpHelloElemHeader_Versionbitmap = openflow_13.OfpHelloElemHeader_Versionbitmap
+
+// OfpHelloElemVersionbitmap from public import voltha_protos/openflow_13.proto
+type OfpHelloElemVersionbitmap = openflow_13.OfpHelloElemVersionbitmap
+
+// OfpHello from public import voltha_protos/openflow_13.proto
+type OfpHello = openflow_13.OfpHello
+
+// OfpSwitchConfig from public import voltha_protos/openflow_13.proto
+type OfpSwitchConfig = openflow_13.OfpSwitchConfig
+
+// OfpTableMod from public import voltha_protos/openflow_13.proto
+type OfpTableMod = openflow_13.OfpTableMod
+
+// OfpPort from public import voltha_protos/openflow_13.proto
+type OfpPort = openflow_13.OfpPort
+
+// OfpSwitchFeatures from public import voltha_protos/openflow_13.proto
+type OfpSwitchFeatures = openflow_13.OfpSwitchFeatures
+
+// OfpPortStatus from public import voltha_protos/openflow_13.proto
+type OfpPortStatus = openflow_13.OfpPortStatus
+
+// OfpPortMod from public import voltha_protos/openflow_13.proto
+type OfpPortMod = openflow_13.OfpPortMod
+
+// OfpMatch from public import voltha_protos/openflow_13.proto
+type OfpMatch = openflow_13.OfpMatch
+
+// OfpOxmField from public import voltha_protos/openflow_13.proto
+type OfpOxmField = openflow_13.OfpOxmField
+type OfpOxmField_OfbField = openflow_13.OfpOxmField_OfbField
+type OfpOxmField_ExperimenterField = openflow_13.OfpOxmField_ExperimenterField
+
+// OfpOxmOfbField from public import voltha_protos/openflow_13.proto
+type OfpOxmOfbField = openflow_13.OfpOxmOfbField
+type OfpOxmOfbField_Port = openflow_13.OfpOxmOfbField_Port
+type OfpOxmOfbField_PhysicalPort = openflow_13.OfpOxmOfbField_PhysicalPort
+type OfpOxmOfbField_TableMetadata = openflow_13.OfpOxmOfbField_TableMetadata
+type OfpOxmOfbField_EthDst = openflow_13.OfpOxmOfbField_EthDst
+type OfpOxmOfbField_EthSrc = openflow_13.OfpOxmOfbField_EthSrc
+type OfpOxmOfbField_EthType = openflow_13.OfpOxmOfbField_EthType
+type OfpOxmOfbField_VlanVid = openflow_13.OfpOxmOfbField_VlanVid
+type OfpOxmOfbField_VlanPcp = openflow_13.OfpOxmOfbField_VlanPcp
+type OfpOxmOfbField_IpDscp = openflow_13.OfpOxmOfbField_IpDscp
+type OfpOxmOfbField_IpEcn = openflow_13.OfpOxmOfbField_IpEcn
+type OfpOxmOfbField_IpProto = openflow_13.OfpOxmOfbField_IpProto
+type OfpOxmOfbField_Ipv4Src = openflow_13.OfpOxmOfbField_Ipv4Src
+type OfpOxmOfbField_Ipv4Dst = openflow_13.OfpOxmOfbField_Ipv4Dst
+type OfpOxmOfbField_TcpSrc = openflow_13.OfpOxmOfbField_TcpSrc
+type OfpOxmOfbField_TcpDst = openflow_13.OfpOxmOfbField_TcpDst
+type OfpOxmOfbField_UdpSrc = openflow_13.OfpOxmOfbField_UdpSrc
+type OfpOxmOfbField_UdpDst = openflow_13.OfpOxmOfbField_UdpDst
+type OfpOxmOfbField_SctpSrc = openflow_13.OfpOxmOfbField_SctpSrc
+type OfpOxmOfbField_SctpDst = openflow_13.OfpOxmOfbField_SctpDst
+type OfpOxmOfbField_Icmpv4Type = openflow_13.OfpOxmOfbField_Icmpv4Type
+type OfpOxmOfbField_Icmpv4Code = openflow_13.OfpOxmOfbField_Icmpv4Code
+type OfpOxmOfbField_ArpOp = openflow_13.OfpOxmOfbField_ArpOp
+type OfpOxmOfbField_ArpSpa = openflow_13.OfpOxmOfbField_ArpSpa
+type OfpOxmOfbField_ArpTpa = openflow_13.OfpOxmOfbField_ArpTpa
+type OfpOxmOfbField_ArpSha = openflow_13.OfpOxmOfbField_ArpSha
+type OfpOxmOfbField_ArpTha = openflow_13.OfpOxmOfbField_ArpTha
+type OfpOxmOfbField_Ipv6Src = openflow_13.OfpOxmOfbField_Ipv6Src
+type OfpOxmOfbField_Ipv6Dst = openflow_13.OfpOxmOfbField_Ipv6Dst
+type OfpOxmOfbField_Ipv6Flabel = openflow_13.OfpOxmOfbField_Ipv6Flabel
+type OfpOxmOfbField_Icmpv6Type = openflow_13.OfpOxmOfbField_Icmpv6Type
+type OfpOxmOfbField_Icmpv6Code = openflow_13.OfpOxmOfbField_Icmpv6Code
+type OfpOxmOfbField_Ipv6NdTarget = openflow_13.OfpOxmOfbField_Ipv6NdTarget
+type OfpOxmOfbField_Ipv6NdSsl = openflow_13.OfpOxmOfbField_Ipv6NdSsl
+type OfpOxmOfbField_Ipv6NdTll = openflow_13.OfpOxmOfbField_Ipv6NdTll
+type OfpOxmOfbField_MplsLabel = openflow_13.OfpOxmOfbField_MplsLabel
+type OfpOxmOfbField_MplsTc = openflow_13.OfpOxmOfbField_MplsTc
+type OfpOxmOfbField_MplsBos = openflow_13.OfpOxmOfbField_MplsBos
+type OfpOxmOfbField_PbbIsid = openflow_13.OfpOxmOfbField_PbbIsid
+type OfpOxmOfbField_TunnelId = openflow_13.OfpOxmOfbField_TunnelId
+type OfpOxmOfbField_Ipv6Exthdr = openflow_13.OfpOxmOfbField_Ipv6Exthdr
+type OfpOxmOfbField_TableMetadataMask = openflow_13.OfpOxmOfbField_TableMetadataMask
+type OfpOxmOfbField_EthDstMask = openflow_13.OfpOxmOfbField_EthDstMask
+type OfpOxmOfbField_EthSrcMask = openflow_13.OfpOxmOfbField_EthSrcMask
+type OfpOxmOfbField_VlanVidMask = openflow_13.OfpOxmOfbField_VlanVidMask
+type OfpOxmOfbField_Ipv4SrcMask = openflow_13.OfpOxmOfbField_Ipv4SrcMask
+type OfpOxmOfbField_Ipv4DstMask = openflow_13.OfpOxmOfbField_Ipv4DstMask
+type OfpOxmOfbField_ArpSpaMask = openflow_13.OfpOxmOfbField_ArpSpaMask
+type OfpOxmOfbField_ArpTpaMask = openflow_13.OfpOxmOfbField_ArpTpaMask
+type OfpOxmOfbField_Ipv6SrcMask = openflow_13.OfpOxmOfbField_Ipv6SrcMask
+type OfpOxmOfbField_Ipv6DstMask = openflow_13.OfpOxmOfbField_Ipv6DstMask
+type OfpOxmOfbField_Ipv6FlabelMask = openflow_13.OfpOxmOfbField_Ipv6FlabelMask
+type OfpOxmOfbField_PbbIsidMask = openflow_13.OfpOxmOfbField_PbbIsidMask
+type OfpOxmOfbField_TunnelIdMask = openflow_13.OfpOxmOfbField_TunnelIdMask
+type OfpOxmOfbField_Ipv6ExthdrMask = openflow_13.OfpOxmOfbField_Ipv6ExthdrMask
+
+// OfpOxmExperimenterField from public import voltha_protos/openflow_13.proto
+type OfpOxmExperimenterField = openflow_13.OfpOxmExperimenterField
+
+// OfpAction from public import voltha_protos/openflow_13.proto
+type OfpAction = openflow_13.OfpAction
+type OfpAction_Output = openflow_13.OfpAction_Output
+type OfpAction_MplsTtl = openflow_13.OfpAction_MplsTtl
+type OfpAction_Push = openflow_13.OfpAction_Push
+type OfpAction_PopMpls = openflow_13.OfpAction_PopMpls
+type OfpAction_Group = openflow_13.OfpAction_Group
+type OfpAction_NwTtl = openflow_13.OfpAction_NwTtl
+type OfpAction_SetField = openflow_13.OfpAction_SetField
+type OfpAction_Experimenter = openflow_13.OfpAction_Experimenter
+
+// OfpActionOutput from public import voltha_protos/openflow_13.proto
+type OfpActionOutput = openflow_13.OfpActionOutput
+
+// OfpActionMplsTtl from public import voltha_protos/openflow_13.proto
+type OfpActionMplsTtl = openflow_13.OfpActionMplsTtl
+
+// OfpActionPush from public import voltha_protos/openflow_13.proto
+type OfpActionPush = openflow_13.OfpActionPush
+
+// OfpActionPopMpls from public import voltha_protos/openflow_13.proto
+type OfpActionPopMpls = openflow_13.OfpActionPopMpls
+
+// OfpActionGroup from public import voltha_protos/openflow_13.proto
+type OfpActionGroup = openflow_13.OfpActionGroup
+
+// OfpActionNwTtl from public import voltha_protos/openflow_13.proto
+type OfpActionNwTtl = openflow_13.OfpActionNwTtl
+
+// OfpActionSetField from public import voltha_protos/openflow_13.proto
+type OfpActionSetField = openflow_13.OfpActionSetField
+
+// OfpActionExperimenter from public import voltha_protos/openflow_13.proto
+type OfpActionExperimenter = openflow_13.OfpActionExperimenter
+
+// OfpInstruction from public import voltha_protos/openflow_13.proto
+type OfpInstruction = openflow_13.OfpInstruction
+type OfpInstruction_GotoTable = openflow_13.OfpInstruction_GotoTable
+type OfpInstruction_WriteMetadata = openflow_13.OfpInstruction_WriteMetadata
+type OfpInstruction_Actions = openflow_13.OfpInstruction_Actions
+type OfpInstruction_Meter = openflow_13.OfpInstruction_Meter
+type OfpInstruction_Experimenter = openflow_13.OfpInstruction_Experimenter
+
+// OfpInstructionGotoTable from public import voltha_protos/openflow_13.proto
+type OfpInstructionGotoTable = openflow_13.OfpInstructionGotoTable
+
+// OfpInstructionWriteMetadata from public import voltha_protos/openflow_13.proto
+type OfpInstructionWriteMetadata = openflow_13.OfpInstructionWriteMetadata
+
+// OfpInstructionActions from public import voltha_protos/openflow_13.proto
+type OfpInstructionActions = openflow_13.OfpInstructionActions
+
+// OfpInstructionMeter from public import voltha_protos/openflow_13.proto
+type OfpInstructionMeter = openflow_13.OfpInstructionMeter
+
+// OfpInstructionExperimenter from public import voltha_protos/openflow_13.proto
+type OfpInstructionExperimenter = openflow_13.OfpInstructionExperimenter
+
+// OfpFlowMod from public import voltha_protos/openflow_13.proto
+type OfpFlowMod = openflow_13.OfpFlowMod
+
+// OfpBucket from public import voltha_protos/openflow_13.proto
+type OfpBucket = openflow_13.OfpBucket
+
+// OfpGroupMod from public import voltha_protos/openflow_13.proto
+type OfpGroupMod = openflow_13.OfpGroupMod
+
+// OfpPacketOut from public import voltha_protos/openflow_13.proto
+type OfpPacketOut = openflow_13.OfpPacketOut
+
+// OfpPacketIn from public import voltha_protos/openflow_13.proto
+type OfpPacketIn = openflow_13.OfpPacketIn
+
+// OfpFlowRemoved from public import voltha_protos/openflow_13.proto
+type OfpFlowRemoved = openflow_13.OfpFlowRemoved
+
+// OfpMeterBandHeader from public import voltha_protos/openflow_13.proto
+type OfpMeterBandHeader = openflow_13.OfpMeterBandHeader
+type OfpMeterBandHeader_Drop = openflow_13.OfpMeterBandHeader_Drop
+type OfpMeterBandHeader_DscpRemark = openflow_13.OfpMeterBandHeader_DscpRemark
+type OfpMeterBandHeader_Experimenter = openflow_13.OfpMeterBandHeader_Experimenter
+
+// OfpMeterBandDrop from public import voltha_protos/openflow_13.proto
+type OfpMeterBandDrop = openflow_13.OfpMeterBandDrop
+
+// OfpMeterBandDscpRemark from public import voltha_protos/openflow_13.proto
+type OfpMeterBandDscpRemark = openflow_13.OfpMeterBandDscpRemark
+
+// OfpMeterBandExperimenter from public import voltha_protos/openflow_13.proto
+type OfpMeterBandExperimenter = openflow_13.OfpMeterBandExperimenter
+
+// OfpMeterMod from public import voltha_protos/openflow_13.proto
+type OfpMeterMod = openflow_13.OfpMeterMod
+
+// OfpErrorMsg from public import voltha_protos/openflow_13.proto
+type OfpErrorMsg = openflow_13.OfpErrorMsg
+
+// OfpErrorExperimenterMsg from public import voltha_protos/openflow_13.proto
+type OfpErrorExperimenterMsg = openflow_13.OfpErrorExperimenterMsg
+
+// OfpMultipartRequest from public import voltha_protos/openflow_13.proto
+type OfpMultipartRequest = openflow_13.OfpMultipartRequest
+
+// OfpMultipartReply from public import voltha_protos/openflow_13.proto
+type OfpMultipartReply = openflow_13.OfpMultipartReply
+
+// OfpDesc from public import voltha_protos/openflow_13.proto
+type OfpDesc = openflow_13.OfpDesc
+
+// OfpFlowStatsRequest from public import voltha_protos/openflow_13.proto
+type OfpFlowStatsRequest = openflow_13.OfpFlowStatsRequest
+
+// OfpFlowStats from public import voltha_protos/openflow_13.proto
+type OfpFlowStats = openflow_13.OfpFlowStats
+
+// OfpAggregateStatsRequest from public import voltha_protos/openflow_13.proto
+type OfpAggregateStatsRequest = openflow_13.OfpAggregateStatsRequest
+
+// OfpAggregateStatsReply from public import voltha_protos/openflow_13.proto
+type OfpAggregateStatsReply = openflow_13.OfpAggregateStatsReply
+
+// OfpTableFeatureProperty from public import voltha_protos/openflow_13.proto
+type OfpTableFeatureProperty = openflow_13.OfpTableFeatureProperty
+type OfpTableFeatureProperty_Instructions = openflow_13.OfpTableFeatureProperty_Instructions
+type OfpTableFeatureProperty_NextTables = openflow_13.OfpTableFeatureProperty_NextTables
+type OfpTableFeatureProperty_Actions = openflow_13.OfpTableFeatureProperty_Actions
+type OfpTableFeatureProperty_Oxm = openflow_13.OfpTableFeatureProperty_Oxm
+type OfpTableFeatureProperty_Experimenter = openflow_13.OfpTableFeatureProperty_Experimenter
+
+// OfpTableFeaturePropInstructions from public import voltha_protos/openflow_13.proto
+type OfpTableFeaturePropInstructions = openflow_13.OfpTableFeaturePropInstructions
+
+// OfpTableFeaturePropNextTables from public import voltha_protos/openflow_13.proto
+type OfpTableFeaturePropNextTables = openflow_13.OfpTableFeaturePropNextTables
+
+// OfpTableFeaturePropActions from public import voltha_protos/openflow_13.proto
+type OfpTableFeaturePropActions = openflow_13.OfpTableFeaturePropActions
+
+// OfpTableFeaturePropOxm from public import voltha_protos/openflow_13.proto
+type OfpTableFeaturePropOxm = openflow_13.OfpTableFeaturePropOxm
+
+// OfpTableFeaturePropExperimenter from public import voltha_protos/openflow_13.proto
+type OfpTableFeaturePropExperimenter = openflow_13.OfpTableFeaturePropExperimenter
+
+// OfpTableFeatures from public import voltha_protos/openflow_13.proto
+type OfpTableFeatures = openflow_13.OfpTableFeatures
+
+// OfpTableStats from public import voltha_protos/openflow_13.proto
+type OfpTableStats = openflow_13.OfpTableStats
+
+// OfpPortStatsRequest from public import voltha_protos/openflow_13.proto
+type OfpPortStatsRequest = openflow_13.OfpPortStatsRequest
+
+// OfpPortStats from public import voltha_protos/openflow_13.proto
+type OfpPortStats = openflow_13.OfpPortStats
+
+// OfpGroupStatsRequest from public import voltha_protos/openflow_13.proto
+type OfpGroupStatsRequest = openflow_13.OfpGroupStatsRequest
+
+// OfpBucketCounter from public import voltha_protos/openflow_13.proto
+type OfpBucketCounter = openflow_13.OfpBucketCounter
+
+// OfpGroupStats from public import voltha_protos/openflow_13.proto
+type OfpGroupStats = openflow_13.OfpGroupStats
+
+// OfpGroupDesc from public import voltha_protos/openflow_13.proto
+type OfpGroupDesc = openflow_13.OfpGroupDesc
+
+// OfpGroupEntry from public import voltha_protos/openflow_13.proto
+type OfpGroupEntry = openflow_13.OfpGroupEntry
+
+// OfpGroupFeatures from public import voltha_protos/openflow_13.proto
+type OfpGroupFeatures = openflow_13.OfpGroupFeatures
+
+// OfpMeterMultipartRequest from public import voltha_protos/openflow_13.proto
+type OfpMeterMultipartRequest = openflow_13.OfpMeterMultipartRequest
+
+// OfpMeterBandStats from public import voltha_protos/openflow_13.proto
+type OfpMeterBandStats = openflow_13.OfpMeterBandStats
+
+// OfpMeterStats from public import voltha_protos/openflow_13.proto
+type OfpMeterStats = openflow_13.OfpMeterStats
+
+// OfpMeterConfig from public import voltha_protos/openflow_13.proto
+type OfpMeterConfig = openflow_13.OfpMeterConfig
+
+// OfpMeterFeatures from public import voltha_protos/openflow_13.proto
+type OfpMeterFeatures = openflow_13.OfpMeterFeatures
+
+// OfpMeterEntry from public import voltha_protos/openflow_13.proto
+type OfpMeterEntry = openflow_13.OfpMeterEntry
+
+// OfpExperimenterMultipartHeader from public import voltha_protos/openflow_13.proto
+type OfpExperimenterMultipartHeader = openflow_13.OfpExperimenterMultipartHeader
+
+// OfpExperimenterHeader from public import voltha_protos/openflow_13.proto
+type OfpExperimenterHeader = openflow_13.OfpExperimenterHeader
+
+// OfpQueuePropHeader from public import voltha_protos/openflow_13.proto
+type OfpQueuePropHeader = openflow_13.OfpQueuePropHeader
+
+// OfpQueuePropMinRate from public import voltha_protos/openflow_13.proto
+type OfpQueuePropMinRate = openflow_13.OfpQueuePropMinRate
+
+// OfpQueuePropMaxRate from public import voltha_protos/openflow_13.proto
+type OfpQueuePropMaxRate = openflow_13.OfpQueuePropMaxRate
+
+// OfpQueuePropExperimenter from public import voltha_protos/openflow_13.proto
+type OfpQueuePropExperimenter = openflow_13.OfpQueuePropExperimenter
+
+// OfpPacketQueue from public import voltha_protos/openflow_13.proto
+type OfpPacketQueue = openflow_13.OfpPacketQueue
+
+// OfpQueueGetConfigRequest from public import voltha_protos/openflow_13.proto
+type OfpQueueGetConfigRequest = openflow_13.OfpQueueGetConfigRequest
+
+// OfpQueueGetConfigReply from public import voltha_protos/openflow_13.proto
+type OfpQueueGetConfigReply = openflow_13.OfpQueueGetConfigReply
+
+// OfpActionSetQueue from public import voltha_protos/openflow_13.proto
+type OfpActionSetQueue = openflow_13.OfpActionSetQueue
+
+// OfpQueueStatsRequest from public import voltha_protos/openflow_13.proto
+type OfpQueueStatsRequest = openflow_13.OfpQueueStatsRequest
+
+// OfpQueueStats from public import voltha_protos/openflow_13.proto
+type OfpQueueStats = openflow_13.OfpQueueStats
+
+// OfpRoleRequest from public import voltha_protos/openflow_13.proto
+type OfpRoleRequest = openflow_13.OfpRoleRequest
+
+// OfpAsyncConfig from public import voltha_protos/openflow_13.proto
+type OfpAsyncConfig = openflow_13.OfpAsyncConfig
+
+// MeterModUpdate from public import voltha_protos/openflow_13.proto
+type MeterModUpdate = openflow_13.MeterModUpdate
+
+// MeterStatsReply from public import voltha_protos/openflow_13.proto
+type MeterStatsReply = openflow_13.MeterStatsReply
+
+// FlowTableUpdate from public import voltha_protos/openflow_13.proto
+type FlowTableUpdate = openflow_13.FlowTableUpdate
+
+// FlowGroupTableUpdate from public import voltha_protos/openflow_13.proto
+type FlowGroupTableUpdate = openflow_13.FlowGroupTableUpdate
+
+// Flows from public import voltha_protos/openflow_13.proto
+type Flows = openflow_13.Flows
+
+// Meters from public import voltha_protos/openflow_13.proto
+type Meters = openflow_13.Meters
+
+// FlowGroups from public import voltha_protos/openflow_13.proto
+type FlowGroups = openflow_13.FlowGroups
+
+// FlowChanges from public import voltha_protos/openflow_13.proto
+type FlowChanges = openflow_13.FlowChanges
+
+// FlowGroupChanges from public import voltha_protos/openflow_13.proto
+type FlowGroupChanges = openflow_13.FlowGroupChanges
+
+// PacketIn from public import voltha_protos/openflow_13.proto
+type PacketIn = openflow_13.PacketIn
+
+// PacketOut from public import voltha_protos/openflow_13.proto
+type PacketOut = openflow_13.PacketOut
+
+// ChangeEvent from public import voltha_protos/openflow_13.proto
+type ChangeEvent = openflow_13.ChangeEvent
+type ChangeEvent_PortStatus = openflow_13.ChangeEvent_PortStatus
+
+// OfpPortNo from public import voltha_protos/openflow_13.proto
+type OfpPortNo = openflow_13.OfpPortNo
+
+var OfpPortNo_name = openflow_13.OfpPortNo_name
+var OfpPortNo_value = openflow_13.OfpPortNo_value
+
+const OfpPortNo_OFPP_INVALID = OfpPortNo(openflow_13.OfpPortNo_OFPP_INVALID)
+const OfpPortNo_OFPP_MAX = OfpPortNo(openflow_13.OfpPortNo_OFPP_MAX)
+const OfpPortNo_OFPP_IN_PORT = OfpPortNo(openflow_13.OfpPortNo_OFPP_IN_PORT)
+const OfpPortNo_OFPP_TABLE = OfpPortNo(openflow_13.OfpPortNo_OFPP_TABLE)
+const OfpPortNo_OFPP_NORMAL = OfpPortNo(openflow_13.OfpPortNo_OFPP_NORMAL)
+const OfpPortNo_OFPP_FLOOD = OfpPortNo(openflow_13.OfpPortNo_OFPP_FLOOD)
+const OfpPortNo_OFPP_ALL = OfpPortNo(openflow_13.OfpPortNo_OFPP_ALL)
+const OfpPortNo_OFPP_CONTROLLER = OfpPortNo(openflow_13.OfpPortNo_OFPP_CONTROLLER)
+const OfpPortNo_OFPP_LOCAL = OfpPortNo(openflow_13.OfpPortNo_OFPP_LOCAL)
+const OfpPortNo_OFPP_ANY = OfpPortNo(openflow_13.OfpPortNo_OFPP_ANY)
+
+// OfpType from public import voltha_protos/openflow_13.proto
+type OfpType = openflow_13.OfpType
+
+var OfpType_name = openflow_13.OfpType_name
+var OfpType_value = openflow_13.OfpType_value
+
+const OfpType_OFPT_HELLO = OfpType(openflow_13.OfpType_OFPT_HELLO)
+const OfpType_OFPT_ERROR = OfpType(openflow_13.OfpType_OFPT_ERROR)
+const OfpType_OFPT_ECHO_REQUEST = OfpType(openflow_13.OfpType_OFPT_ECHO_REQUEST)
+const OfpType_OFPT_ECHO_REPLY = OfpType(openflow_13.OfpType_OFPT_ECHO_REPLY)
+const OfpType_OFPT_EXPERIMENTER = OfpType(openflow_13.OfpType_OFPT_EXPERIMENTER)
+const OfpType_OFPT_FEATURES_REQUEST = OfpType(openflow_13.OfpType_OFPT_FEATURES_REQUEST)
+const OfpType_OFPT_FEATURES_REPLY = OfpType(openflow_13.OfpType_OFPT_FEATURES_REPLY)
+const OfpType_OFPT_GET_CONFIG_REQUEST = OfpType(openflow_13.OfpType_OFPT_GET_CONFIG_REQUEST)
+const OfpType_OFPT_GET_CONFIG_REPLY = OfpType(openflow_13.OfpType_OFPT_GET_CONFIG_REPLY)
+const OfpType_OFPT_SET_CONFIG = OfpType(openflow_13.OfpType_OFPT_SET_CONFIG)
+const OfpType_OFPT_PACKET_IN = OfpType(openflow_13.OfpType_OFPT_PACKET_IN)
+const OfpType_OFPT_FLOW_REMOVED = OfpType(openflow_13.OfpType_OFPT_FLOW_REMOVED)
+const OfpType_OFPT_PORT_STATUS = OfpType(openflow_13.OfpType_OFPT_PORT_STATUS)
+const OfpType_OFPT_PACKET_OUT = OfpType(openflow_13.OfpType_OFPT_PACKET_OUT)
+const OfpType_OFPT_FLOW_MOD = OfpType(openflow_13.OfpType_OFPT_FLOW_MOD)
+const OfpType_OFPT_GROUP_MOD = OfpType(openflow_13.OfpType_OFPT_GROUP_MOD)
+const OfpType_OFPT_PORT_MOD = OfpType(openflow_13.OfpType_OFPT_PORT_MOD)
+const OfpType_OFPT_TABLE_MOD = OfpType(openflow_13.OfpType_OFPT_TABLE_MOD)
+const OfpType_OFPT_MULTIPART_REQUEST = OfpType(openflow_13.OfpType_OFPT_MULTIPART_REQUEST)
+const OfpType_OFPT_MULTIPART_REPLY = OfpType(openflow_13.OfpType_OFPT_MULTIPART_REPLY)
+const OfpType_OFPT_BARRIER_REQUEST = OfpType(openflow_13.OfpType_OFPT_BARRIER_REQUEST)
+const OfpType_OFPT_BARRIER_REPLY = OfpType(openflow_13.OfpType_OFPT_BARRIER_REPLY)
+const OfpType_OFPT_QUEUE_GET_CONFIG_REQUEST = OfpType(openflow_13.OfpType_OFPT_QUEUE_GET_CONFIG_REQUEST)
+const OfpType_OFPT_QUEUE_GET_CONFIG_REPLY = OfpType(openflow_13.OfpType_OFPT_QUEUE_GET_CONFIG_REPLY)
+const OfpType_OFPT_ROLE_REQUEST = OfpType(openflow_13.OfpType_OFPT_ROLE_REQUEST)
+const OfpType_OFPT_ROLE_REPLY = OfpType(openflow_13.OfpType_OFPT_ROLE_REPLY)
+const OfpType_OFPT_GET_ASYNC_REQUEST = OfpType(openflow_13.OfpType_OFPT_GET_ASYNC_REQUEST)
+const OfpType_OFPT_GET_ASYNC_REPLY = OfpType(openflow_13.OfpType_OFPT_GET_ASYNC_REPLY)
+const OfpType_OFPT_SET_ASYNC = OfpType(openflow_13.OfpType_OFPT_SET_ASYNC)
+const OfpType_OFPT_METER_MOD = OfpType(openflow_13.OfpType_OFPT_METER_MOD)
+
+// OfpHelloElemType from public import voltha_protos/openflow_13.proto
+type OfpHelloElemType = openflow_13.OfpHelloElemType
+
+var OfpHelloElemType_name = openflow_13.OfpHelloElemType_name
+var OfpHelloElemType_value = openflow_13.OfpHelloElemType_value
+
+const OfpHelloElemType_OFPHET_INVALID = OfpHelloElemType(openflow_13.OfpHelloElemType_OFPHET_INVALID)
+const OfpHelloElemType_OFPHET_VERSIONBITMAP = OfpHelloElemType(openflow_13.OfpHelloElemType_OFPHET_VERSIONBITMAP)
+
+// OfpConfigFlags from public import voltha_protos/openflow_13.proto
+type OfpConfigFlags = openflow_13.OfpConfigFlags
+
+var OfpConfigFlags_name = openflow_13.OfpConfigFlags_name
+var OfpConfigFlags_value = openflow_13.OfpConfigFlags_value
+
+const OfpConfigFlags_OFPC_FRAG_NORMAL = OfpConfigFlags(openflow_13.OfpConfigFlags_OFPC_FRAG_NORMAL)
+const OfpConfigFlags_OFPC_FRAG_DROP = OfpConfigFlags(openflow_13.OfpConfigFlags_OFPC_FRAG_DROP)
+const OfpConfigFlags_OFPC_FRAG_REASM = OfpConfigFlags(openflow_13.OfpConfigFlags_OFPC_FRAG_REASM)
+const OfpConfigFlags_OFPC_FRAG_MASK = OfpConfigFlags(openflow_13.OfpConfigFlags_OFPC_FRAG_MASK)
+
+// OfpTableConfig from public import voltha_protos/openflow_13.proto
+type OfpTableConfig = openflow_13.OfpTableConfig
+
+var OfpTableConfig_name = openflow_13.OfpTableConfig_name
+var OfpTableConfig_value = openflow_13.OfpTableConfig_value
+
+const OfpTableConfig_OFPTC_INVALID = OfpTableConfig(openflow_13.OfpTableConfig_OFPTC_INVALID)
+const OfpTableConfig_OFPTC_DEPRECATED_MASK = OfpTableConfig(openflow_13.OfpTableConfig_OFPTC_DEPRECATED_MASK)
+
+// OfpTable from public import voltha_protos/openflow_13.proto
+type OfpTable = openflow_13.OfpTable
+
+var OfpTable_name = openflow_13.OfpTable_name
+var OfpTable_value = openflow_13.OfpTable_value
+
+const OfpTable_OFPTT_INVALID = OfpTable(openflow_13.OfpTable_OFPTT_INVALID)
+const OfpTable_OFPTT_MAX = OfpTable(openflow_13.OfpTable_OFPTT_MAX)
+const OfpTable_OFPTT_ALL = OfpTable(openflow_13.OfpTable_OFPTT_ALL)
+
+// OfpCapabilities from public import voltha_protos/openflow_13.proto
+type OfpCapabilities = openflow_13.OfpCapabilities
+
+var OfpCapabilities_name = openflow_13.OfpCapabilities_name
+var OfpCapabilities_value = openflow_13.OfpCapabilities_value
+
+const OfpCapabilities_OFPC_INVALID = OfpCapabilities(openflow_13.OfpCapabilities_OFPC_INVALID)
+const OfpCapabilities_OFPC_FLOW_STATS = OfpCapabilities(openflow_13.OfpCapabilities_OFPC_FLOW_STATS)
+const OfpCapabilities_OFPC_TABLE_STATS = OfpCapabilities(openflow_13.OfpCapabilities_OFPC_TABLE_STATS)
+const OfpCapabilities_OFPC_PORT_STATS = OfpCapabilities(openflow_13.OfpCapabilities_OFPC_PORT_STATS)
+const OfpCapabilities_OFPC_GROUP_STATS = OfpCapabilities(openflow_13.OfpCapabilities_OFPC_GROUP_STATS)
+const OfpCapabilities_OFPC_IP_REASM = OfpCapabilities(openflow_13.OfpCapabilities_OFPC_IP_REASM)
+const OfpCapabilities_OFPC_QUEUE_STATS = OfpCapabilities(openflow_13.OfpCapabilities_OFPC_QUEUE_STATS)
+const OfpCapabilities_OFPC_PORT_BLOCKED = OfpCapabilities(openflow_13.OfpCapabilities_OFPC_PORT_BLOCKED)
+
+// OfpPortConfig from public import voltha_protos/openflow_13.proto
+type OfpPortConfig = openflow_13.OfpPortConfig
+
+var OfpPortConfig_name = openflow_13.OfpPortConfig_name
+var OfpPortConfig_value = openflow_13.OfpPortConfig_value
+
+const OfpPortConfig_OFPPC_INVALID = OfpPortConfig(openflow_13.OfpPortConfig_OFPPC_INVALID)
+const OfpPortConfig_OFPPC_PORT_DOWN = OfpPortConfig(openflow_13.OfpPortConfig_OFPPC_PORT_DOWN)
+const OfpPortConfig_OFPPC_NO_RECV = OfpPortConfig(openflow_13.OfpPortConfig_OFPPC_NO_RECV)
+const OfpPortConfig_OFPPC_NO_FWD = OfpPortConfig(openflow_13.OfpPortConfig_OFPPC_NO_FWD)
+const OfpPortConfig_OFPPC_NO_PACKET_IN = OfpPortConfig(openflow_13.OfpPortConfig_OFPPC_NO_PACKET_IN)
+
+// OfpPortState from public import voltha_protos/openflow_13.proto
+type OfpPortState = openflow_13.OfpPortState
+
+var OfpPortState_name = openflow_13.OfpPortState_name
+var OfpPortState_value = openflow_13.OfpPortState_value
+
+const OfpPortState_OFPPS_INVALID = OfpPortState(openflow_13.OfpPortState_OFPPS_INVALID)
+const OfpPortState_OFPPS_LINK_DOWN = OfpPortState(openflow_13.OfpPortState_OFPPS_LINK_DOWN)
+const OfpPortState_OFPPS_BLOCKED = OfpPortState(openflow_13.OfpPortState_OFPPS_BLOCKED)
+const OfpPortState_OFPPS_LIVE = OfpPortState(openflow_13.OfpPortState_OFPPS_LIVE)
+
+// OfpPortFeatures from public import voltha_protos/openflow_13.proto
+type OfpPortFeatures = openflow_13.OfpPortFeatures
+
+var OfpPortFeatures_name = openflow_13.OfpPortFeatures_name
+var OfpPortFeatures_value = openflow_13.OfpPortFeatures_value
+
+const OfpPortFeatures_OFPPF_INVALID = OfpPortFeatures(openflow_13.OfpPortFeatures_OFPPF_INVALID)
+const OfpPortFeatures_OFPPF_10MB_HD = OfpPortFeatures(openflow_13.OfpPortFeatures_OFPPF_10MB_HD)
+const OfpPortFeatures_OFPPF_10MB_FD = OfpPortFeatures(openflow_13.OfpPortFeatures_OFPPF_10MB_FD)
+const OfpPortFeatures_OFPPF_100MB_HD = OfpPortFeatures(openflow_13.OfpPortFeatures_OFPPF_100MB_HD)
+const OfpPortFeatures_OFPPF_100MB_FD = OfpPortFeatures(openflow_13.OfpPortFeatures_OFPPF_100MB_FD)
+const OfpPortFeatures_OFPPF_1GB_HD = OfpPortFeatures(openflow_13.OfpPortFeatures_OFPPF_1GB_HD)
+const OfpPortFeatures_OFPPF_1GB_FD = OfpPortFeatures(openflow_13.OfpPortFeatures_OFPPF_1GB_FD)
+const OfpPortFeatures_OFPPF_10GB_FD = OfpPortFeatures(openflow_13.OfpPortFeatures_OFPPF_10GB_FD)
+const OfpPortFeatures_OFPPF_40GB_FD = OfpPortFeatures(openflow_13.OfpPortFeatures_OFPPF_40GB_FD)
+const OfpPortFeatures_OFPPF_100GB_FD = OfpPortFeatures(openflow_13.OfpPortFeatures_OFPPF_100GB_FD)
+const OfpPortFeatures_OFPPF_1TB_FD = OfpPortFeatures(openflow_13.OfpPortFeatures_OFPPF_1TB_FD)
+const OfpPortFeatures_OFPPF_OTHER = OfpPortFeatures(openflow_13.OfpPortFeatures_OFPPF_OTHER)
+const OfpPortFeatures_OFPPF_COPPER = OfpPortFeatures(openflow_13.OfpPortFeatures_OFPPF_COPPER)
+const OfpPortFeatures_OFPPF_FIBER = OfpPortFeatures(openflow_13.OfpPortFeatures_OFPPF_FIBER)
+const OfpPortFeatures_OFPPF_AUTONEG = OfpPortFeatures(openflow_13.OfpPortFeatures_OFPPF_AUTONEG)
+const OfpPortFeatures_OFPPF_PAUSE = OfpPortFeatures(openflow_13.OfpPortFeatures_OFPPF_PAUSE)
+const OfpPortFeatures_OFPPF_PAUSE_ASYM = OfpPortFeatures(openflow_13.OfpPortFeatures_OFPPF_PAUSE_ASYM)
+
+// OfpPortReason from public import voltha_protos/openflow_13.proto
+type OfpPortReason = openflow_13.OfpPortReason
+
+var OfpPortReason_name = openflow_13.OfpPortReason_name
+var OfpPortReason_value = openflow_13.OfpPortReason_value
+
+const OfpPortReason_OFPPR_ADD = OfpPortReason(openflow_13.OfpPortReason_OFPPR_ADD)
+const OfpPortReason_OFPPR_DELETE = OfpPortReason(openflow_13.OfpPortReason_OFPPR_DELETE)
+const OfpPortReason_OFPPR_MODIFY = OfpPortReason(openflow_13.OfpPortReason_OFPPR_MODIFY)
+
+// OfpMatchType from public import voltha_protos/openflow_13.proto
+type OfpMatchType = openflow_13.OfpMatchType
+
+var OfpMatchType_name = openflow_13.OfpMatchType_name
+var OfpMatchType_value = openflow_13.OfpMatchType_value
+
+const OfpMatchType_OFPMT_STANDARD = OfpMatchType(openflow_13.OfpMatchType_OFPMT_STANDARD)
+const OfpMatchType_OFPMT_OXM = OfpMatchType(openflow_13.OfpMatchType_OFPMT_OXM)
+
+// OfpOxmClass from public import voltha_protos/openflow_13.proto
+type OfpOxmClass = openflow_13.OfpOxmClass
+
+var OfpOxmClass_name = openflow_13.OfpOxmClass_name
+var OfpOxmClass_value = openflow_13.OfpOxmClass_value
+
+const OfpOxmClass_OFPXMC_NXM_0 = OfpOxmClass(openflow_13.OfpOxmClass_OFPXMC_NXM_0)
+const OfpOxmClass_OFPXMC_NXM_1 = OfpOxmClass(openflow_13.OfpOxmClass_OFPXMC_NXM_1)
+const OfpOxmClass_OFPXMC_OPENFLOW_BASIC = OfpOxmClass(openflow_13.OfpOxmClass_OFPXMC_OPENFLOW_BASIC)
+const OfpOxmClass_OFPXMC_EXPERIMENTER = OfpOxmClass(openflow_13.OfpOxmClass_OFPXMC_EXPERIMENTER)
+
+// OxmOfbFieldTypes from public import voltha_protos/openflow_13.proto
+type OxmOfbFieldTypes = openflow_13.OxmOfbFieldTypes
+
+var OxmOfbFieldTypes_name = openflow_13.OxmOfbFieldTypes_name
+var OxmOfbFieldTypes_value = openflow_13.OxmOfbFieldTypes_value
+
+const OxmOfbFieldTypes_OFPXMT_OFB_IN_PORT = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_IN_PORT)
+const OxmOfbFieldTypes_OFPXMT_OFB_IN_PHY_PORT = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_IN_PHY_PORT)
+const OxmOfbFieldTypes_OFPXMT_OFB_METADATA = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_METADATA)
+const OxmOfbFieldTypes_OFPXMT_OFB_ETH_DST = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_ETH_DST)
+const OxmOfbFieldTypes_OFPXMT_OFB_ETH_SRC = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_ETH_SRC)
+const OxmOfbFieldTypes_OFPXMT_OFB_ETH_TYPE = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_ETH_TYPE)
+const OxmOfbFieldTypes_OFPXMT_OFB_VLAN_VID = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_VLAN_VID)
+const OxmOfbFieldTypes_OFPXMT_OFB_VLAN_PCP = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_VLAN_PCP)
+const OxmOfbFieldTypes_OFPXMT_OFB_IP_DSCP = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_IP_DSCP)
+const OxmOfbFieldTypes_OFPXMT_OFB_IP_ECN = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_IP_ECN)
+const OxmOfbFieldTypes_OFPXMT_OFB_IP_PROTO = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_IP_PROTO)
+const OxmOfbFieldTypes_OFPXMT_OFB_IPV4_SRC = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_IPV4_SRC)
+const OxmOfbFieldTypes_OFPXMT_OFB_IPV4_DST = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_IPV4_DST)
+const OxmOfbFieldTypes_OFPXMT_OFB_TCP_SRC = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_TCP_SRC)
+const OxmOfbFieldTypes_OFPXMT_OFB_TCP_DST = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_TCP_DST)
+const OxmOfbFieldTypes_OFPXMT_OFB_UDP_SRC = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_UDP_SRC)
+const OxmOfbFieldTypes_OFPXMT_OFB_UDP_DST = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_UDP_DST)
+const OxmOfbFieldTypes_OFPXMT_OFB_SCTP_SRC = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_SCTP_SRC)
+const OxmOfbFieldTypes_OFPXMT_OFB_SCTP_DST = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_SCTP_DST)
+const OxmOfbFieldTypes_OFPXMT_OFB_ICMPV4_TYPE = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV4_TYPE)
+const OxmOfbFieldTypes_OFPXMT_OFB_ICMPV4_CODE = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV4_CODE)
+const OxmOfbFieldTypes_OFPXMT_OFB_ARP_OP = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_ARP_OP)
+const OxmOfbFieldTypes_OFPXMT_OFB_ARP_SPA = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_ARP_SPA)
+const OxmOfbFieldTypes_OFPXMT_OFB_ARP_TPA = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_ARP_TPA)
+const OxmOfbFieldTypes_OFPXMT_OFB_ARP_SHA = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_ARP_SHA)
+const OxmOfbFieldTypes_OFPXMT_OFB_ARP_THA = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_ARP_THA)
+const OxmOfbFieldTypes_OFPXMT_OFB_IPV6_SRC = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_SRC)
+const OxmOfbFieldTypes_OFPXMT_OFB_IPV6_DST = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_DST)
+const OxmOfbFieldTypes_OFPXMT_OFB_IPV6_FLABEL = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_FLABEL)
+const OxmOfbFieldTypes_OFPXMT_OFB_ICMPV6_TYPE = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV6_TYPE)
+const OxmOfbFieldTypes_OFPXMT_OFB_ICMPV6_CODE = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV6_CODE)
+const OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_TARGET = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_TARGET)
+const OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_SLL = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_SLL)
+const OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_TLL = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_TLL)
+const OxmOfbFieldTypes_OFPXMT_OFB_MPLS_LABEL = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_MPLS_LABEL)
+const OxmOfbFieldTypes_OFPXMT_OFB_MPLS_TC = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_MPLS_TC)
+const OxmOfbFieldTypes_OFPXMT_OFB_MPLS_BOS = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_MPLS_BOS)
+const OxmOfbFieldTypes_OFPXMT_OFB_PBB_ISID = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_PBB_ISID)
+const OxmOfbFieldTypes_OFPXMT_OFB_TUNNEL_ID = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_TUNNEL_ID)
+const OxmOfbFieldTypes_OFPXMT_OFB_IPV6_EXTHDR = OxmOfbFieldTypes(openflow_13.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_EXTHDR)
+
+// OfpVlanId from public import voltha_protos/openflow_13.proto
+type OfpVlanId = openflow_13.OfpVlanId
+
+var OfpVlanId_name = openflow_13.OfpVlanId_name
+var OfpVlanId_value = openflow_13.OfpVlanId_value
+
+const OfpVlanId_OFPVID_NONE = OfpVlanId(openflow_13.OfpVlanId_OFPVID_NONE)
+const OfpVlanId_OFPVID_PRESENT = OfpVlanId(openflow_13.OfpVlanId_OFPVID_PRESENT)
+
+// OfpIpv6ExthdrFlags from public import voltha_protos/openflow_13.proto
+type OfpIpv6ExthdrFlags = openflow_13.OfpIpv6ExthdrFlags
+
+var OfpIpv6ExthdrFlags_name = openflow_13.OfpIpv6ExthdrFlags_name
+var OfpIpv6ExthdrFlags_value = openflow_13.OfpIpv6ExthdrFlags_value
+
+const OfpIpv6ExthdrFlags_OFPIEH_INVALID = OfpIpv6ExthdrFlags(openflow_13.OfpIpv6ExthdrFlags_OFPIEH_INVALID)
+const OfpIpv6ExthdrFlags_OFPIEH_NONEXT = OfpIpv6ExthdrFlags(openflow_13.OfpIpv6ExthdrFlags_OFPIEH_NONEXT)
+const OfpIpv6ExthdrFlags_OFPIEH_ESP = OfpIpv6ExthdrFlags(openflow_13.OfpIpv6ExthdrFlags_OFPIEH_ESP)
+const OfpIpv6ExthdrFlags_OFPIEH_AUTH = OfpIpv6ExthdrFlags(openflow_13.OfpIpv6ExthdrFlags_OFPIEH_AUTH)
+const OfpIpv6ExthdrFlags_OFPIEH_DEST = OfpIpv6ExthdrFlags(openflow_13.OfpIpv6ExthdrFlags_OFPIEH_DEST)
+const OfpIpv6ExthdrFlags_OFPIEH_FRAG = OfpIpv6ExthdrFlags(openflow_13.OfpIpv6ExthdrFlags_OFPIEH_FRAG)
+const OfpIpv6ExthdrFlags_OFPIEH_ROUTER = OfpIpv6ExthdrFlags(openflow_13.OfpIpv6ExthdrFlags_OFPIEH_ROUTER)
+const OfpIpv6ExthdrFlags_OFPIEH_HOP = OfpIpv6ExthdrFlags(openflow_13.OfpIpv6ExthdrFlags_OFPIEH_HOP)
+const OfpIpv6ExthdrFlags_OFPIEH_UNREP = OfpIpv6ExthdrFlags(openflow_13.OfpIpv6ExthdrFlags_OFPIEH_UNREP)
+const OfpIpv6ExthdrFlags_OFPIEH_UNSEQ = OfpIpv6ExthdrFlags(openflow_13.OfpIpv6ExthdrFlags_OFPIEH_UNSEQ)
+
+// OfpActionType from public import voltha_protos/openflow_13.proto
+type OfpActionType = openflow_13.OfpActionType
+
+var OfpActionType_name = openflow_13.OfpActionType_name
+var OfpActionType_value = openflow_13.OfpActionType_value
+
+const OfpActionType_OFPAT_OUTPUT = OfpActionType(openflow_13.OfpActionType_OFPAT_OUTPUT)
+const OfpActionType_OFPAT_COPY_TTL_OUT = OfpActionType(openflow_13.OfpActionType_OFPAT_COPY_TTL_OUT)
+const OfpActionType_OFPAT_COPY_TTL_IN = OfpActionType(openflow_13.OfpActionType_OFPAT_COPY_TTL_IN)
+const OfpActionType_OFPAT_SET_MPLS_TTL = OfpActionType(openflow_13.OfpActionType_OFPAT_SET_MPLS_TTL)
+const OfpActionType_OFPAT_DEC_MPLS_TTL = OfpActionType(openflow_13.OfpActionType_OFPAT_DEC_MPLS_TTL)
+const OfpActionType_OFPAT_PUSH_VLAN = OfpActionType(openflow_13.OfpActionType_OFPAT_PUSH_VLAN)
+const OfpActionType_OFPAT_POP_VLAN = OfpActionType(openflow_13.OfpActionType_OFPAT_POP_VLAN)
+const OfpActionType_OFPAT_PUSH_MPLS = OfpActionType(openflow_13.OfpActionType_OFPAT_PUSH_MPLS)
+const OfpActionType_OFPAT_POP_MPLS = OfpActionType(openflow_13.OfpActionType_OFPAT_POP_MPLS)
+const OfpActionType_OFPAT_SET_QUEUE = OfpActionType(openflow_13.OfpActionType_OFPAT_SET_QUEUE)
+const OfpActionType_OFPAT_GROUP = OfpActionType(openflow_13.OfpActionType_OFPAT_GROUP)
+const OfpActionType_OFPAT_SET_NW_TTL = OfpActionType(openflow_13.OfpActionType_OFPAT_SET_NW_TTL)
+const OfpActionType_OFPAT_DEC_NW_TTL = OfpActionType(openflow_13.OfpActionType_OFPAT_DEC_NW_TTL)
+const OfpActionType_OFPAT_SET_FIELD = OfpActionType(openflow_13.OfpActionType_OFPAT_SET_FIELD)
+const OfpActionType_OFPAT_PUSH_PBB = OfpActionType(openflow_13.OfpActionType_OFPAT_PUSH_PBB)
+const OfpActionType_OFPAT_POP_PBB = OfpActionType(openflow_13.OfpActionType_OFPAT_POP_PBB)
+const OfpActionType_OFPAT_EXPERIMENTER = OfpActionType(openflow_13.OfpActionType_OFPAT_EXPERIMENTER)
+
+// OfpControllerMaxLen from public import voltha_protos/openflow_13.proto
+type OfpControllerMaxLen = openflow_13.OfpControllerMaxLen
+
+var OfpControllerMaxLen_name = openflow_13.OfpControllerMaxLen_name
+var OfpControllerMaxLen_value = openflow_13.OfpControllerMaxLen_value
+
+const OfpControllerMaxLen_OFPCML_INVALID = OfpControllerMaxLen(openflow_13.OfpControllerMaxLen_OFPCML_INVALID)
+const OfpControllerMaxLen_OFPCML_MAX = OfpControllerMaxLen(openflow_13.OfpControllerMaxLen_OFPCML_MAX)
+const OfpControllerMaxLen_OFPCML_NO_BUFFER = OfpControllerMaxLen(openflow_13.OfpControllerMaxLen_OFPCML_NO_BUFFER)
+
+// OfpInstructionType from public import voltha_protos/openflow_13.proto
+type OfpInstructionType = openflow_13.OfpInstructionType
+
+var OfpInstructionType_name = openflow_13.OfpInstructionType_name
+var OfpInstructionType_value = openflow_13.OfpInstructionType_value
+
+const OfpInstructionType_OFPIT_INVALID = OfpInstructionType(openflow_13.OfpInstructionType_OFPIT_INVALID)
+const OfpInstructionType_OFPIT_GOTO_TABLE = OfpInstructionType(openflow_13.OfpInstructionType_OFPIT_GOTO_TABLE)
+const OfpInstructionType_OFPIT_WRITE_METADATA = OfpInstructionType(openflow_13.OfpInstructionType_OFPIT_WRITE_METADATA)
+const OfpInstructionType_OFPIT_WRITE_ACTIONS = OfpInstructionType(openflow_13.OfpInstructionType_OFPIT_WRITE_ACTIONS)
+const OfpInstructionType_OFPIT_APPLY_ACTIONS = OfpInstructionType(openflow_13.OfpInstructionType_OFPIT_APPLY_ACTIONS)
+const OfpInstructionType_OFPIT_CLEAR_ACTIONS = OfpInstructionType(openflow_13.OfpInstructionType_OFPIT_CLEAR_ACTIONS)
+const OfpInstructionType_OFPIT_METER = OfpInstructionType(openflow_13.OfpInstructionType_OFPIT_METER)
+const OfpInstructionType_OFPIT_EXPERIMENTER = OfpInstructionType(openflow_13.OfpInstructionType_OFPIT_EXPERIMENTER)
+
+// OfpFlowModCommand from public import voltha_protos/openflow_13.proto
+type OfpFlowModCommand = openflow_13.OfpFlowModCommand
+
+var OfpFlowModCommand_name = openflow_13.OfpFlowModCommand_name
+var OfpFlowModCommand_value = openflow_13.OfpFlowModCommand_value
+
+const OfpFlowModCommand_OFPFC_ADD = OfpFlowModCommand(openflow_13.OfpFlowModCommand_OFPFC_ADD)
+const OfpFlowModCommand_OFPFC_MODIFY = OfpFlowModCommand(openflow_13.OfpFlowModCommand_OFPFC_MODIFY)
+const OfpFlowModCommand_OFPFC_MODIFY_STRICT = OfpFlowModCommand(openflow_13.OfpFlowModCommand_OFPFC_MODIFY_STRICT)
+const OfpFlowModCommand_OFPFC_DELETE = OfpFlowModCommand(openflow_13.OfpFlowModCommand_OFPFC_DELETE)
+const OfpFlowModCommand_OFPFC_DELETE_STRICT = OfpFlowModCommand(openflow_13.OfpFlowModCommand_OFPFC_DELETE_STRICT)
+
+// OfpFlowModFlags from public import voltha_protos/openflow_13.proto
+type OfpFlowModFlags = openflow_13.OfpFlowModFlags
+
+var OfpFlowModFlags_name = openflow_13.OfpFlowModFlags_name
+var OfpFlowModFlags_value = openflow_13.OfpFlowModFlags_value
+
+const OfpFlowModFlags_OFPFF_INVALID = OfpFlowModFlags(openflow_13.OfpFlowModFlags_OFPFF_INVALID)
+const OfpFlowModFlags_OFPFF_SEND_FLOW_REM = OfpFlowModFlags(openflow_13.OfpFlowModFlags_OFPFF_SEND_FLOW_REM)
+const OfpFlowModFlags_OFPFF_CHECK_OVERLAP = OfpFlowModFlags(openflow_13.OfpFlowModFlags_OFPFF_CHECK_OVERLAP)
+const OfpFlowModFlags_OFPFF_RESET_COUNTS = OfpFlowModFlags(openflow_13.OfpFlowModFlags_OFPFF_RESET_COUNTS)
+const OfpFlowModFlags_OFPFF_NO_PKT_COUNTS = OfpFlowModFlags(openflow_13.OfpFlowModFlags_OFPFF_NO_PKT_COUNTS)
+const OfpFlowModFlags_OFPFF_NO_BYT_COUNTS = OfpFlowModFlags(openflow_13.OfpFlowModFlags_OFPFF_NO_BYT_COUNTS)
+
+// OfpGroup from public import voltha_protos/openflow_13.proto
+type OfpGroup = openflow_13.OfpGroup
+
+var OfpGroup_name = openflow_13.OfpGroup_name
+var OfpGroup_value = openflow_13.OfpGroup_value
+
+const OfpGroup_OFPG_INVALID = OfpGroup(openflow_13.OfpGroup_OFPG_INVALID)
+const OfpGroup_OFPG_MAX = OfpGroup(openflow_13.OfpGroup_OFPG_MAX)
+const OfpGroup_OFPG_ALL = OfpGroup(openflow_13.OfpGroup_OFPG_ALL)
+const OfpGroup_OFPG_ANY = OfpGroup(openflow_13.OfpGroup_OFPG_ANY)
+
+// OfpGroupModCommand from public import voltha_protos/openflow_13.proto
+type OfpGroupModCommand = openflow_13.OfpGroupModCommand
+
+var OfpGroupModCommand_name = openflow_13.OfpGroupModCommand_name
+var OfpGroupModCommand_value = openflow_13.OfpGroupModCommand_value
+
+const OfpGroupModCommand_OFPGC_ADD = OfpGroupModCommand(openflow_13.OfpGroupModCommand_OFPGC_ADD)
+const OfpGroupModCommand_OFPGC_MODIFY = OfpGroupModCommand(openflow_13.OfpGroupModCommand_OFPGC_MODIFY)
+const OfpGroupModCommand_OFPGC_DELETE = OfpGroupModCommand(openflow_13.OfpGroupModCommand_OFPGC_DELETE)
+
+// OfpGroupType from public import voltha_protos/openflow_13.proto
+type OfpGroupType = openflow_13.OfpGroupType
+
+var OfpGroupType_name = openflow_13.OfpGroupType_name
+var OfpGroupType_value = openflow_13.OfpGroupType_value
+
+const OfpGroupType_OFPGT_ALL = OfpGroupType(openflow_13.OfpGroupType_OFPGT_ALL)
+const OfpGroupType_OFPGT_SELECT = OfpGroupType(openflow_13.OfpGroupType_OFPGT_SELECT)
+const OfpGroupType_OFPGT_INDIRECT = OfpGroupType(openflow_13.OfpGroupType_OFPGT_INDIRECT)
+const OfpGroupType_OFPGT_FF = OfpGroupType(openflow_13.OfpGroupType_OFPGT_FF)
+
+// OfpPacketInReason from public import voltha_protos/openflow_13.proto
+type OfpPacketInReason = openflow_13.OfpPacketInReason
+
+var OfpPacketInReason_name = openflow_13.OfpPacketInReason_name
+var OfpPacketInReason_value = openflow_13.OfpPacketInReason_value
+
+const OfpPacketInReason_OFPR_NO_MATCH = OfpPacketInReason(openflow_13.OfpPacketInReason_OFPR_NO_MATCH)
+const OfpPacketInReason_OFPR_ACTION = OfpPacketInReason(openflow_13.OfpPacketInReason_OFPR_ACTION)
+const OfpPacketInReason_OFPR_INVALID_TTL = OfpPacketInReason(openflow_13.OfpPacketInReason_OFPR_INVALID_TTL)
+
+// OfpFlowRemovedReason from public import voltha_protos/openflow_13.proto
+type OfpFlowRemovedReason = openflow_13.OfpFlowRemovedReason
+
+var OfpFlowRemovedReason_name = openflow_13.OfpFlowRemovedReason_name
+var OfpFlowRemovedReason_value = openflow_13.OfpFlowRemovedReason_value
+
+const OfpFlowRemovedReason_OFPRR_IDLE_TIMEOUT = OfpFlowRemovedReason(openflow_13.OfpFlowRemovedReason_OFPRR_IDLE_TIMEOUT)
+const OfpFlowRemovedReason_OFPRR_HARD_TIMEOUT = OfpFlowRemovedReason(openflow_13.OfpFlowRemovedReason_OFPRR_HARD_TIMEOUT)
+const OfpFlowRemovedReason_OFPRR_DELETE = OfpFlowRemovedReason(openflow_13.OfpFlowRemovedReason_OFPRR_DELETE)
+const OfpFlowRemovedReason_OFPRR_GROUP_DELETE = OfpFlowRemovedReason(openflow_13.OfpFlowRemovedReason_OFPRR_GROUP_DELETE)
+const OfpFlowRemovedReason_OFPRR_METER_DELETE = OfpFlowRemovedReason(openflow_13.OfpFlowRemovedReason_OFPRR_METER_DELETE)
+
+// OfpMeter from public import voltha_protos/openflow_13.proto
+type OfpMeter = openflow_13.OfpMeter
+
+var OfpMeter_name = openflow_13.OfpMeter_name
+var OfpMeter_value = openflow_13.OfpMeter_value
+
+const OfpMeter_OFPM_ZERO = OfpMeter(openflow_13.OfpMeter_OFPM_ZERO)
+const OfpMeter_OFPM_MAX = OfpMeter(openflow_13.OfpMeter_OFPM_MAX)
+const OfpMeter_OFPM_SLOWPATH = OfpMeter(openflow_13.OfpMeter_OFPM_SLOWPATH)
+const OfpMeter_OFPM_CONTROLLER = OfpMeter(openflow_13.OfpMeter_OFPM_CONTROLLER)
+const OfpMeter_OFPM_ALL = OfpMeter(openflow_13.OfpMeter_OFPM_ALL)
+
+// OfpMeterBandType from public import voltha_protos/openflow_13.proto
+type OfpMeterBandType = openflow_13.OfpMeterBandType
+
+var OfpMeterBandType_name = openflow_13.OfpMeterBandType_name
+var OfpMeterBandType_value = openflow_13.OfpMeterBandType_value
+
+const OfpMeterBandType_OFPMBT_INVALID = OfpMeterBandType(openflow_13.OfpMeterBandType_OFPMBT_INVALID)
+const OfpMeterBandType_OFPMBT_DROP = OfpMeterBandType(openflow_13.OfpMeterBandType_OFPMBT_DROP)
+const OfpMeterBandType_OFPMBT_DSCP_REMARK = OfpMeterBandType(openflow_13.OfpMeterBandType_OFPMBT_DSCP_REMARK)
+const OfpMeterBandType_OFPMBT_EXPERIMENTER = OfpMeterBandType(openflow_13.OfpMeterBandType_OFPMBT_EXPERIMENTER)
+
+// OfpMeterModCommand from public import voltha_protos/openflow_13.proto
+type OfpMeterModCommand = openflow_13.OfpMeterModCommand
+
+var OfpMeterModCommand_name = openflow_13.OfpMeterModCommand_name
+var OfpMeterModCommand_value = openflow_13.OfpMeterModCommand_value
+
+const OfpMeterModCommand_OFPMC_ADD = OfpMeterModCommand(openflow_13.OfpMeterModCommand_OFPMC_ADD)
+const OfpMeterModCommand_OFPMC_MODIFY = OfpMeterModCommand(openflow_13.OfpMeterModCommand_OFPMC_MODIFY)
+const OfpMeterModCommand_OFPMC_DELETE = OfpMeterModCommand(openflow_13.OfpMeterModCommand_OFPMC_DELETE)
+
+// OfpMeterFlags from public import voltha_protos/openflow_13.proto
+type OfpMeterFlags = openflow_13.OfpMeterFlags
+
+var OfpMeterFlags_name = openflow_13.OfpMeterFlags_name
+var OfpMeterFlags_value = openflow_13.OfpMeterFlags_value
+
+const OfpMeterFlags_OFPMF_INVALID = OfpMeterFlags(openflow_13.OfpMeterFlags_OFPMF_INVALID)
+const OfpMeterFlags_OFPMF_KBPS = OfpMeterFlags(openflow_13.OfpMeterFlags_OFPMF_KBPS)
+const OfpMeterFlags_OFPMF_PKTPS = OfpMeterFlags(openflow_13.OfpMeterFlags_OFPMF_PKTPS)
+const OfpMeterFlags_OFPMF_BURST = OfpMeterFlags(openflow_13.OfpMeterFlags_OFPMF_BURST)
+const OfpMeterFlags_OFPMF_STATS = OfpMeterFlags(openflow_13.OfpMeterFlags_OFPMF_STATS)
+
+// OfpErrorType from public import voltha_protos/openflow_13.proto
+type OfpErrorType = openflow_13.OfpErrorType
+
+var OfpErrorType_name = openflow_13.OfpErrorType_name
+var OfpErrorType_value = openflow_13.OfpErrorType_value
+
+const OfpErrorType_OFPET_HELLO_FAILED = OfpErrorType(openflow_13.OfpErrorType_OFPET_HELLO_FAILED)
+const OfpErrorType_OFPET_BAD_REQUEST = OfpErrorType(openflow_13.OfpErrorType_OFPET_BAD_REQUEST)
+const OfpErrorType_OFPET_BAD_ACTION = OfpErrorType(openflow_13.OfpErrorType_OFPET_BAD_ACTION)
+const OfpErrorType_OFPET_BAD_INSTRUCTION = OfpErrorType(openflow_13.OfpErrorType_OFPET_BAD_INSTRUCTION)
+const OfpErrorType_OFPET_BAD_MATCH = OfpErrorType(openflow_13.OfpErrorType_OFPET_BAD_MATCH)
+const OfpErrorType_OFPET_FLOW_MOD_FAILED = OfpErrorType(openflow_13.OfpErrorType_OFPET_FLOW_MOD_FAILED)
+const OfpErrorType_OFPET_GROUP_MOD_FAILED = OfpErrorType(openflow_13.OfpErrorType_OFPET_GROUP_MOD_FAILED)
+const OfpErrorType_OFPET_PORT_MOD_FAILED = OfpErrorType(openflow_13.OfpErrorType_OFPET_PORT_MOD_FAILED)
+const OfpErrorType_OFPET_TABLE_MOD_FAILED = OfpErrorType(openflow_13.OfpErrorType_OFPET_TABLE_MOD_FAILED)
+const OfpErrorType_OFPET_QUEUE_OP_FAILED = OfpErrorType(openflow_13.OfpErrorType_OFPET_QUEUE_OP_FAILED)
+const OfpErrorType_OFPET_SWITCH_CONFIG_FAILED = OfpErrorType(openflow_13.OfpErrorType_OFPET_SWITCH_CONFIG_FAILED)
+const OfpErrorType_OFPET_ROLE_REQUEST_FAILED = OfpErrorType(openflow_13.OfpErrorType_OFPET_ROLE_REQUEST_FAILED)
+const OfpErrorType_OFPET_METER_MOD_FAILED = OfpErrorType(openflow_13.OfpErrorType_OFPET_METER_MOD_FAILED)
+const OfpErrorType_OFPET_TABLE_FEATURES_FAILED = OfpErrorType(openflow_13.OfpErrorType_OFPET_TABLE_FEATURES_FAILED)
+const OfpErrorType_OFPET_EXPERIMENTER = OfpErrorType(openflow_13.OfpErrorType_OFPET_EXPERIMENTER)
+
+// OfpHelloFailedCode from public import voltha_protos/openflow_13.proto
+type OfpHelloFailedCode = openflow_13.OfpHelloFailedCode
+
+var OfpHelloFailedCode_name = openflow_13.OfpHelloFailedCode_name
+var OfpHelloFailedCode_value = openflow_13.OfpHelloFailedCode_value
+
+const OfpHelloFailedCode_OFPHFC_INCOMPATIBLE = OfpHelloFailedCode(openflow_13.OfpHelloFailedCode_OFPHFC_INCOMPATIBLE)
+const OfpHelloFailedCode_OFPHFC_EPERM = OfpHelloFailedCode(openflow_13.OfpHelloFailedCode_OFPHFC_EPERM)
+
+// OfpBadRequestCode from public import voltha_protos/openflow_13.proto
+type OfpBadRequestCode = openflow_13.OfpBadRequestCode
+
+var OfpBadRequestCode_name = openflow_13.OfpBadRequestCode_name
+var OfpBadRequestCode_value = openflow_13.OfpBadRequestCode_value
+
+const OfpBadRequestCode_OFPBRC_BAD_VERSION = OfpBadRequestCode(openflow_13.OfpBadRequestCode_OFPBRC_BAD_VERSION)
+const OfpBadRequestCode_OFPBRC_BAD_TYPE = OfpBadRequestCode(openflow_13.OfpBadRequestCode_OFPBRC_BAD_TYPE)
+const OfpBadRequestCode_OFPBRC_BAD_MULTIPART = OfpBadRequestCode(openflow_13.OfpBadRequestCode_OFPBRC_BAD_MULTIPART)
+const OfpBadRequestCode_OFPBRC_BAD_EXPERIMENTER = OfpBadRequestCode(openflow_13.OfpBadRequestCode_OFPBRC_BAD_EXPERIMENTER)
+const OfpBadRequestCode_OFPBRC_BAD_EXP_TYPE = OfpBadRequestCode(openflow_13.OfpBadRequestCode_OFPBRC_BAD_EXP_TYPE)
+const OfpBadRequestCode_OFPBRC_EPERM = OfpBadRequestCode(openflow_13.OfpBadRequestCode_OFPBRC_EPERM)
+const OfpBadRequestCode_OFPBRC_BAD_LEN = OfpBadRequestCode(openflow_13.OfpBadRequestCode_OFPBRC_BAD_LEN)
+const OfpBadRequestCode_OFPBRC_BUFFER_EMPTY = OfpBadRequestCode(openflow_13.OfpBadRequestCode_OFPBRC_BUFFER_EMPTY)
+const OfpBadRequestCode_OFPBRC_BUFFER_UNKNOWN = OfpBadRequestCode(openflow_13.OfpBadRequestCode_OFPBRC_BUFFER_UNKNOWN)
+const OfpBadRequestCode_OFPBRC_BAD_TABLE_ID = OfpBadRequestCode(openflow_13.OfpBadRequestCode_OFPBRC_BAD_TABLE_ID)
+const OfpBadRequestCode_OFPBRC_IS_SLAVE = OfpBadRequestCode(openflow_13.OfpBadRequestCode_OFPBRC_IS_SLAVE)
+const OfpBadRequestCode_OFPBRC_BAD_PORT = OfpBadRequestCode(openflow_13.OfpBadRequestCode_OFPBRC_BAD_PORT)
+const OfpBadRequestCode_OFPBRC_BAD_PACKET = OfpBadRequestCode(openflow_13.OfpBadRequestCode_OFPBRC_BAD_PACKET)
+const OfpBadRequestCode_OFPBRC_MULTIPART_BUFFER_OVERFLOW = OfpBadRequestCode(openflow_13.OfpBadRequestCode_OFPBRC_MULTIPART_BUFFER_OVERFLOW)
+
+// OfpBadActionCode from public import voltha_protos/openflow_13.proto
+type OfpBadActionCode = openflow_13.OfpBadActionCode
+
+var OfpBadActionCode_name = openflow_13.OfpBadActionCode_name
+var OfpBadActionCode_value = openflow_13.OfpBadActionCode_value
+
+const OfpBadActionCode_OFPBAC_BAD_TYPE = OfpBadActionCode(openflow_13.OfpBadActionCode_OFPBAC_BAD_TYPE)
+const OfpBadActionCode_OFPBAC_BAD_LEN = OfpBadActionCode(openflow_13.OfpBadActionCode_OFPBAC_BAD_LEN)
+const OfpBadActionCode_OFPBAC_BAD_EXPERIMENTER = OfpBadActionCode(openflow_13.OfpBadActionCode_OFPBAC_BAD_EXPERIMENTER)
+const OfpBadActionCode_OFPBAC_BAD_EXP_TYPE = OfpBadActionCode(openflow_13.OfpBadActionCode_OFPBAC_BAD_EXP_TYPE)
+const OfpBadActionCode_OFPBAC_BAD_OUT_PORT = OfpBadActionCode(openflow_13.OfpBadActionCode_OFPBAC_BAD_OUT_PORT)
+const OfpBadActionCode_OFPBAC_BAD_ARGUMENT = OfpBadActionCode(openflow_13.OfpBadActionCode_OFPBAC_BAD_ARGUMENT)
+const OfpBadActionCode_OFPBAC_EPERM = OfpBadActionCode(openflow_13.OfpBadActionCode_OFPBAC_EPERM)
+const OfpBadActionCode_OFPBAC_TOO_MANY = OfpBadActionCode(openflow_13.OfpBadActionCode_OFPBAC_TOO_MANY)
+const OfpBadActionCode_OFPBAC_BAD_QUEUE = OfpBadActionCode(openflow_13.OfpBadActionCode_OFPBAC_BAD_QUEUE)
+const OfpBadActionCode_OFPBAC_BAD_OUT_GROUP = OfpBadActionCode(openflow_13.OfpBadActionCode_OFPBAC_BAD_OUT_GROUP)
+const OfpBadActionCode_OFPBAC_MATCH_INCONSISTENT = OfpBadActionCode(openflow_13.OfpBadActionCode_OFPBAC_MATCH_INCONSISTENT)
+const OfpBadActionCode_OFPBAC_UNSUPPORTED_ORDER = OfpBadActionCode(openflow_13.OfpBadActionCode_OFPBAC_UNSUPPORTED_ORDER)
+const OfpBadActionCode_OFPBAC_BAD_TAG = OfpBadActionCode(openflow_13.OfpBadActionCode_OFPBAC_BAD_TAG)
+const OfpBadActionCode_OFPBAC_BAD_SET_TYPE = OfpBadActionCode(openflow_13.OfpBadActionCode_OFPBAC_BAD_SET_TYPE)
+const OfpBadActionCode_OFPBAC_BAD_SET_LEN = OfpBadActionCode(openflow_13.OfpBadActionCode_OFPBAC_BAD_SET_LEN)
+const OfpBadActionCode_OFPBAC_BAD_SET_ARGUMENT = OfpBadActionCode(openflow_13.OfpBadActionCode_OFPBAC_BAD_SET_ARGUMENT)
+
+// OfpBadInstructionCode from public import voltha_protos/openflow_13.proto
+type OfpBadInstructionCode = openflow_13.OfpBadInstructionCode
+
+var OfpBadInstructionCode_name = openflow_13.OfpBadInstructionCode_name
+var OfpBadInstructionCode_value = openflow_13.OfpBadInstructionCode_value
+
+const OfpBadInstructionCode_OFPBIC_UNKNOWN_INST = OfpBadInstructionCode(openflow_13.OfpBadInstructionCode_OFPBIC_UNKNOWN_INST)
+const OfpBadInstructionCode_OFPBIC_UNSUP_INST = OfpBadInstructionCode(openflow_13.OfpBadInstructionCode_OFPBIC_UNSUP_INST)
+const OfpBadInstructionCode_OFPBIC_BAD_TABLE_ID = OfpBadInstructionCode(openflow_13.OfpBadInstructionCode_OFPBIC_BAD_TABLE_ID)
+const OfpBadInstructionCode_OFPBIC_UNSUP_METADATA = OfpBadInstructionCode(openflow_13.OfpBadInstructionCode_OFPBIC_UNSUP_METADATA)
+const OfpBadInstructionCode_OFPBIC_UNSUP_METADATA_MASK = OfpBadInstructionCode(openflow_13.OfpBadInstructionCode_OFPBIC_UNSUP_METADATA_MASK)
+const OfpBadInstructionCode_OFPBIC_BAD_EXPERIMENTER = OfpBadInstructionCode(openflow_13.OfpBadInstructionCode_OFPBIC_BAD_EXPERIMENTER)
+const OfpBadInstructionCode_OFPBIC_BAD_EXP_TYPE = OfpBadInstructionCode(openflow_13.OfpBadInstructionCode_OFPBIC_BAD_EXP_TYPE)
+const OfpBadInstructionCode_OFPBIC_BAD_LEN = OfpBadInstructionCode(openflow_13.OfpBadInstructionCode_OFPBIC_BAD_LEN)
+const OfpBadInstructionCode_OFPBIC_EPERM = OfpBadInstructionCode(openflow_13.OfpBadInstructionCode_OFPBIC_EPERM)
+
+// OfpBadMatchCode from public import voltha_protos/openflow_13.proto
+type OfpBadMatchCode = openflow_13.OfpBadMatchCode
+
+var OfpBadMatchCode_name = openflow_13.OfpBadMatchCode_name
+var OfpBadMatchCode_value = openflow_13.OfpBadMatchCode_value
+
+const OfpBadMatchCode_OFPBMC_BAD_TYPE = OfpBadMatchCode(openflow_13.OfpBadMatchCode_OFPBMC_BAD_TYPE)
+const OfpBadMatchCode_OFPBMC_BAD_LEN = OfpBadMatchCode(openflow_13.OfpBadMatchCode_OFPBMC_BAD_LEN)
+const OfpBadMatchCode_OFPBMC_BAD_TAG = OfpBadMatchCode(openflow_13.OfpBadMatchCode_OFPBMC_BAD_TAG)
+const OfpBadMatchCode_OFPBMC_BAD_DL_ADDR_MASK = OfpBadMatchCode(openflow_13.OfpBadMatchCode_OFPBMC_BAD_DL_ADDR_MASK)
+const OfpBadMatchCode_OFPBMC_BAD_NW_ADDR_MASK = OfpBadMatchCode(openflow_13.OfpBadMatchCode_OFPBMC_BAD_NW_ADDR_MASK)
+const OfpBadMatchCode_OFPBMC_BAD_WILDCARDS = OfpBadMatchCode(openflow_13.OfpBadMatchCode_OFPBMC_BAD_WILDCARDS)
+const OfpBadMatchCode_OFPBMC_BAD_FIELD = OfpBadMatchCode(openflow_13.OfpBadMatchCode_OFPBMC_BAD_FIELD)
+const OfpBadMatchCode_OFPBMC_BAD_VALUE = OfpBadMatchCode(openflow_13.OfpBadMatchCode_OFPBMC_BAD_VALUE)
+const OfpBadMatchCode_OFPBMC_BAD_MASK = OfpBadMatchCode(openflow_13.OfpBadMatchCode_OFPBMC_BAD_MASK)
+const OfpBadMatchCode_OFPBMC_BAD_PREREQ = OfpBadMatchCode(openflow_13.OfpBadMatchCode_OFPBMC_BAD_PREREQ)
+const OfpBadMatchCode_OFPBMC_DUP_FIELD = OfpBadMatchCode(openflow_13.OfpBadMatchCode_OFPBMC_DUP_FIELD)
+const OfpBadMatchCode_OFPBMC_EPERM = OfpBadMatchCode(openflow_13.OfpBadMatchCode_OFPBMC_EPERM)
+
+// OfpFlowModFailedCode from public import voltha_protos/openflow_13.proto
+type OfpFlowModFailedCode = openflow_13.OfpFlowModFailedCode
+
+var OfpFlowModFailedCode_name = openflow_13.OfpFlowModFailedCode_name
+var OfpFlowModFailedCode_value = openflow_13.OfpFlowModFailedCode_value
+
+const OfpFlowModFailedCode_OFPFMFC_UNKNOWN = OfpFlowModFailedCode(openflow_13.OfpFlowModFailedCode_OFPFMFC_UNKNOWN)
+const OfpFlowModFailedCode_OFPFMFC_TABLE_FULL = OfpFlowModFailedCode(openflow_13.OfpFlowModFailedCode_OFPFMFC_TABLE_FULL)
+const OfpFlowModFailedCode_OFPFMFC_BAD_TABLE_ID = OfpFlowModFailedCode(openflow_13.OfpFlowModFailedCode_OFPFMFC_BAD_TABLE_ID)
+const OfpFlowModFailedCode_OFPFMFC_OVERLAP = OfpFlowModFailedCode(openflow_13.OfpFlowModFailedCode_OFPFMFC_OVERLAP)
+const OfpFlowModFailedCode_OFPFMFC_EPERM = OfpFlowModFailedCode(openflow_13.OfpFlowModFailedCode_OFPFMFC_EPERM)
+const OfpFlowModFailedCode_OFPFMFC_BAD_TIMEOUT = OfpFlowModFailedCode(openflow_13.OfpFlowModFailedCode_OFPFMFC_BAD_TIMEOUT)
+const OfpFlowModFailedCode_OFPFMFC_BAD_COMMAND = OfpFlowModFailedCode(openflow_13.OfpFlowModFailedCode_OFPFMFC_BAD_COMMAND)
+const OfpFlowModFailedCode_OFPFMFC_BAD_FLAGS = OfpFlowModFailedCode(openflow_13.OfpFlowModFailedCode_OFPFMFC_BAD_FLAGS)
+
+// OfpGroupModFailedCode from public import voltha_protos/openflow_13.proto
+type OfpGroupModFailedCode = openflow_13.OfpGroupModFailedCode
+
+var OfpGroupModFailedCode_name = openflow_13.OfpGroupModFailedCode_name
+var OfpGroupModFailedCode_value = openflow_13.OfpGroupModFailedCode_value
+
+const OfpGroupModFailedCode_OFPGMFC_GROUP_EXISTS = OfpGroupModFailedCode(openflow_13.OfpGroupModFailedCode_OFPGMFC_GROUP_EXISTS)
+const OfpGroupModFailedCode_OFPGMFC_INVALID_GROUP = OfpGroupModFailedCode(openflow_13.OfpGroupModFailedCode_OFPGMFC_INVALID_GROUP)
+const OfpGroupModFailedCode_OFPGMFC_WEIGHT_UNSUPPORTED = OfpGroupModFailedCode(openflow_13.OfpGroupModFailedCode_OFPGMFC_WEIGHT_UNSUPPORTED)
+const OfpGroupModFailedCode_OFPGMFC_OUT_OF_GROUPS = OfpGroupModFailedCode(openflow_13.OfpGroupModFailedCode_OFPGMFC_OUT_OF_GROUPS)
+const OfpGroupModFailedCode_OFPGMFC_OUT_OF_BUCKETS = OfpGroupModFailedCode(openflow_13.OfpGroupModFailedCode_OFPGMFC_OUT_OF_BUCKETS)
+const OfpGroupModFailedCode_OFPGMFC_CHAINING_UNSUPPORTED = OfpGroupModFailedCode(openflow_13.OfpGroupModFailedCode_OFPGMFC_CHAINING_UNSUPPORTED)
+const OfpGroupModFailedCode_OFPGMFC_WATCH_UNSUPPORTED = OfpGroupModFailedCode(openflow_13.OfpGroupModFailedCode_OFPGMFC_WATCH_UNSUPPORTED)
+const OfpGroupModFailedCode_OFPGMFC_LOOP = OfpGroupModFailedCode(openflow_13.OfpGroupModFailedCode_OFPGMFC_LOOP)
+const OfpGroupModFailedCode_OFPGMFC_UNKNOWN_GROUP = OfpGroupModFailedCode(openflow_13.OfpGroupModFailedCode_OFPGMFC_UNKNOWN_GROUP)
+const OfpGroupModFailedCode_OFPGMFC_CHAINED_GROUP = OfpGroupModFailedCode(openflow_13.OfpGroupModFailedCode_OFPGMFC_CHAINED_GROUP)
+const OfpGroupModFailedCode_OFPGMFC_BAD_TYPE = OfpGroupModFailedCode(openflow_13.OfpGroupModFailedCode_OFPGMFC_BAD_TYPE)
+const OfpGroupModFailedCode_OFPGMFC_BAD_COMMAND = OfpGroupModFailedCode(openflow_13.OfpGroupModFailedCode_OFPGMFC_BAD_COMMAND)
+const OfpGroupModFailedCode_OFPGMFC_BAD_BUCKET = OfpGroupModFailedCode(openflow_13.OfpGroupModFailedCode_OFPGMFC_BAD_BUCKET)
+const OfpGroupModFailedCode_OFPGMFC_BAD_WATCH = OfpGroupModFailedCode(openflow_13.OfpGroupModFailedCode_OFPGMFC_BAD_WATCH)
+const OfpGroupModFailedCode_OFPGMFC_EPERM = OfpGroupModFailedCode(openflow_13.OfpGroupModFailedCode_OFPGMFC_EPERM)
+
+// OfpPortModFailedCode from public import voltha_protos/openflow_13.proto
+type OfpPortModFailedCode = openflow_13.OfpPortModFailedCode
+
+var OfpPortModFailedCode_name = openflow_13.OfpPortModFailedCode_name
+var OfpPortModFailedCode_value = openflow_13.OfpPortModFailedCode_value
+
+const OfpPortModFailedCode_OFPPMFC_BAD_PORT = OfpPortModFailedCode(openflow_13.OfpPortModFailedCode_OFPPMFC_BAD_PORT)
+const OfpPortModFailedCode_OFPPMFC_BAD_HW_ADDR = OfpPortModFailedCode(openflow_13.OfpPortModFailedCode_OFPPMFC_BAD_HW_ADDR)
+const OfpPortModFailedCode_OFPPMFC_BAD_CONFIG = OfpPortModFailedCode(openflow_13.OfpPortModFailedCode_OFPPMFC_BAD_CONFIG)
+const OfpPortModFailedCode_OFPPMFC_BAD_ADVERTISE = OfpPortModFailedCode(openflow_13.OfpPortModFailedCode_OFPPMFC_BAD_ADVERTISE)
+const OfpPortModFailedCode_OFPPMFC_EPERM = OfpPortModFailedCode(openflow_13.OfpPortModFailedCode_OFPPMFC_EPERM)
+
+// OfpTableModFailedCode from public import voltha_protos/openflow_13.proto
+type OfpTableModFailedCode = openflow_13.OfpTableModFailedCode
+
+var OfpTableModFailedCode_name = openflow_13.OfpTableModFailedCode_name
+var OfpTableModFailedCode_value = openflow_13.OfpTableModFailedCode_value
+
+const OfpTableModFailedCode_OFPTMFC_BAD_TABLE = OfpTableModFailedCode(openflow_13.OfpTableModFailedCode_OFPTMFC_BAD_TABLE)
+const OfpTableModFailedCode_OFPTMFC_BAD_CONFIG = OfpTableModFailedCode(openflow_13.OfpTableModFailedCode_OFPTMFC_BAD_CONFIG)
+const OfpTableModFailedCode_OFPTMFC_EPERM = OfpTableModFailedCode(openflow_13.OfpTableModFailedCode_OFPTMFC_EPERM)
+
+// OfpQueueOpFailedCode from public import voltha_protos/openflow_13.proto
+type OfpQueueOpFailedCode = openflow_13.OfpQueueOpFailedCode
+
+var OfpQueueOpFailedCode_name = openflow_13.OfpQueueOpFailedCode_name
+var OfpQueueOpFailedCode_value = openflow_13.OfpQueueOpFailedCode_value
+
+const OfpQueueOpFailedCode_OFPQOFC_BAD_PORT = OfpQueueOpFailedCode(openflow_13.OfpQueueOpFailedCode_OFPQOFC_BAD_PORT)
+const OfpQueueOpFailedCode_OFPQOFC_BAD_QUEUE = OfpQueueOpFailedCode(openflow_13.OfpQueueOpFailedCode_OFPQOFC_BAD_QUEUE)
+const OfpQueueOpFailedCode_OFPQOFC_EPERM = OfpQueueOpFailedCode(openflow_13.OfpQueueOpFailedCode_OFPQOFC_EPERM)
+
+// OfpSwitchConfigFailedCode from public import voltha_protos/openflow_13.proto
+type OfpSwitchConfigFailedCode = openflow_13.OfpSwitchConfigFailedCode
+
+var OfpSwitchConfigFailedCode_name = openflow_13.OfpSwitchConfigFailedCode_name
+var OfpSwitchConfigFailedCode_value = openflow_13.OfpSwitchConfigFailedCode_value
+
+const OfpSwitchConfigFailedCode_OFPSCFC_BAD_FLAGS = OfpSwitchConfigFailedCode(openflow_13.OfpSwitchConfigFailedCode_OFPSCFC_BAD_FLAGS)
+const OfpSwitchConfigFailedCode_OFPSCFC_BAD_LEN = OfpSwitchConfigFailedCode(openflow_13.OfpSwitchConfigFailedCode_OFPSCFC_BAD_LEN)
+const OfpSwitchConfigFailedCode_OFPSCFC_EPERM = OfpSwitchConfigFailedCode(openflow_13.OfpSwitchConfigFailedCode_OFPSCFC_EPERM)
+
+// OfpRoleRequestFailedCode from public import voltha_protos/openflow_13.proto
+type OfpRoleRequestFailedCode = openflow_13.OfpRoleRequestFailedCode
+
+var OfpRoleRequestFailedCode_name = openflow_13.OfpRoleRequestFailedCode_name
+var OfpRoleRequestFailedCode_value = openflow_13.OfpRoleRequestFailedCode_value
+
+const OfpRoleRequestFailedCode_OFPRRFC_STALE = OfpRoleRequestFailedCode(openflow_13.OfpRoleRequestFailedCode_OFPRRFC_STALE)
+const OfpRoleRequestFailedCode_OFPRRFC_UNSUP = OfpRoleRequestFailedCode(openflow_13.OfpRoleRequestFailedCode_OFPRRFC_UNSUP)
+const OfpRoleRequestFailedCode_OFPRRFC_BAD_ROLE = OfpRoleRequestFailedCode(openflow_13.OfpRoleRequestFailedCode_OFPRRFC_BAD_ROLE)
+
+// OfpMeterModFailedCode from public import voltha_protos/openflow_13.proto
+type OfpMeterModFailedCode = openflow_13.OfpMeterModFailedCode
+
+var OfpMeterModFailedCode_name = openflow_13.OfpMeterModFailedCode_name
+var OfpMeterModFailedCode_value = openflow_13.OfpMeterModFailedCode_value
+
+const OfpMeterModFailedCode_OFPMMFC_UNKNOWN = OfpMeterModFailedCode(openflow_13.OfpMeterModFailedCode_OFPMMFC_UNKNOWN)
+const OfpMeterModFailedCode_OFPMMFC_METER_EXISTS = OfpMeterModFailedCode(openflow_13.OfpMeterModFailedCode_OFPMMFC_METER_EXISTS)
+const OfpMeterModFailedCode_OFPMMFC_INVALID_METER = OfpMeterModFailedCode(openflow_13.OfpMeterModFailedCode_OFPMMFC_INVALID_METER)
+const OfpMeterModFailedCode_OFPMMFC_UNKNOWN_METER = OfpMeterModFailedCode(openflow_13.OfpMeterModFailedCode_OFPMMFC_UNKNOWN_METER)
+const OfpMeterModFailedCode_OFPMMFC_BAD_COMMAND = OfpMeterModFailedCode(openflow_13.OfpMeterModFailedCode_OFPMMFC_BAD_COMMAND)
+const OfpMeterModFailedCode_OFPMMFC_BAD_FLAGS = OfpMeterModFailedCode(openflow_13.OfpMeterModFailedCode_OFPMMFC_BAD_FLAGS)
+const OfpMeterModFailedCode_OFPMMFC_BAD_RATE = OfpMeterModFailedCode(openflow_13.OfpMeterModFailedCode_OFPMMFC_BAD_RATE)
+const OfpMeterModFailedCode_OFPMMFC_BAD_BURST = OfpMeterModFailedCode(openflow_13.OfpMeterModFailedCode_OFPMMFC_BAD_BURST)
+const OfpMeterModFailedCode_OFPMMFC_BAD_BAND = OfpMeterModFailedCode(openflow_13.OfpMeterModFailedCode_OFPMMFC_BAD_BAND)
+const OfpMeterModFailedCode_OFPMMFC_BAD_BAND_DETAIL = OfpMeterModFailedCode(openflow_13.OfpMeterModFailedCode_OFPMMFC_BAD_BAND_DETAIL)
+const OfpMeterModFailedCode_OFPMMFC_OUT_OF_METERS = OfpMeterModFailedCode(openflow_13.OfpMeterModFailedCode_OFPMMFC_OUT_OF_METERS)
+const OfpMeterModFailedCode_OFPMMFC_OUT_OF_BANDS = OfpMeterModFailedCode(openflow_13.OfpMeterModFailedCode_OFPMMFC_OUT_OF_BANDS)
+
+// OfpTableFeaturesFailedCode from public import voltha_protos/openflow_13.proto
+type OfpTableFeaturesFailedCode = openflow_13.OfpTableFeaturesFailedCode
+
+var OfpTableFeaturesFailedCode_name = openflow_13.OfpTableFeaturesFailedCode_name
+var OfpTableFeaturesFailedCode_value = openflow_13.OfpTableFeaturesFailedCode_value
+
+const OfpTableFeaturesFailedCode_OFPTFFC_BAD_TABLE = OfpTableFeaturesFailedCode(openflow_13.OfpTableFeaturesFailedCode_OFPTFFC_BAD_TABLE)
+const OfpTableFeaturesFailedCode_OFPTFFC_BAD_METADATA = OfpTableFeaturesFailedCode(openflow_13.OfpTableFeaturesFailedCode_OFPTFFC_BAD_METADATA)
+const OfpTableFeaturesFailedCode_OFPTFFC_BAD_TYPE = OfpTableFeaturesFailedCode(openflow_13.OfpTableFeaturesFailedCode_OFPTFFC_BAD_TYPE)
+const OfpTableFeaturesFailedCode_OFPTFFC_BAD_LEN = OfpTableFeaturesFailedCode(openflow_13.OfpTableFeaturesFailedCode_OFPTFFC_BAD_LEN)
+const OfpTableFeaturesFailedCode_OFPTFFC_BAD_ARGUMENT = OfpTableFeaturesFailedCode(openflow_13.OfpTableFeaturesFailedCode_OFPTFFC_BAD_ARGUMENT)
+const OfpTableFeaturesFailedCode_OFPTFFC_EPERM = OfpTableFeaturesFailedCode(openflow_13.OfpTableFeaturesFailedCode_OFPTFFC_EPERM)
+
+// OfpMultipartType from public import voltha_protos/openflow_13.proto
+type OfpMultipartType = openflow_13.OfpMultipartType
+
+var OfpMultipartType_name = openflow_13.OfpMultipartType_name
+var OfpMultipartType_value = openflow_13.OfpMultipartType_value
+
+const OfpMultipartType_OFPMP_DESC = OfpMultipartType(openflow_13.OfpMultipartType_OFPMP_DESC)
+const OfpMultipartType_OFPMP_FLOW = OfpMultipartType(openflow_13.OfpMultipartType_OFPMP_FLOW)
+const OfpMultipartType_OFPMP_AGGREGATE = OfpMultipartType(openflow_13.OfpMultipartType_OFPMP_AGGREGATE)
+const OfpMultipartType_OFPMP_TABLE = OfpMultipartType(openflow_13.OfpMultipartType_OFPMP_TABLE)
+const OfpMultipartType_OFPMP_PORT_STATS = OfpMultipartType(openflow_13.OfpMultipartType_OFPMP_PORT_STATS)
+const OfpMultipartType_OFPMP_QUEUE = OfpMultipartType(openflow_13.OfpMultipartType_OFPMP_QUEUE)
+const OfpMultipartType_OFPMP_GROUP = OfpMultipartType(openflow_13.OfpMultipartType_OFPMP_GROUP)
+const OfpMultipartType_OFPMP_GROUP_DESC = OfpMultipartType(openflow_13.OfpMultipartType_OFPMP_GROUP_DESC)
+const OfpMultipartType_OFPMP_GROUP_FEATURES = OfpMultipartType(openflow_13.OfpMultipartType_OFPMP_GROUP_FEATURES)
+const OfpMultipartType_OFPMP_METER = OfpMultipartType(openflow_13.OfpMultipartType_OFPMP_METER)
+const OfpMultipartType_OFPMP_METER_CONFIG = OfpMultipartType(openflow_13.OfpMultipartType_OFPMP_METER_CONFIG)
+const OfpMultipartType_OFPMP_METER_FEATURES = OfpMultipartType(openflow_13.OfpMultipartType_OFPMP_METER_FEATURES)
+const OfpMultipartType_OFPMP_TABLE_FEATURES = OfpMultipartType(openflow_13.OfpMultipartType_OFPMP_TABLE_FEATURES)
+const OfpMultipartType_OFPMP_PORT_DESC = OfpMultipartType(openflow_13.OfpMultipartType_OFPMP_PORT_DESC)
+const OfpMultipartType_OFPMP_EXPERIMENTER = OfpMultipartType(openflow_13.OfpMultipartType_OFPMP_EXPERIMENTER)
+
+// OfpMultipartRequestFlags from public import voltha_protos/openflow_13.proto
+type OfpMultipartRequestFlags = openflow_13.OfpMultipartRequestFlags
+
+var OfpMultipartRequestFlags_name = openflow_13.OfpMultipartRequestFlags_name
+var OfpMultipartRequestFlags_value = openflow_13.OfpMultipartRequestFlags_value
+
+const OfpMultipartRequestFlags_OFPMPF_REQ_INVALID = OfpMultipartRequestFlags(openflow_13.OfpMultipartRequestFlags_OFPMPF_REQ_INVALID)
+const OfpMultipartRequestFlags_OFPMPF_REQ_MORE = OfpMultipartRequestFlags(openflow_13.OfpMultipartRequestFlags_OFPMPF_REQ_MORE)
+
+// OfpMultipartReplyFlags from public import voltha_protos/openflow_13.proto
+type OfpMultipartReplyFlags = openflow_13.OfpMultipartReplyFlags
+
+var OfpMultipartReplyFlags_name = openflow_13.OfpMultipartReplyFlags_name
+var OfpMultipartReplyFlags_value = openflow_13.OfpMultipartReplyFlags_value
+
+const OfpMultipartReplyFlags_OFPMPF_REPLY_INVALID = OfpMultipartReplyFlags(openflow_13.OfpMultipartReplyFlags_OFPMPF_REPLY_INVALID)
+const OfpMultipartReplyFlags_OFPMPF_REPLY_MORE = OfpMultipartReplyFlags(openflow_13.OfpMultipartReplyFlags_OFPMPF_REPLY_MORE)
+
+// OfpTableFeaturePropType from public import voltha_protos/openflow_13.proto
+type OfpTableFeaturePropType = openflow_13.OfpTableFeaturePropType
+
+var OfpTableFeaturePropType_name = openflow_13.OfpTableFeaturePropType_name
+var OfpTableFeaturePropType_value = openflow_13.OfpTableFeaturePropType_value
+
+const OfpTableFeaturePropType_OFPTFPT_INSTRUCTIONS = OfpTableFeaturePropType(openflow_13.OfpTableFeaturePropType_OFPTFPT_INSTRUCTIONS)
+const OfpTableFeaturePropType_OFPTFPT_INSTRUCTIONS_MISS = OfpTableFeaturePropType(openflow_13.OfpTableFeaturePropType_OFPTFPT_INSTRUCTIONS_MISS)
+const OfpTableFeaturePropType_OFPTFPT_NEXT_TABLES = OfpTableFeaturePropType(openflow_13.OfpTableFeaturePropType_OFPTFPT_NEXT_TABLES)
+const OfpTableFeaturePropType_OFPTFPT_NEXT_TABLES_MISS = OfpTableFeaturePropType(openflow_13.OfpTableFeaturePropType_OFPTFPT_NEXT_TABLES_MISS)
+const OfpTableFeaturePropType_OFPTFPT_WRITE_ACTIONS = OfpTableFeaturePropType(openflow_13.OfpTableFeaturePropType_OFPTFPT_WRITE_ACTIONS)
+const OfpTableFeaturePropType_OFPTFPT_WRITE_ACTIONS_MISS = OfpTableFeaturePropType(openflow_13.OfpTableFeaturePropType_OFPTFPT_WRITE_ACTIONS_MISS)
+const OfpTableFeaturePropType_OFPTFPT_APPLY_ACTIONS = OfpTableFeaturePropType(openflow_13.OfpTableFeaturePropType_OFPTFPT_APPLY_ACTIONS)
+const OfpTableFeaturePropType_OFPTFPT_APPLY_ACTIONS_MISS = OfpTableFeaturePropType(openflow_13.OfpTableFeaturePropType_OFPTFPT_APPLY_ACTIONS_MISS)
+const OfpTableFeaturePropType_OFPTFPT_MATCH = OfpTableFeaturePropType(openflow_13.OfpTableFeaturePropType_OFPTFPT_MATCH)
+const OfpTableFeaturePropType_OFPTFPT_WILDCARDS = OfpTableFeaturePropType(openflow_13.OfpTableFeaturePropType_OFPTFPT_WILDCARDS)
+const OfpTableFeaturePropType_OFPTFPT_WRITE_SETFIELD = OfpTableFeaturePropType(openflow_13.OfpTableFeaturePropType_OFPTFPT_WRITE_SETFIELD)
+const OfpTableFeaturePropType_OFPTFPT_WRITE_SETFIELD_MISS = OfpTableFeaturePropType(openflow_13.OfpTableFeaturePropType_OFPTFPT_WRITE_SETFIELD_MISS)
+const OfpTableFeaturePropType_OFPTFPT_APPLY_SETFIELD = OfpTableFeaturePropType(openflow_13.OfpTableFeaturePropType_OFPTFPT_APPLY_SETFIELD)
+const OfpTableFeaturePropType_OFPTFPT_APPLY_SETFIELD_MISS = OfpTableFeaturePropType(openflow_13.OfpTableFeaturePropType_OFPTFPT_APPLY_SETFIELD_MISS)
+const OfpTableFeaturePropType_OFPTFPT_EXPERIMENTER = OfpTableFeaturePropType(openflow_13.OfpTableFeaturePropType_OFPTFPT_EXPERIMENTER)
+const OfpTableFeaturePropType_OFPTFPT_EXPERIMENTER_MISS = OfpTableFeaturePropType(openflow_13.OfpTableFeaturePropType_OFPTFPT_EXPERIMENTER_MISS)
+
+// OfpGroupCapabilities from public import voltha_protos/openflow_13.proto
+type OfpGroupCapabilities = openflow_13.OfpGroupCapabilities
+
+var OfpGroupCapabilities_name = openflow_13.OfpGroupCapabilities_name
+var OfpGroupCapabilities_value = openflow_13.OfpGroupCapabilities_value
+
+const OfpGroupCapabilities_OFPGFC_INVALID = OfpGroupCapabilities(openflow_13.OfpGroupCapabilities_OFPGFC_INVALID)
+const OfpGroupCapabilities_OFPGFC_SELECT_WEIGHT = OfpGroupCapabilities(openflow_13.OfpGroupCapabilities_OFPGFC_SELECT_WEIGHT)
+const OfpGroupCapabilities_OFPGFC_SELECT_LIVENESS = OfpGroupCapabilities(openflow_13.OfpGroupCapabilities_OFPGFC_SELECT_LIVENESS)
+const OfpGroupCapabilities_OFPGFC_CHAINING = OfpGroupCapabilities(openflow_13.OfpGroupCapabilities_OFPGFC_CHAINING)
+const OfpGroupCapabilities_OFPGFC_CHAINING_CHECKS = OfpGroupCapabilities(openflow_13.OfpGroupCapabilities_OFPGFC_CHAINING_CHECKS)
+
+// OfpQueueProperties from public import voltha_protos/openflow_13.proto
+type OfpQueueProperties = openflow_13.OfpQueueProperties
+
+var OfpQueueProperties_name = openflow_13.OfpQueueProperties_name
+var OfpQueueProperties_value = openflow_13.OfpQueueProperties_value
+
+const OfpQueueProperties_OFPQT_INVALID = OfpQueueProperties(openflow_13.OfpQueueProperties_OFPQT_INVALID)
+const OfpQueueProperties_OFPQT_MIN_RATE = OfpQueueProperties(openflow_13.OfpQueueProperties_OFPQT_MIN_RATE)
+const OfpQueueProperties_OFPQT_MAX_RATE = OfpQueueProperties(openflow_13.OfpQueueProperties_OFPQT_MAX_RATE)
+const OfpQueueProperties_OFPQT_EXPERIMENTER = OfpQueueProperties(openflow_13.OfpQueueProperties_OFPQT_EXPERIMENTER)
+
+// OfpControllerRole from public import voltha_protos/openflow_13.proto
+type OfpControllerRole = openflow_13.OfpControllerRole
+
+var OfpControllerRole_name = openflow_13.OfpControllerRole_name
+var OfpControllerRole_value = openflow_13.OfpControllerRole_value
+
+const OfpControllerRole_OFPCR_ROLE_NOCHANGE = OfpControllerRole(openflow_13.OfpControllerRole_OFPCR_ROLE_NOCHANGE)
+const OfpControllerRole_OFPCR_ROLE_EQUAL = OfpControllerRole(openflow_13.OfpControllerRole_OFPCR_ROLE_EQUAL)
+const OfpControllerRole_OFPCR_ROLE_MASTER = OfpControllerRole(openflow_13.OfpControllerRole_OFPCR_ROLE_MASTER)
+const OfpControllerRole_OFPCR_ROLE_SLAVE = OfpControllerRole(openflow_13.OfpControllerRole_OFPCR_ROLE_SLAVE)
+
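+// exampleIsOutput is an illustrative sketch, not part of the generated API:
+// because each Ofp* name above is a type alias (e.g. OfpActionType =
+// openflow_13.OfpActionType) and the constants are re-exported conversions,
+// values from this package and from openflow_13 compare directly. The
+// function name is arbitrary.
+func exampleIsOutput(t OfpActionType) bool {
+	return t == OfpActionType_OFPAT_OUTPUT
+}
+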
+type EventFilterRuleKey_EventFilterRuleType int32
+
+const (
+	EventFilterRuleKey_filter_all        EventFilterRuleKey_EventFilterRuleType = 0
+	EventFilterRuleKey_category          EventFilterRuleKey_EventFilterRuleType = 1
+	EventFilterRuleKey_sub_category      EventFilterRuleKey_EventFilterRuleType = 2
+	EventFilterRuleKey_kpi_event_type    EventFilterRuleKey_EventFilterRuleType = 3
+	EventFilterRuleKey_config_event_type EventFilterRuleKey_EventFilterRuleType = 4
+	EventFilterRuleKey_device_event_type EventFilterRuleKey_EventFilterRuleType = 5
+)
+
+var EventFilterRuleKey_EventFilterRuleType_name = map[int32]string{
+	0: "filter_all",
+	1: "category",
+	2: "sub_category",
+	3: "kpi_event_type",
+	4: "config_event_type",
+	5: "device_event_type",
+}
+
+var EventFilterRuleKey_EventFilterRuleType_value = map[string]int32{
+	"filter_all":        0,
+	"category":          1,
+	"sub_category":      2,
+	"kpi_event_type":    3,
+	"config_event_type": 4,
+	"device_event_type": 5,
+}
+
+func (x EventFilterRuleKey_EventFilterRuleType) String() string {
+	return proto.EnumName(EventFilterRuleKey_EventFilterRuleType_name, int32(x))
+}
+
+func (EventFilterRuleKey_EventFilterRuleType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{2, 0}
+}
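+
+// exampleEnumRoundTrip is an illustrative sketch, not part of the generated
+// API: the _name and _value maps invert each other, and String() resolves
+// through proto.EnumName, so an enum value round-trips via its label.
+func exampleEnumRoundTrip() bool {
+	v := EventFilterRuleKey_kpi_event_type
+	label := v.String() // "kpi_event_type"
+	return EventFilterRuleKey_EventFilterRuleType_value[label] == int32(v)
+}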
+
+type TestResponse_TestResponseResult int32
+
+const (
+	TestResponse_SUCCESS TestResponse_TestResponseResult = 0
+	TestResponse_FAILURE TestResponse_TestResponseResult = 1
+)
+
+var TestResponse_TestResponseResult_name = map[int32]string{
+	0: "SUCCESS",
+	1: "FAILURE",
+}
+
+var TestResponse_TestResponseResult_value = map[string]int32{
+	"SUCCESS": 0,
+	"FAILURE": 1,
+}
+
+func (x TestResponse_TestResponseResult) String() string {
+	return proto.EnumName(TestResponse_TestResponseResult_name, int32(x))
+}
+
+func (TestResponse_TestResponseResult) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{9, 0}
+}
+
+type SelfTestResponse_SelfTestResult int32
+
+const (
+	SelfTestResponse_SUCCESS       SelfTestResponse_SelfTestResult = 0
+	SelfTestResponse_FAILURE       SelfTestResponse_SelfTestResult = 1
+	SelfTestResponse_NOT_SUPPORTED SelfTestResponse_SelfTestResult = 2
+	SelfTestResponse_UNKNOWN_ERROR SelfTestResponse_SelfTestResult = 3
+)
+
+var SelfTestResponse_SelfTestResult_name = map[int32]string{
+	0: "SUCCESS",
+	1: "FAILURE",
+	2: "NOT_SUPPORTED",
+	3: "UNKNOWN_ERROR",
+}
+
+var SelfTestResponse_SelfTestResult_value = map[string]int32{
+	"SUCCESS":       0,
+	"FAILURE":       1,
+	"NOT_SUPPORTED": 2,
+	"UNKNOWN_ERROR": 3,
+}
+
+func (x SelfTestResponse_SelfTestResult) String() string {
+	return proto.EnumName(SelfTestResponse_SelfTestResult_name, int32(x))
+}
+
+func (SelfTestResponse_SelfTestResult) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{11, 0}
+}
+
+type DeviceGroup struct {
+	Id                   string           `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	LogicalDevices       []*LogicalDevice `protobuf:"bytes,2,rep,name=logical_devices,json=logicalDevices,proto3" json:"logical_devices,omitempty"`
+	Devices              []*Device        `protobuf:"bytes,3,rep,name=devices,proto3" json:"devices,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *DeviceGroup) Reset()         { *m = DeviceGroup{} }
+func (m *DeviceGroup) String() string { return proto.CompactTextString(m) }
+func (*DeviceGroup) ProtoMessage()    {}
+func (*DeviceGroup) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{0}
+}
+
+func (m *DeviceGroup) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceGroup.Unmarshal(m, b)
+}
+func (m *DeviceGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceGroup.Marshal(b, m, deterministic)
+}
+func (m *DeviceGroup) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceGroup.Merge(m, src)
+}
+func (m *DeviceGroup) XXX_Size() int {
+	return xxx_messageInfo_DeviceGroup.Size(m)
+}
+func (m *DeviceGroup) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceGroup.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceGroup proto.InternalMessageInfo
+
+func (m *DeviceGroup) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *DeviceGroup) GetLogicalDevices() []*LogicalDevice {
+	if m != nil {
+		return m.LogicalDevices
+	}
+	return nil
+}
+
+func (m *DeviceGroup) GetDevices() []*Device {
+	if m != nil {
+		return m.Devices
+	}
+	return nil
+}
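+
+// exampleMarshalDeviceGroup is an illustrative sketch, not part of the
+// generated API: it builds a DeviceGroup and round-trips it through the
+// protobuf wire format with the proto package this file already imports.
+// The "group-1" ID is an arbitrary example value.
+func exampleMarshalDeviceGroup() (*DeviceGroup, error) {
+	in := &DeviceGroup{Id: "group-1"}
+	b, err := proto.Marshal(in)
+	if err != nil {
+		return nil, err
+	}
+	out := &DeviceGroup{}
+	if err := proto.Unmarshal(b, out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}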
+
+type DeviceGroups struct {
+	Items                []*DeviceGroup `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
+}
+
+func (m *DeviceGroups) Reset()         { *m = DeviceGroups{} }
+func (m *DeviceGroups) String() string { return proto.CompactTextString(m) }
+func (*DeviceGroups) ProtoMessage()    {}
+func (*DeviceGroups) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{1}
+}
+
+func (m *DeviceGroups) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceGroups.Unmarshal(m, b)
+}
+func (m *DeviceGroups) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceGroups.Marshal(b, m, deterministic)
+}
+func (m *DeviceGroups) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceGroups.Merge(m, src)
+}
+func (m *DeviceGroups) XXX_Size() int {
+	return xxx_messageInfo_DeviceGroups.Size(m)
+}
+func (m *DeviceGroups) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceGroups.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceGroups proto.InternalMessageInfo
+
+func (m *DeviceGroups) GetItems() []*DeviceGroup {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
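+
+// exampleNilSafeGetters is an illustrative sketch, not part of the generated
+// API: every generated getter checks for a nil receiver, so accessing an
+// absent message returns a zero value instead of panicking.
+func exampleNilSafeGetters() []*DeviceGroup {
+	var groups *DeviceGroups  // nil message, e.g. an unset field
+	return groups.GetItems()  // returns nil rather than dereferencing nil
+}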
+
+type EventFilterRuleKey struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EventFilterRuleKey) Reset()         { *m = EventFilterRuleKey{} }
+func (m *EventFilterRuleKey) String() string { return proto.CompactTextString(m) }
+func (*EventFilterRuleKey) ProtoMessage()    {}
+func (*EventFilterRuleKey) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{2}
+}
+
+func (m *EventFilterRuleKey) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EventFilterRuleKey.Unmarshal(m, b)
+}
+func (m *EventFilterRuleKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EventFilterRuleKey.Marshal(b, m, deterministic)
+}
+func (m *EventFilterRuleKey) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventFilterRuleKey.Merge(m, src)
+}
+func (m *EventFilterRuleKey) XXX_Size() int {
+	return xxx_messageInfo_EventFilterRuleKey.Size(m)
+}
+func (m *EventFilterRuleKey) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventFilterRuleKey.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventFilterRuleKey proto.InternalMessageInfo
+
+type EventFilterRule struct {
+	Key                  EventFilterRuleKey_EventFilterRuleType `protobuf:"varint,1,opt,name=key,proto3,enum=voltha.EventFilterRuleKey_EventFilterRuleType" json:"key,omitempty"`
+	Value                string                                 `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                               `json:"-"`
+	XXX_unrecognized     []byte                                 `json:"-"`
+	XXX_sizecache        int32                                  `json:"-"`
+}
+
+func (m *EventFilterRule) Reset()         { *m = EventFilterRule{} }
+func (m *EventFilterRule) String() string { return proto.CompactTextString(m) }
+func (*EventFilterRule) ProtoMessage()    {}
+func (*EventFilterRule) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{3}
+}
+
+func (m *EventFilterRule) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EventFilterRule.Unmarshal(m, b)
+}
+func (m *EventFilterRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EventFilterRule.Marshal(b, m, deterministic)
+}
+func (m *EventFilterRule) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventFilterRule.Merge(m, src)
+}
+func (m *EventFilterRule) XXX_Size() int {
+	return xxx_messageInfo_EventFilterRule.Size(m)
+}
+func (m *EventFilterRule) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventFilterRule.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventFilterRule proto.InternalMessageInfo
+
+func (m *EventFilterRule) GetKey() EventFilterRuleKey_EventFilterRuleType {
+	if m != nil {
+		return m.Key
+	}
+	return EventFilterRuleKey_filter_all
+}
+
+func (m *EventFilterRule) GetValue() string {
+	if m != nil {
+		return m.Value
+	}
+	return ""
+}
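+
+// exampleFilterRule is an illustrative sketch, not part of the generated
+// API: Key travels on the wire as the numeric enum (varint), while Value
+// carries the free-form match string. The "EQUIPMENT" value is an arbitrary
+// example, not a defined constant.
+func exampleFilterRule() *EventFilterRule {
+	return &EventFilterRule{
+		Key:   EventFilterRuleKey_category,
+		Value: "EQUIPMENT",
+	}
+}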
+
+type EventFilter struct {
+	Id                   string             `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Enable               bool               `protobuf:"varint,2,opt,name=enable,proto3" json:"enable,omitempty"`
+	DeviceId             string             `protobuf:"bytes,3,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	EventType            string             `protobuf:"bytes,4,opt,name=event_type,json=eventType,proto3" json:"event_type,omitempty"`
+	Rules                []*EventFilterRule `protobuf:"bytes,5,rep,name=rules,proto3" json:"rules,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *EventFilter) Reset()         { *m = EventFilter{} }
+func (m *EventFilter) String() string { return proto.CompactTextString(m) }
+func (*EventFilter) ProtoMessage()    {}
+func (*EventFilter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{4}
+}
+
+func (m *EventFilter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EventFilter.Unmarshal(m, b)
+}
+func (m *EventFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EventFilter.Marshal(b, m, deterministic)
+}
+func (m *EventFilter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventFilter.Merge(m, src)
+}
+func (m *EventFilter) XXX_Size() int {
+	return xxx_messageInfo_EventFilter.Size(m)
+}
+func (m *EventFilter) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventFilter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventFilter proto.InternalMessageInfo
+
+func (m *EventFilter) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *EventFilter) GetEnable() bool {
+	if m != nil {
+		return m.Enable
+	}
+	return false
+}
+
+func (m *EventFilter) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *EventFilter) GetEventType() string {
+	if m != nil {
+		return m.EventType
+	}
+	return ""
+}
+
+func (m *EventFilter) GetRules() []*EventFilterRule {
+	if m != nil {
+		return m.Rules
+	}
+	return nil
+}
+
+type EventFilters struct {
+	Filters              []*EventFilter `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
+}
+
+func (m *EventFilters) Reset()         { *m = EventFilters{} }
+func (m *EventFilters) String() string { return proto.CompactTextString(m) }
+func (*EventFilters) ProtoMessage()    {}
+func (*EventFilters) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{5}
+}
+
+func (m *EventFilters) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EventFilters.Unmarshal(m, b)
+}
+func (m *EventFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EventFilters.Marshal(b, m, deterministic)
+}
+func (m *EventFilters) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventFilters.Merge(m, src)
+}
+func (m *EventFilters) XXX_Size() int {
+	return xxx_messageInfo_EventFilters.Size(m)
+}
+func (m *EventFilters) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventFilters.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventFilters proto.InternalMessageInfo
+
+func (m *EventFilters) GetFilters() []*EventFilter {
+	if m != nil {
+		return m.Filters
+	}
+	return nil
+}
+
+// CoreInstance represents a core instance. It is data held in memory while a
+// core is running; this data is not persistent.
+type CoreInstance struct {
+	InstanceId           string        `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"`
+	Health               *HealthStatus `protobuf:"bytes,2,opt,name=health,proto3" json:"health,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *CoreInstance) Reset()         { *m = CoreInstance{} }
+func (m *CoreInstance) String() string { return proto.CompactTextString(m) }
+func (*CoreInstance) ProtoMessage()    {}
+func (*CoreInstance) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{6}
+}
+
+func (m *CoreInstance) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_CoreInstance.Unmarshal(m, b)
+}
+func (m *CoreInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CoreInstance.Marshal(b, m, deterministic)
+}
+func (m *CoreInstance) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CoreInstance.Merge(m, src)
+}
+func (m *CoreInstance) XXX_Size() int {
+	return xxx_messageInfo_CoreInstance.Size(m)
+}
+func (m *CoreInstance) XXX_DiscardUnknown() {
+	xxx_messageInfo_CoreInstance.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CoreInstance proto.InternalMessageInfo
+
+func (m *CoreInstance) GetInstanceId() string {
+	if m != nil {
+		return m.InstanceId
+	}
+	return ""
+}
+
+func (m *CoreInstance) GetHealth() *HealthStatus {
+	if m != nil {
+		return m.Health
+	}
+	return nil
+}
+
+type CoreInstances struct {
+	Items                []*CoreInstance `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *CoreInstances) Reset()         { *m = CoreInstances{} }
+func (m *CoreInstances) String() string { return proto.CompactTextString(m) }
+func (*CoreInstances) ProtoMessage()    {}
+func (*CoreInstances) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{7}
+}
+
+func (m *CoreInstances) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_CoreInstances.Unmarshal(m, b)
+}
+func (m *CoreInstances) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CoreInstances.Marshal(b, m, deterministic)
+}
+func (m *CoreInstances) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CoreInstances.Merge(m, src)
+}
+func (m *CoreInstances) XXX_Size() int {
+	return xxx_messageInfo_CoreInstances.Size(m)
+}
+func (m *CoreInstances) XXX_DiscardUnknown() {
+	xxx_messageInfo_CoreInstances.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CoreInstances proto.InternalMessageInfo
+
+func (m *CoreInstances) GetItems() []*CoreInstance {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+type OmciTestRequest struct {
+	Id                   string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Uuid                 string   `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OmciTestRequest) Reset()         { *m = OmciTestRequest{} }
+func (m *OmciTestRequest) String() string { return proto.CompactTextString(m) }
+func (*OmciTestRequest) ProtoMessage()    {}
+func (*OmciTestRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{8}
+}
+
+func (m *OmciTestRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OmciTestRequest.Unmarshal(m, b)
+}
+func (m *OmciTestRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OmciTestRequest.Marshal(b, m, deterministic)
+}
+func (m *OmciTestRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OmciTestRequest.Merge(m, src)
+}
+func (m *OmciTestRequest) XXX_Size() int {
+	return xxx_messageInfo_OmciTestRequest.Size(m)
+}
+func (m *OmciTestRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OmciTestRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OmciTestRequest proto.InternalMessageInfo
+
+func (m *OmciTestRequest) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *OmciTestRequest) GetUuid() string {
+	if m != nil {
+		return m.Uuid
+	}
+	return ""
+}
+
+type TestResponse struct {
+	Result               TestResponse_TestResponseResult `protobuf:"varint,1,opt,name=result,proto3,enum=voltha.TestResponse_TestResponseResult" json:"result,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                        `json:"-"`
+	XXX_unrecognized     []byte                          `json:"-"`
+	XXX_sizecache        int32                           `json:"-"`
+}
+
+func (m *TestResponse) Reset()         { *m = TestResponse{} }
+func (m *TestResponse) String() string { return proto.CompactTextString(m) }
+func (*TestResponse) ProtoMessage()    {}
+func (*TestResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{9}
+}
+
+func (m *TestResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TestResponse.Unmarshal(m, b)
+}
+func (m *TestResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TestResponse.Marshal(b, m, deterministic)
+}
+func (m *TestResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TestResponse.Merge(m, src)
+}
+func (m *TestResponse) XXX_Size() int {
+	return xxx_messageInfo_TestResponse.Size(m)
+}
+func (m *TestResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_TestResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TestResponse proto.InternalMessageInfo
+
+func (m *TestResponse) GetResult() TestResponse_TestResponseResult {
+	if m != nil {
+		return m.Result
+	}
+	return TestResponse_SUCCESS
+}
+
+// Voltha represents the Voltha cluster data. Each core instance holds a subset of
+// the entire cluster; however, some items (e.g. adapters) are held by all cores
+// for better performance.
+type Voltha struct {
+	Version              string                  `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
+	Adapters             []*Adapter              `protobuf:"bytes,2,rep,name=adapters,proto3" json:"adapters,omitempty"`
+	LogicalDevices       []*LogicalDevice        `protobuf:"bytes,3,rep,name=logical_devices,json=logicalDevices,proto3" json:"logical_devices,omitempty"`
+	Devices              []*Device               `protobuf:"bytes,4,rep,name=devices,proto3" json:"devices,omitempty"`
+	DeviceTypes          []*DeviceType           `protobuf:"bytes,5,rep,name=device_types,json=deviceTypes,proto3" json:"device_types,omitempty"`
+	DeviceGroups         []*DeviceGroup          `protobuf:"bytes,6,rep,name=device_groups,json=deviceGroups,proto3" json:"device_groups,omitempty"`
+	EventFilters         []*EventFilter          `protobuf:"bytes,7,rep,name=event_filters,json=eventFilters,proto3" json:"event_filters,omitempty"`
+	OmciMibDatabase      []*omci.MibDeviceData   `protobuf:"bytes,28,rep,name=omci_mib_database,json=omciMibDatabase,proto3" json:"omci_mib_database,omitempty"`
+	OmciAlarmDatabase    []*omci.AlarmDeviceData `protobuf:"bytes,29,rep,name=omci_alarm_database,json=omciAlarmDatabase,proto3" json:"omci_alarm_database,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                `json:"-"`
+	XXX_unrecognized     []byte                  `json:"-"`
+	XXX_sizecache        int32                   `json:"-"`
+}
+
+func (m *Voltha) Reset()         { *m = Voltha{} }
+func (m *Voltha) String() string { return proto.CompactTextString(m) }
+func (*Voltha) ProtoMessage()    {}
+func (*Voltha) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{10}
+}
+
+func (m *Voltha) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Voltha.Unmarshal(m, b)
+}
+func (m *Voltha) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Voltha.Marshal(b, m, deterministic)
+}
+func (m *Voltha) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Voltha.Merge(m, src)
+}
+func (m *Voltha) XXX_Size() int {
+	return xxx_messageInfo_Voltha.Size(m)
+}
+func (m *Voltha) XXX_DiscardUnknown() {
+	xxx_messageInfo_Voltha.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Voltha proto.InternalMessageInfo
+
+func (m *Voltha) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
+
+func (m *Voltha) GetAdapters() []*Adapter {
+	if m != nil {
+		return m.Adapters
+	}
+	return nil
+}
+
+func (m *Voltha) GetLogicalDevices() []*LogicalDevice {
+	if m != nil {
+		return m.LogicalDevices
+	}
+	return nil
+}
+
+func (m *Voltha) GetDevices() []*Device {
+	if m != nil {
+		return m.Devices
+	}
+	return nil
+}
+
+func (m *Voltha) GetDeviceTypes() []*DeviceType {
+	if m != nil {
+		return m.DeviceTypes
+	}
+	return nil
+}
+
+func (m *Voltha) GetDeviceGroups() []*DeviceGroup {
+	if m != nil {
+		return m.DeviceGroups
+	}
+	return nil
+}
+
+func (m *Voltha) GetEventFilters() []*EventFilter {
+	if m != nil {
+		return m.EventFilters
+	}
+	return nil
+}
+
+func (m *Voltha) GetOmciMibDatabase() []*omci.MibDeviceData {
+	if m != nil {
+		return m.OmciMibDatabase
+	}
+	return nil
+}
+
+func (m *Voltha) GetOmciAlarmDatabase() []*omci.AlarmDeviceData {
+	if m != nil {
+		return m.OmciAlarmDatabase
+	}
+	return nil
+}
+
+// Device Self Test Response
+type SelfTestResponse struct {
+	Result               SelfTestResponse_SelfTestResult `protobuf:"varint,1,opt,name=result,proto3,enum=voltha.SelfTestResponse_SelfTestResult" json:"result,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                        `json:"-"`
+	XXX_unrecognized     []byte                          `json:"-"`
+	XXX_sizecache        int32                           `json:"-"`
+}
+
+func (m *SelfTestResponse) Reset()         { *m = SelfTestResponse{} }
+func (m *SelfTestResponse) String() string { return proto.CompactTextString(m) }
+func (*SelfTestResponse) ProtoMessage()    {}
+func (*SelfTestResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{11}
+}
+
+func (m *SelfTestResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SelfTestResponse.Unmarshal(m, b)
+}
+func (m *SelfTestResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SelfTestResponse.Marshal(b, m, deterministic)
+}
+func (m *SelfTestResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SelfTestResponse.Merge(m, src)
+}
+func (m *SelfTestResponse) XXX_Size() int {
+	return xxx_messageInfo_SelfTestResponse.Size(m)
+}
+func (m *SelfTestResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_SelfTestResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SelfTestResponse proto.InternalMessageInfo
+
+func (m *SelfTestResponse) GetResult() SelfTestResponse_SelfTestResult {
+	if m != nil {
+		return m.Result
+	}
+	return SelfTestResponse_SUCCESS
+}
+
+type OfAgentSubscriber struct {
+	// ID of ofagent instance
+	OfagentId string `protobuf:"bytes,1,opt,name=ofagent_id,json=ofagentId,proto3" json:"ofagent_id,omitempty"`
+	// ID of voltha instance to which the ofagent is subscribed
+	VolthaId             string   `protobuf:"bytes,2,opt,name=voltha_id,json=volthaId,proto3" json:"voltha_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfAgentSubscriber) Reset()         { *m = OfAgentSubscriber{} }
+func (m *OfAgentSubscriber) String() string { return proto.CompactTextString(m) }
+func (*OfAgentSubscriber) ProtoMessage()    {}
+func (*OfAgentSubscriber) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{12}
+}
+
+func (m *OfAgentSubscriber) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfAgentSubscriber.Unmarshal(m, b)
+}
+func (m *OfAgentSubscriber) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfAgentSubscriber.Marshal(b, m, deterministic)
+}
+func (m *OfAgentSubscriber) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfAgentSubscriber.Merge(m, src)
+}
+func (m *OfAgentSubscriber) XXX_Size() int {
+	return xxx_messageInfo_OfAgentSubscriber.Size(m)
+}
+func (m *OfAgentSubscriber) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfAgentSubscriber.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfAgentSubscriber proto.InternalMessageInfo
+
+func (m *OfAgentSubscriber) GetOfagentId() string {
+	if m != nil {
+		return m.OfagentId
+	}
+	return ""
+}
+
+func (m *OfAgentSubscriber) GetVolthaId() string {
+	if m != nil {
+		return m.VolthaId
+	}
+	return ""
+}
+
+// Identifies a membership group a Core belongs to
+type Membership struct {
+	// Group name
+	GroupName string `protobuf:"bytes,1,opt,name=group_name,json=groupName,proto3" json:"group_name,omitempty"`
+	// Unique ID of a container within that group
+	Id                   string   `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Membership) Reset()         { *m = Membership{} }
+func (m *Membership) String() string { return proto.CompactTextString(m) }
+func (*Membership) ProtoMessage()    {}
+func (*Membership) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{13}
+}
+
+func (m *Membership) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Membership.Unmarshal(m, b)
+}
+func (m *Membership) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Membership.Marshal(b, m, deterministic)
+}
+func (m *Membership) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Membership.Merge(m, src)
+}
+func (m *Membership) XXX_Size() int {
+	return xxx_messageInfo_Membership.Size(m)
+}
+func (m *Membership) XXX_DiscardUnknown() {
+	xxx_messageInfo_Membership.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Membership proto.InternalMessageInfo
+
+func (m *Membership) GetGroupName() string {
+	if m != nil {
+		return m.GroupName
+	}
+	return ""
+}
+
+func (m *Membership) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+// Additional information required to process flow at device adapters
+type FlowMetadata struct {
+	// Meters associated with flow-update to adapter
+	Meters               []*openflow_13.OfpMeterConfig `protobuf:"bytes,1,rep,name=meters,proto3" json:"meters,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                      `json:"-"`
+	XXX_unrecognized     []byte                        `json:"-"`
+	XXX_sizecache        int32                         `json:"-"`
+}
+
+func (m *FlowMetadata) Reset()         { *m = FlowMetadata{} }
+func (m *FlowMetadata) String() string { return proto.CompactTextString(m) }
+func (*FlowMetadata) ProtoMessage()    {}
+func (*FlowMetadata) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{14}
+}
+
+func (m *FlowMetadata) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FlowMetadata.Unmarshal(m, b)
+}
+func (m *FlowMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FlowMetadata.Marshal(b, m, deterministic)
+}
+func (m *FlowMetadata) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FlowMetadata.Merge(m, src)
+}
+func (m *FlowMetadata) XXX_Size() int {
+	return xxx_messageInfo_FlowMetadata.Size(m)
+}
+func (m *FlowMetadata) XXX_DiscardUnknown() {
+	xxx_messageInfo_FlowMetadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlowMetadata proto.InternalMessageInfo
+
+func (m *FlowMetadata) GetMeters() []*openflow_13.OfpMeterConfig {
+	if m != nil {
+		return m.Meters
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterEnum("voltha.EventFilterRuleKey_EventFilterRuleType", EventFilterRuleKey_EventFilterRuleType_name, EventFilterRuleKey_EventFilterRuleType_value)
+	proto.RegisterEnum("voltha.TestResponse_TestResponseResult", TestResponse_TestResponseResult_name, TestResponse_TestResponseResult_value)
+	proto.RegisterEnum("voltha.SelfTestResponse_SelfTestResult", SelfTestResponse_SelfTestResult_name, SelfTestResponse_SelfTestResult_value)
+	proto.RegisterType((*DeviceGroup)(nil), "voltha.DeviceGroup")
+	proto.RegisterType((*DeviceGroups)(nil), "voltha.DeviceGroups")
+	proto.RegisterType((*EventFilterRuleKey)(nil), "voltha.EventFilterRuleKey")
+	proto.RegisterType((*EventFilterRule)(nil), "voltha.EventFilterRule")
+	proto.RegisterType((*EventFilter)(nil), "voltha.EventFilter")
+	proto.RegisterType((*EventFilters)(nil), "voltha.EventFilters")
+	proto.RegisterType((*CoreInstance)(nil), "voltha.CoreInstance")
+	proto.RegisterType((*CoreInstances)(nil), "voltha.CoreInstances")
+	proto.RegisterType((*OmciTestRequest)(nil), "voltha.OmciTestRequest")
+	proto.RegisterType((*TestResponse)(nil), "voltha.TestResponse")
+	proto.RegisterType((*Voltha)(nil), "voltha.Voltha")
+	proto.RegisterType((*SelfTestResponse)(nil), "voltha.SelfTestResponse")
+	proto.RegisterType((*OfAgentSubscriber)(nil), "voltha.OfAgentSubscriber")
+	proto.RegisterType((*Membership)(nil), "voltha.Membership")
+	proto.RegisterType((*FlowMetadata)(nil), "voltha.FlowMetadata")
+}
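+
+// The registrations above expose these message and enum types by their
+// fully qualified proto names in the proto (v1) registry. An illustrative
+// lookup sketch, assuming the github.com/golang/protobuf/proto and reflect
+// packages:
+//
+//	t := proto.MessageType("voltha.Voltha")                   // reflect.Type of *Voltha
+//	msg := reflect.New(t.Elem()).Interface().(proto.Message) // fresh *Voltha as a proto.Message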
+
+func init() { proto.RegisterFile("voltha_protos/voltha.proto", fileDescriptor_e084f1a60ce7016c) }
+
+var fileDescriptor_e084f1a60ce7016c = []byte{
+	// 2488 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0x4b, 0x73, 0xdb, 0xc8,
+	0xf1, 0x17, 0xf4, 0x56, 0x8b, 0x92, 0xc8, 0xa1, 0x1e, 0x34, 0x25, 0xf9, 0x31, 0xeb, 0xb5, 0xf5,
+	0xd7, 0xae, 0x49, 0xdb, 0xb2, 0x5d, 0xff, 0xd8, 0xd9, 0xca, 0xea, 0x65, 0x85, 0xb1, 0x65, 0x32,
+	0xa0, 0x65, 0xe7, 0xb1, 0x2e, 0x16, 0x48, 0x0c, 0x29, 0x94, 0x41, 0x82, 0x01, 0x86, 0xf2, 0xaa,
+	0x5c, 0x5b, 0xa9, 0xda, 0x3c, 0x36, 0xf7, 0xbd, 0xe7, 0x94, 0x54, 0xaa, 0xf2, 0x1d, 0xf2, 0x11,
+	0xf6, 0x94, 0x53, 0xae, 0xa9, 0x1c, 0xf2, 0x09, 0xf6, 0x9c, 0x9a, 0x9e, 0x01, 0x09, 0x10, 0x80,
+	0x1e, 0x9b, 0xad, 0xca, 0x49, 0xc2, 0x74, 0xcf, 0xef, 0xd7, 0xd3, 0x3d, 0xd3, 0xd3, 0x68, 0x10,
+	0xf2, 0x27, 0x8e, 0xcd, 0x8f, 0x8d, 0x5a, 0xd7, 0x75, 0xb8, 0xe3, 0x15, 0xe5, 0x53, 0x01, 0x9f,
+	0xc8, 0xa4, 0x7c, 0xca, 0xaf, 0xb5, 0x1c, 0xa7, 0x65, 0xb3, 0xa2, 0xd1, 0xb5, 0x8a, 0x46, 0xa7,
+	0xe3, 0x70, 0x83, 0x5b, 0x4e, 0xc7, 0x93, 0x5a, 0xf9, 0x55, 0x25, 0xc5, 0xa7, 0x7a, 0xaf, 0x59,
+	0x64, 0xed, 0x2e, 0x3f, 0x55, 0xc2, 0x5c, 0x18, 0xbe, 0xcd, 0xb8, 0x02, 0xcf, 0x0f, 0x11, 0x37,
+	0x9c, 0x76, 0xdb, 0xe9, 0xc4, 0xcb, 0x8e, 0x99, 0x61, 0xf3, 0x63, 0x25, 0xa3, 0x61, 0x99, 0xed,
+	0xb4, 0xac, 0x86, 0x61, 0xd7, 0x4c, 0x76, 0x62, 0x35, 0x58, 0xfc, 0xfc, 0x90, 0x6c, 0x35, 0x2c,
+	0x33, 0x4c, 0xa3, 0xcb, 0x99, 0xab, 0x84, 0xd7, 0xc2, 0x42, 0xa7, 0xcb, 0x3a, 0x4d, 0xdb, 0x79,
+	0x57, 0xbb, 0xb7, 0x95, 0xa0, 0xd0, 0x6e, 0x58, 0xb5, 0xb6, 0x55, 0xaf, 0x99, 0x75, 0xa5, 0x70,
+	0x23, 0x46, 0xc1, 0xb0, 0x0d, 0xb7, 0xdd, 0x57, 0xa1, 0x7f, 0xd2, 0x60, 0x76, 0x0f, 0x4d, 0x3a,
+	0x70, 0x9d, 0x5e, 0x97, 0x2c, 0xc1, 0xa8, 0x65, 0xe6, 0xb4, 0xeb, 0xda, 0xc6, 0xcc, 0xce, 0xc4,
+	0xbf, 0xbf, 0xfd, 0x66, 0x5d, 0xd3, 0x47, 0x2d, 0x93, 0x94, 0x60, 0x21, 0xbc, 0x38, 0x2f, 0x37,
+	0x7a, 0x7d, 0x6c, 0x63, 0xf6, 0xfe, 0x52, 0x41, 0x45, 0xe9, 0xb9, 0x14, 0x4b, 0xac, 0x9d, 0x99,
+	0x7f, 0x7e, 0xfb, 0xcd, 0xfa, 0xb8, 0xc0, 0xd2, 0xe7, 0xed, 0xa0, 0xc4, 0x23, 0x5b, 0x30, 0xe5,
+	0x43, 0x8c, 0x21, 0xc4, 0xbc, 0x0f, 0x11, 0x9d, 0xeb, 0x6b, 0xd2, 0x1f, 0x40, 0x2a, 0x60, 0xa5,
+	0x47, 0xfe, 0x0f, 0x26, 0x2c, 0xce, 0xda, 0x5e, 0x4e, 0x43, 0x88, 0x6c, 0x18, 0x02, 0x95, 0x74,
+	0xa9, 0x41, 0xff, 0xa8, 0x01, 0xd9, 0x3f, 0x61, 0x1d, 0xfe, 0xd4, 0xb2, 0x39, 0x73, 0xf5, 0x9e,
+	0xcd, 0x9e, 0xb1, 0x53, 0xfa, 0x95, 0x06, 0xd9, 0xa1, 0xe1, 0x97, 0xa7, 0x5d, 0x46, 0xe6, 0x01,
+	0x9a, 0x38, 0x52, 0x33, 0x6c, 0x3b, 0x3d, 0x42, 0x52, 0x30, 0xdd, 0x30, 0x38, 0x6b, 0x39, 0xee,
+	0x69, 0x5a, 0x23, 0x69, 0x48, 0x79, 0xbd, 0x7a, 0xad, 0x3f, 0x32, 0x4a, 0x08, 0xcc, 0xbf, 0xed,
+	0x5a, 0x35, 0x26, 0xa0, 0x6a, 0xfc, 0xb4, 0xcb, 0xd2, 0x63, 0x64, 0x09, 0x32, 0x0d, 0xa7, 0xd3,
+	0xb4, 0x5a, 0xc1, 0xe1, 0x71, 0x31, 0x2c, 0xd7, 0x13, 0x1c, 0x9e, 0xa0, 0x16, 0x2c, 0x0c, 0x19,
+	0x42, 0x3e, 0x85, 0xb1, 0xb7, 0xec, 0x14, 0xc3, 0x30, 0x7f, 0xbf, 0xe0, 0x2f, 0x2e, 0xba, 0x8a,
+	0x42, 0xcc, 0x0a, 0x74, 0x31, 0x95, 0x2c, 0xc2, 0xc4, 0x89, 0x61, 0xf7, 0x58, 0x6e, 0x54, 0x84,
+	0x52, 0x97, 0x0f, 0xf4, 0x2f, 0x1a, 0xcc, 0x06, 0xa6, 0x24, 0x45, 0x7b, 0x19, 0x26, 0x59, 0xc7,
+	0xa8, 0xdb, 0x72, 0xf6, 0xb4, 0xae, 0x9e, 0xc8, 0x2a, 0xcc, 0xa8, 0x05, 0x58, 0x66, 0x6e, 0x0c,
+	0x81, 0xa7, 0xe5, 0x40, 0xc9, 0x24, 0xeb, 0x00, 0x83, 0x65, 0xe5, 0xc6, 0x51, 0x3a, 0x83, 0x23,
+	0xe8, 0xd7, 0x3b, 0x30, 0xe1, 0xf6, 0x6c, 0xe6, 0xe5, 0x26, 0x30, 0x62, 0x2b, 0x09, 0x8b, 0xd2,
+	0xa5, 0x16, 0xfd, 0x04, 0x52, 0x01, 0x89, 0x47, 0xee, 0xc0, 0x94, 0x0c, 0x4b, 0x24, 0xe4, 0x41,
+	0x00, 0x5f, 0x87, 0xbe, 0x85, 0xd4, 0xae, 0xe3, 0xb2, 0x52, 0xc7, 0xe3, 0x46, 0xa7, 0xc1, 0xc8,
+	0x2d, 0x98, 0xb5, 0xd4, 0xff, 0xb5, 0xe1, 0x15, 0x83, 0x2f, 0x29, 0x99, 0x64, 0x0b, 0x26, 0xe5,
+	0x01, 0xc7, 0x95, 0xcf, 0xde, 0x5f, 0xf4, 0x59, 0x7e, 0x8c, 0xa3, 0x55, 0x6e, 0xf0, 0x9e, 0xb7,
+	0x33, 0x21, 0x76, 0xe8, 0x88, 0xae, 0x54, 0xe9, 0x13, 0x98, 0x0b, 0x92, 0x79, 0x64, 0x33, 0xbc,
+	0x3b, 0xfb, 0x20, 0x41, 0x2d, 0x7f, 0x7b, 0x3e, 0x84, 0x85, 0x72, 0xbb, 0x61, 0xbd, 0x64, 0x1e,
+	0xd7, 0xd9, 0xaf, 0x7a, 0xcc, 0xe3, 0x64, 0x7e, 0x10, 0x15, 0x0c, 0x07, 0x81, 0xf1, 0x5e, 0xcf,
+	0x32, 0x55, 0x28, 0xf1, 0x7f, 0xfa, 0x6b, 0x48, 0xc9, 0x29, 0x5e, 0xd7, 0xe9, 0x78, 0x8c, 0xfc,
+	0x08, 0x26, 0x5d, 0xe6, 0xf5, 0x6c, 0xae, 0x36, 0xcd, 0x6d, 0x9f, 0x33, 0xa8, 0x15, 0x7a, 0xd0,
+	0x51, 0x5d, 0x57, 0xd3, 0x68, 0x01, 0x48, 0x54, 0x4a, 0x66, 0x61, 0xaa, 0x7a, 0xb4, 0xbb, 0xbb,
+	0x5f, 0xad, 0xa6, 0x47, 0xc4, 0xc3, 0xd3, 0xed, 0xd2, 0xf3, 0x23, 0x7d, 0x3f, 0xad, 0xd1, 0x7f,
+	0x8c, 0xc3, 0xe4, 0x2b, 0xa4, 0x20, 0xd7, 0x60, 0xea, 0x84, 0xb9, 0x9e, 0xe5, 0x74, 0xc2, 0x8e,
+	0xf5, 0x47, 0xc9, 0x23, 0x98, 0x56, 0xa9, 0xcd, 0x4f, 0x1b, 0x0b, 0xbe, 0x79, 0xdb, 0x72, 0x3c,
+	0x78, 0xe8, 0xfb, 0xba, 0x71, 0x59, 0x67, 0xec, 0xbf, 0xcf, 0x3a, 0xe3, 0x17, 0xcd, 0x3a, 0xe4,
+	0x53, 0x48, 0xa9, 0xfd, 0x2e, 0xf6, 0xb4, 0xbf, 0x75, 0x49, 0x78, 0xa6, 0xd8, 0xdd, 0xc1, 0xd9,
+	0xb3, 0x66, 0x7f, 0xd8, 0x23, 0xbb, 0x30, 0xa7, 0x10, 0x5a, 0x98, 0xb8, 0x72, 0x93, 0x89, 0xf9,
+	0x2a, 0x88, 0xa1, 0x68, 0x55, 0xb2, 0xdb, 0x85, 0x39, 0x79, 0xb2, 0xfc, 0x13, 0x30, 0x95, 0x78,
+	0x02, 0x42, 0x20, 0x2c, 0x78, 0x80, 0x7e, 0x0a, 0x99, 0xc1, 0x05, 0x61, 0x70, 0xa3, 0x6e, 0x78,
+	0x2c, 0xb7, 0xa6, 0x80, 0x84, 0xa4, 0x70, 0x68, 0xd5, 0xa5, 0x39, 0x7b, 0x06, 0x37, 0x76, 0xd2,
+	0x02, 0x68, 0x36, 0x70, 0xe0, 0xf5, 0x05, 0xa1, 0x25, 0x94, 0xd4, 0x6c, 0xf2, 0x1a, 0xb2, 0xc1,
+	0x2b, 0xc5, 0x07, 0x5d, 0x57, 0x21, 0x42, 0xd0, 0x6d, 0x21, 0x3b, 0x13, 0x16, 0xcd, 0x92, 0x6a,
+	0x0a, 0x81, 0xfe, 0x59, 0x83, 0x74, 0x95, 0xd9, 0xcd, 0x8b, 0xed, 0xf0, 0x61, 0xcd, 0xe0, 0x40,
+	0x70, 0x87, 0x57, 0x60, 0x3e, 0x2c, 0x49, 0xde, 0xdd, 0x24, 0x03, 0x73, 0x2f, 0xca, 0x2f, 0x6b,
+	0xd5, 0xa3, 0x4a, 0xa5, 0xac, 0xbf, 0xdc, 0xdf, 0x4b, 0x8f, 0x8a, 0xa1, 0xa3, 0x17, 0xcf, 0x5e,
+	0x94, 0x5f, 0xbf, 0xa8, 0xed, 0xeb, 0x7a, 0x59, 0x4f, 0x8f, 0xd1, 0x32, 0x64, 0xca, 0xcd, 0xed,
+	0x16, 0xeb, 0xf0, 0x6a, 0xaf, 0xee, 0x35, 0x5c, 0xab, 0xce, 0x5c, 0x91, 0x07, 0x9d, 0xa6, 0x21,
+	0x06, 0xfb, 0x99, 0x46, 0x9f, 0x51, 0x23, 0x25, 0x53, 0xe4, 0x50, 0x75, 0x2b, 0xf7, 0x4f, 0xf4,
+	0xb4, 0x1c, 0x28, 0x99, 0xf4, 0x09, 0xc0, 0x21, 0x6b, 0xd7, 0x99, 0xeb, 0x1d, 0x5b, 0x5d, 0x81,
+	0x84, 0xbb, 0xa6, 0xd6, 0x31, 0xda, 0xcc, 0x47, 0xc2, 0x91, 0x17, 0x46, 0x9b, 0xa9, 0x34, 0x31,
+	0xea, 0xa7, 0x09, 0xba, 0x0f, 0xa9, 0xa7, 0xb6, 0xf3, 0xee, 0x90, 0x71, 0x43, 0xc4, 0x82, 0x3c,
+	0x84, 0xc9, 0x36, 0x0b, 0x64, 0xcc, 0xf5, 0x42, 0xb0, 0x84, 0x70, 0x9a, 0xdd, 0x1a, 0x8a, 0x6b,
+	0xf2, 0xaa, 0xd2, 0x95, 0xf2, 0xfd, 0xbf, 0x15, 0x60, 0x4e, 0x1e, 0xec, 0x2a, 0x73, 0x45, 0x90,
+	0xc8, 0x6b, 0x98, 0x3b, 0x60, 0x3c, 0x60, 0xd8, 0x72, 0x41, 0x96, 0x59, 0x05, 0xbf, 0xcc, 0x2a,
+	0xec, 0x8b, 0x32, 0x2b, 0xdf, 0x3f, 0x19, 0x03, 0x5d, 0x9a, 0xff, 0xf2, 0xef, 0xff, 0xfa, 0x7a,
+	0x74, 0x91, 0x10, 0xac, 0xd8, 0x4e, 0xee, 0x15, 0xdb, 0x03, 0x9c, 0x37, 0x90, 0x3e, 0xea, 0x9a,
+	0x06, 0x67, 0x01, 0xec, 0x18, 0x8c, 0x7c, 0x02, 0x1f, 0x5d, 0x47, 0xec, 0x15, 0x1a, 0x83, 0xfd,
+	0x58, 0xdb, 0x24, 0x7b, 0x30, 0x73, 0xc0, 0xb8, 0x4a, 0x52, 0x49, 0x36, 0xf7, 0xf3, 0x80, 0xd4,
+	0xa3, 0x0b, 0x88, 0x39, 0x43, 0xa6, 0x14, 0x26, 0x79, 0x03, 0x99, 0xe7, 0x96, 0xc7, 0xc3, 0x19,
+	0x3e, 0x09, 0x6d, 0x29, 0x2e, 0xd5, 0x7b, 0xf4, 0x0a, 0x82, 0x66, 0x49, 0xc6, 0x37, 0xd4, 0xea,
+	0x23, 0x55, 0x61, 0xe1, 0x80, 0x85, 0xd0, 0x09, 0x14, 0x54, 0x01, 0x5a, 0xda, 0xcb, 0xc7, 0xde,
+	0x1d, 0xf4, 0x2a, 0xe2, 0xe5, 0xc8, 0x72, 0x04, 0xaf, 0xf8, 0xde, 0x32, 0xbf, 0x20, 0x3a, 0xa4,
+	0x84, 0xcd, 0xdb, 0x7e, 0x22, 0x4d, 0x32, 0x37, 0x3d, 0x94, 0x86, 0x3d, 0x9a, 0x43, 0x64, 0x42,
+	0xd2, 0x3e, 0x72, 0x3f, 0x19, 0x33, 0x20, 0x02, 0xf3, 0x79, 0x38, 0xaf, 0x26, 0x21, 0x2f, 0xc7,
+	0x66, 0x68, 0x8f, 0x5e, 0x43, 0xfc, 0x2b, 0x64, 0xc5, 0xc7, 0x1f, 0x4a, 0xf0, 0xe4, 0x97, 0x90,
+	0x3e, 0x60, 0x61, 0x96, 0x90, 0x43, 0xe2, 0x53, 0x3f, 0xbd, 0x89, 0xb8, 0x57, 0xc9, 0x5a, 0x02,
+	0xae, 0xf4, 0x4b, 0x13, 0x96, 0x23, 0x6b, 0xa8, 0x38, 0x2e, 0xf7, 0xe2, 0x7d, 0xae, 0xf4, 0x50,
+	0x83, 0x6e, 0x22, 0xc3, 0x4d, 0x42, 0xcf, 0x62, 0x28, 0x76, 0x11, 0xed, 0x73, 0x58, 0x1c, 0x5e,
+	0x84, 0x00, 0x21, 0x4b, 0x31, 0xc8, 0x25, 0x33, 0x9f, 0x8d, 0x19, 0xa6, 0x0f, 0x90, 0xaf, 0x40,
+	0x3e, 0x3e, 0x9f, 0xaf, 0xf8, 0x5e, 0xfc, 0xa9, 0x89, 0x15, 0xfe, 0x4e, 0x83, 0x95, 0x7d, 0xac,
+	0xd6, 0x2e, 0xcc, 0x9e, 0x74, 0xba, 0x9e, 0xa0, 0x01, 0x0f, 0xe9, 0xd6, 0x65, 0x0c, 0x28, 0xaa,
+	0x52, 0xf1, 0x2b, 0x0d, 0x72, 0x7b, 0x96, 0xf7, 0xbd, 0x18, 0xf2, 0x43, 0x34, 0xe4, 0x11, 0x7d,
+	0x70, 0x29, 0x43, 0x4c, 0xc9, 0x4e, 0xcc, 0x98, 0x98, 0x8b, 0x3c, 0x19, 0x8e, 0x39, 0x09, 0x25,
+	0x47, 0x94, 0x5f, 0x30, 0xe2, 0x4d, 0xc4, 0xfa, 0x8d, 0x06, 0x6b, 0x32, 0x97, 0x45, 0x88, 0x5e,
+	0xa2, 0x19, 0x6b, 0x11, 0x02, 0x1c, 0x97, 0x73, 0x12, 0x97, 0x7e, 0x07, 0x4d, 0xb8, 0x4d, 0x2f,
+	0x60, 0x82, 0xc8, 0x78, 0xbf, 0xd5, 0x60, 0x3d, 0xc6, 0x8a, 0x43, 0x91, 0xd9, 0xa5, 0x19, 0xab,
+	0x21, 0x33, 0x50, 0x70, 0xe8, 0x98, 0xe7, 0x58, 0x51, 0x40, 0x2b, 0x36, 0xe8, 0x07, 0x67, 0x5a,
+	0x21, 0xef, 0x0f, 0x61, 0x46, 0x0b, 0x56, 0x22, 0x2e, 0x47, 0xaa, 0xb0, 0xcf, 0xb3, 0x51, 0x5b,
+	0x3c, 0xfa, 0x11, 0x72, 0x7d, 0x48, 0x2e, 0xc2, 0x45, 0x38, 0xac, 0xc6, 0xc6, 0x56, 0x15, 0x4e,
+	0x41, 0xb2, 0x95, 0x88, 0xff, 0xa5, 0x12, 0xbd, 0x8b, 0x84, 0x9b, 0x64, 0xe3, 0x5c, 0x17, 0xab,
+	0x1a, 0x8e, 0x7c, 0xad, 0xc1, 0x8d, 0x84, 0x58, 0x23, 0xa6, 0xf4, 0xf4, 0x8d, 0x78, 0xc2, 0x8b,
+	0x44, 0x7d, 0x0b, 0x4d, 0xba, 0x43, 0x2f, 0x6c, 0x92, 0x70, 0x7a, 0x19, 0x66, 0x85, 0x2f, 0xce,
+	0x4b, 0xcc, 0x0b, 0xe1, 0xd2, 0xd3, 0xa3, 0x2b, 0x48, 0x96, 0x21, 0x0b, 0x3e, 0x99, 0x9f, 0x89,
+	0xcb, 0x30, 0x37, 0x00, 0x2c, 0x99, 0xc9, 0x90, 0xb3, 0x03, 0x37, 0xc7, 0x5c, 0x75, 0x12, 0xce,
+	0x32, 0x3d, 0x72, 0x04, 0x69, 0x9d, 0x35, 0x9c, 0x4e, 0xc3, 0xb2, 0x99, 0x6f, 0x66, 0x70, 0x6e,
+	0xa2, 0x3f, 0xd6, 0x10, 0x73, 0x99, 0x46, 0x31, 0xc5, 0xc2, 0xf7, 0xf1, 0x9a, 0x8f, 0xb9, 0x2a,
+	0x86, 0x4a, 0x7c, 0x1f, 0x86, 0x2c, 0x0e, 0xad, 0x54, 0xde, 0x0d, 0x3f, 0x81, 0xd4, 0xae, 0xcb,
+	0x0c, 0xae, 0x4c, 0x23, 0x43, 0xb3, 0x23, 0x68, 0xaa, 0xb0, 0xa1, 0xc3, 0x7e, 0x13, 0x26, 0xbd,
+	0x86, 0x94, 0x4c, 0xc2, 0x31, 0x56, 0x25, 0x2d, 0xf2, 0x03, 0xc4, 0x5b, 0xa7, 0xab, 0x71, 0xd6,
+	0xf9, 0x69, 0xf5, 0xe7, 0x30, 0xa7, 0xb2, 0xea, 0x25, 0x90, 0xd5, 0xdd, 0x48, 0xd7, 0x62, 0x91,
+	0xfd, 0x3c, 0xf9, 0x1a, 0x52, 0x3a, 0xab, 0x3b, 0x0e, 0xff, 0xde, 0x6c, 0x76, 0x11, 0x4e, 0x00,
+	0xef, 0x31, 0x9b, 0xf1, 0xef, 0xe0, 0x8c, 0xcd, 0x78, 0x60, 0x13, 0xe1, 0x48, 0x0f, 0xe6, 0xf6,
+	0x9c, 0x77, 0x1d, 0xdb, 0x31, 0xcc, 0x52, 0xdb, 0x68, 0xb1, 0xc1, 0xbd, 0x82, 0x8f, 0xbe, 0x2c,
+	0xbf, 0xe4, 0x13, 0x96, 0xbb, 0xcc, 0xc5, 0x76, 0xa1, 0x78, 0x55, 0xa0, 0x8f, 0x90, 0xe3, 0x2e,
+	0xfd, 0x28, 0x96, 0xc3, 0x12, 0x10, 0x35, 0x53, 0x61, 0x78, 0xc5, 0xf7, 0xa2, 0x08, 0xff, 0x42,
+	0x04, 0xf7, 0x4b, 0x0d, 0x96, 0x0f, 0x18, 0x0f, 0x71, 0xc8, 0xc6, 0x40, 0xb2, 0x01, 0x71, 0xc3,
+	0xf4, 0x31, 0x1a, 0xf0, 0x80, 0xdc, 0xbf, 0x84, 0x01, 0x45, 0x4f, 0x32, 0xf5, 0xb0, 0x4c, 0x0a,
+	0xe1, 0x5d, 0x92, 0x5d, 0x25, 0x19, 0x72, 0x99, 0xe5, 0x93, 0xa6, 0x2c, 0x02, 0x43, 0x48, 0xde,
+	0x50, 0x44, 0xe3, 0xd8, 0x3c, 0xfa, 0x31, 0xd2, 0xdd, 0x22, 0x37, 0x2f, 0x42, 0x47, 0x3e, 0x87,
+	0xec, 0xae, 0xa8, 0x67, 0xed, 0x0b, 0xae, 0x30, 0x36, 0xc0, 0x6a, 0x85, 0x9b, 0x97, 0x5a, 0xe1,
+	0x1f, 0x34, 0xc8, 0x6e, 0x37, 0xb8, 0x75, 0x62, 0x70, 0x86, 0x2c, 0x32, 0x57, 0x5f, 0x92, 0x7a,
+	0x17, 0xa9, 0x3f, 0xa1, 0xff, 0x7f, 0x99, 0xd0, 0xca, 0xe1, 0x1e, 0xf2, 0x89, 0x8d, 0xf6, 0x7b,
+	0x0d, 0x32, 0x3a, 0x3b, 0x61, 0x2e, 0xff, 0x9f, 0x18, 0xe2, 0x22, 0xb5, 0x30, 0xa4, 0x02, 0x0b,
+	0x83, 0x9b, 0x20, 0x5a, 0x2f, 0xcf, 0xf9, 0x16, 0xc9, 0x42, 0x99, 0x22, 0xe5, 0x1a, 0xc9, 0xc7,
+	0x52, 0xca, 0x02, 0xf9, 0x0d, 0x64, 0x03, 0x88, 0xed, 0x5d, 0x7c, 0x05, 0x0d, 0xa3, 0x66, 0xfa,
+	0xa8, 0xbe, 0x98, 0xde, 0x46, 0xe4, 0x1b, 0xe4, 0x5a, 0x3c, 0x72, 0x5b, 0xbd, 0xca, 0x7a, 0xa4,
+	0x03, 0x4b, 0xd2, 0x5b, 0xc3, 0x04, 0x51, 0xd0, 0xc4, 0x14, 0xa4, 0xaa, 0x3f, 0x7a, 0x1e, 0x99,
+	0x70, 0xd0, 0x51, 0xd0, 0x41, 0x17, 0x2b, 0x2e, 0xcf, 0xf6, 0x92, 0x2c, 0x2a, 0x19, 0x2c, 0x86,
+	0x61, 0x2f, 0x53, 0xd7, 0x6c, 0x20, 0x01, 0x25, 0xd7, 0x13, 0x09, 0xfc, 0x7a, 0xe6, 0xb3, 0xa0,
+	0xf5, 0xb2, 0x6f, 0x95, 0x74, 0xd5, 0x67, 0xa3, 0xbd, 0x2f, 0x2f, 0xe9, 0x5e, 0x95, 0x4d, 0x33,
+	0xa2, 0x63, 0xf7, 0x60, 0xa0, 0x3f, 0xe4, 0x99, 0x08, 0x1e, 0xbd, 0x81, 0x70, 0xab, 0xe4, 0x4a,
+	0x1c, 0x9c, 0xbc, 0xab, 0x6b, 0x90, 0x1e, 0x58, 0xac, 0x9c, 0x92, 0x64, 0xf2, 0x62, 0x4c, 0xaf,
+	0xcd, 0xf3, 0x5b, 0x07, 0x64, 0x69, 0x88, 0x44, 0xb9, 0xe4, 0x29, 0xa4, 0xab, 0xdc, 0x65, 0x46,
+	0xbb, 0x62, 0x34, 0xde, 0x32, 0xee, 0x95, 0x7b, 0x9c, 0x2c, 0x87, 0x3c, 0x2d, 0x05, 0xe5, 0x1e,
+	0x4f, 0xdc, 0x40, 0x23, 0x1b, 0x1a, 0xd9, 0xc7, 0x92, 0x87, 0x59, 0x27, 0x4c, 0x01, 0x95, 0x3a,
+	0x67, 0xf4, 0x0e, 0xa2, 0xf8, 0xa5, 0x0e, 0x1d, 0xb9, 0xab, 0x91, 0x67, 0x90, 0x55, 0x30, 0xbb,
+	0xc7, 0x46, 0xa7, 0xc5, 0xb0, 0xe3, 0x97, 0xbc, 0xe4, 0x5c, 0x08, 0x29, 0x30, 0x05, 0xc1, 0x8e,
+	0x60, 0xbe, 0x1f, 0x10, 0xf9, 0xd1, 0x27, 0x5c, 0x94, 0x47, 0xdd, 0x95, 0xb4, 0x59, 0x95, 0xb7,
+	0xfc, 0x98, 0x64, 0x64, 0xfd, 0x14, 0xfc, 0xc0, 0x10, 0xd7, 0xa3, 0xcc, 0xc7, 0x0d, 0xd2, 0xeb,
+	0x48, 0x91, 0xa7, 0xfd, 0x80, 0x84, 0x5a, 0x9e, 0xe2, 0x90, 0xbd, 0x42, 0xbb, 0x83, 0xe8, 0xb1,
+	0x2f, 0xed, 0xc1, 0xcf, 0x06, 0x51, 0xc3, 0x43, 0xa8, 0xd2, 0x70, 0x13, 0x32, 0x32, 0x59, 0x7c,
+	0x37, 0xc3, 0x3f, 0x44, 0x8a, 0x6b, 0xf9, 0x33, 0x28, 0x84, 0xf5, 0x26, 0x64, 0x64, 0x15, 0x74,
+	0x2e, 0x4b, 0xd2, 0x7e, 0x52, 0x6b, 0xd9, 0x3c, 0x6b, 0x2d, 0xea, 0x60, 0x84, 0x3e, 0x9d, 0x9c,
+	0x7b, 0x30, 0x42, 0x1e, 0x8b, 0x1c, 0x8c, 0x10, 0x0b, 0x79, 0x8e, 0xc5, 0x36, 0x5e, 0x3d, 0x5e,
+	0x7c, 0xb1, 0x2d, 0x65, 0x7e, 0x05, 0x47, 0x56, 0x93, 0x2f, 0x1e, 0x8f, 0xfc, 0x0c, 0xa6, 0xfd,
+	0x96, 0x6c, 0x08, 0x2c, 0x97, 0xd4, 0xdb, 0xa5, 0xb7, 0x10, 0xf6, 0x3a, 0xbd, 0x1a, 0x0b, 0xeb,
+	0x31, 0xbb, 0x59, 0xe3, 0x02, 0xed, 0x15, 0xd6, 0x47, 0xa1, 0x96, 0xf6, 0xf0, 0xbb, 0x67, 0xa4,
+	0xe7, 0x1d, 0xcd, 0x3c, 0xe2, 0x18, 0x09, 0x3d, 0xf5, 0xd2, 0x69, 0xd5, 0xc9, 0x67, 0x40, 0x0e,
+	0x18, 0x1f, 0xea, 0x6a, 0x0f, 0x35, 0xa8, 0xe2, 0x1a, 0xdf, 0x51, 0x7f, 0x84, 0xb1, 0xb1, 0x87,
+	0x4e, 0x3c, 0x98, 0xab, 0x5a, 0xed, 0x9e, 0x6d, 0x70, 0x86, 0xf3, 0xc9, 0x5a, 0xdf, 0x11, 0xc1,
+	0x61, 0xf5, 0xa1, 0x28, 0xe9, 0xce, 0x8f, 0x34, 0x0d, 0xc2, 0x3e, 0x52, 0x48, 0x35, 0x81, 0x24,
+	0x76, 0xe6, 0x2e, 0xcc, 0xf4, 0xdb, 0xd7, 0xe4, 0x8a, 0x4f, 0x18, 0x69, 0x6c, 0xe7, 0x93, 0x45,
+	0x74, 0x84, 0x1c, 0x02, 0xc8, 0x37, 0x1e, 0x6c, 0xf0, 0xa4, 0x82, 0x15, 0x41, 0xe2, 0x86, 0x56,
+	0xaf, 0x8a, 0x74, 0x5e, 0xd8, 0x38, 0x98, 0xad, 0x5e, 0x66, 0xd5, 0x7b, 0xce, 0x25, 0xf0, 0x06,
+	0x6f, 0x64, 0x27, 0xf7, 0x8a, 0x81, 0xe9, 0x02, 0xf0, 0x18, 0xb2, 0x55, 0x6e, 0xb8, 0xdc, 0xff,
+	0xd6, 0x26, 0x4a, 0x3c, 0xa7, 0x43, 0xfa, 0x9f, 0x21, 0x87, 0xbe, 0xc1, 0x0d, 0x0e, 0x47, 0x68,
+	0xf7, 0xa9, 0x23, 0x48, 0xfb, 0xdd, 0x4b, 0x4f, 0x60, 0xd6, 0xf0, 0x2b, 0x88, 0xd8, 0x76, 0x8f,
+	0xb5, 0xcd, 0x1d, 0x1b, 0xb2, 0x8e, 0xdb, 0xc2, 0x0c, 0xdc, 0x70, 0x5c, 0x53, 0xe1, 0xec, 0xa4,
+	0x64, 0x7f, 0xb9, 0x82, 0x9f, 0xe2, 0x7f, 0x51, 0x68, 0x59, 0xfc, 0xb8, 0x57, 0x17, 0xf1, 0x2b,
+	0xfa, 0x9a, 0xea, 0xf7, 0x0e, 0x77, 0xfc, 0x5f, 0x3f, 0x6c, 0x15, 0x5b, 0x8e, 0x1a, 0xfb, 0xeb,
+	0xe8, 0x72, 0xd9, 0xc7, 0x7b, 0x15, 0x6c, 0x57, 0x57, 0x46, 0x2b, 0x63, 0x95, 0xf1, 0xca, 0x44,
+	0x65, 0xb2, 0x32, 0x55, 0x99, 0xae, 0x4f, 0xe2, 0xdc, 0xad, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff,
+	0x68, 0xe5, 0x85, 0x3e, 0x49, 0x21, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// VolthaServiceClient is the client API for VolthaService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type VolthaServiceClient interface {
+	// Get the membership group of a Voltha Core
+	GetMembership(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*Membership, error)
+	// Set the membership group of a Voltha Core
+	UpdateMembership(ctx context.Context, in *Membership, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Get high level information on the Voltha cluster
+	GetVoltha(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*Voltha, error)
+	// List all Voltha cluster core instances
+	ListCoreInstances(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*CoreInstances, error)
+	// Get details on a Voltha cluster instance
+	GetCoreInstance(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*CoreInstance, error)
+	// List all active adapters (plugins) in the Voltha cluster
+	ListAdapters(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*Adapters, error)
+	// List all logical devices managed by the Voltha cluster
+	ListLogicalDevices(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LogicalDevices, error)
+	// Get additional information on a given logical device
+	GetLogicalDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*LogicalDevice, error)
+	// List ports of a logical device
+	ListLogicalDevicePorts(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*LogicalPorts, error)
+	// Gets a logical device port
+	GetLogicalDevicePort(ctx context.Context, in *LogicalPortId, opts ...grpc.CallOption) (*LogicalPort, error)
+	// Enables a logical device port
+	EnableLogicalDevicePort(ctx context.Context, in *LogicalPortId, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Disables a logical device port
+	DisableLogicalDevicePort(ctx context.Context, in *LogicalPortId, opts ...grpc.CallOption) (*empty.Empty, error)
+	// List all flows of a logical device
+	ListLogicalDeviceFlows(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.Flows, error)
+	// Update flow table for logical device
+	UpdateLogicalDeviceFlowTable(ctx context.Context, in *openflow_13.FlowTableUpdate, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Update meter table for logical device
+	UpdateLogicalDeviceMeterTable(ctx context.Context, in *openflow_13.MeterModUpdate, opts ...grpc.CallOption) (*empty.Empty, error)
+	// List all meters of a logical device
+	ListLogicalDeviceMeters(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.Meters, error)
+	// List all flow groups of a logical device
+	ListLogicalDeviceFlowGroups(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.FlowGroups, error)
+	// Update group table for logical device
+	UpdateLogicalDeviceFlowGroupTable(ctx context.Context, in *openflow_13.FlowGroupTableUpdate, opts ...grpc.CallOption) (*empty.Empty, error)
+	// List all physical devices controlled by the Voltha cluster
+	ListDevices(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*Devices, error)
+	// List all physical device IDs controlled by the Voltha cluster
+	ListDeviceIds(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*common.IDs, error)
+	// Request to a voltha Core to reconcile a set of devices based on their IDs
+	ReconcileDevices(ctx context.Context, in *common.IDs, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Get more information on a given physical device
+	GetDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*Device, error)
+	// Pre-provision a new physical device
+	CreateDevice(ctx context.Context, in *Device, opts ...grpc.CallOption) (*Device, error)
+	// Enable a device.  If the device was in pre-provisioned state then it
+	// will transition to ENABLED state.  If it was in DISABLED state then it
+	// will transition to ENABLED state as well.
+	EnableDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Disable a device
+	DisableDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Reboot a device
+	RebootDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Delete a device
+	DeleteDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Request an image download to the standby partition
+	// of a device.
+	// Note that the call is expected to be non-blocking.
+	DownloadImage(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*common.OperationResp, error)
+	// Get image download status on a device
+	// The request retrieves progress on the device and updates the db record
+	GetImageDownloadStatus(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*ImageDownload, error)
+	// Get image download db record
+	GetImageDownload(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*ImageDownload, error)
+	// List image download db records for a given device
+	ListImageDownloads(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*ImageDownloads, error)
+	// Cancel an existing image download process on a device
+	CancelImageDownload(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*common.OperationResp, error)
+	// Activate the specified image, promoting it from the standby
+	// partition to the active partition.
+	// Depending on the device implementation, this call
+	// may or may not cause a device reboot.
+	// If it does not, a reboot is required to make the
+	// activated image run on the device.
+	// Note that the call is expected to be non-blocking.
+	ActivateImageUpdate(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*common.OperationResp, error)
+	// Revert to the previous image, swapping the standby
+	// partition back to the active partition.
+	// Depending on the device implementation, this call
+	// may or may not cause a device reboot.
+	// If it does not, a reboot is required to make the
+	// previous image run on the device.
+	// Note that the call is expected to be non-blocking.
+	RevertImageUpdate(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*common.OperationResp, error)
+	// List ports of a device
+	ListDevicePorts(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*Ports, error)
+	// List the pm configs of a device
+	ListDevicePmConfigs(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*PmConfigs, error)
+	// Update the pm config of a device
+	UpdateDevicePmConfigs(ctx context.Context, in *PmConfigs, opts ...grpc.CallOption) (*empty.Empty, error)
+	// List all flows of a device
+	ListDeviceFlows(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.Flows, error)
+	// List all flow groups of a device
+	ListDeviceFlowGroups(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.FlowGroups, error)
+	// List device types known to Voltha
+	ListDeviceTypes(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*DeviceTypes, error)
+	// Get additional information on a device type
+	GetDeviceType(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*DeviceType, error)
+	// List all device sharding groups
+	ListDeviceGroups(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*DeviceGroups, error)
+	// Stream control packets to the dataplane
+	StreamPacketsOut(ctx context.Context, opts ...grpc.CallOption) (VolthaService_StreamPacketsOutClient, error)
+	// Receive control packet stream
+	ReceivePacketsIn(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (VolthaService_ReceivePacketsInClient, error)
+	ReceiveChangeEvents(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (VolthaService_ReceiveChangeEventsClient, error)
+	// Get additional information on a device group
+	GetDeviceGroup(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*DeviceGroup, error)
+	CreateEventFilter(ctx context.Context, in *EventFilter, opts ...grpc.CallOption) (*EventFilter, error)
+	// Get all filters present for a device
+	GetEventFilter(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*EventFilters, error)
+	UpdateEventFilter(ctx context.Context, in *EventFilter, opts ...grpc.CallOption) (*EventFilter, error)
+	DeleteEventFilter(ctx context.Context, in *EventFilter, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Get all the filters present
+	ListEventFilters(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*EventFilters, error)
+	GetImages(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*Images, error)
+	SelfTest(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*SelfTestResponse, error)
+	// OpenOMCI MIB information
+	GetMibDeviceData(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*omci.MibDeviceData, error)
+	// OpenOMCI ALARM information
+	GetAlarmDeviceData(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*omci.AlarmDeviceData, error)
+	// Simulate an Alarm
+	SimulateAlarm(ctx context.Context, in *SimulateAlarmRequest, opts ...grpc.CallOption) (*common.OperationResp, error)
+	Subscribe(ctx context.Context, in *OfAgentSubscriber, opts ...grpc.CallOption) (*OfAgentSubscriber, error)
+	EnablePort(ctx context.Context, in *Port, opts ...grpc.CallOption) (*empty.Empty, error)
+	DisablePort(ctx context.Context, in *Port, opts ...grpc.CallOption) (*empty.Empty, error)
+	// OMCI start and stop CLI implementation
+	StartOmciTestAction(ctx context.Context, in *OmciTestRequest, opts ...grpc.CallOption) (*TestResponse, error)
+}
+
+type volthaServiceClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewVolthaServiceClient(cc *grpc.ClientConn) VolthaServiceClient {
+	return &volthaServiceClient{cc}
+}
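+
+// A minimal, illustrative usage sketch (not part of the generated API):
+// dial a gRPC connection and issue a unary call. The address, timeout, and
+// insecure transport are assumptions for the example only.
+//
+//	conn, err := grpc.Dial("localhost:50057", grpc.WithInsecure())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer conn.Close()
+//
+//	client := NewVolthaServiceClient(conn)
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	v, err := client.GetVoltha(ctx, &empty.Empty{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Println("voltha version:", v.GetVersion())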
+
+func (c *volthaServiceClient) GetMembership(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*Membership, error) {
+	out := new(Membership)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetMembership", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) UpdateMembership(ctx context.Context, in *Membership, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/UpdateMembership", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetVoltha(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*Voltha, error) {
+	out := new(Voltha)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetVoltha", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListCoreInstances(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*CoreInstances, error) {
+	out := new(CoreInstances)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListCoreInstances", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetCoreInstance(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*CoreInstance, error) {
+	out := new(CoreInstance)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetCoreInstance", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListAdapters(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*Adapters, error) {
+	out := new(Adapters)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListAdapters", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListLogicalDevices(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LogicalDevices, error) {
+	out := new(LogicalDevices)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListLogicalDevices", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetLogicalDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*LogicalDevice, error) {
+	out := new(LogicalDevice)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetLogicalDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListLogicalDevicePorts(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*LogicalPorts, error) {
+	out := new(LogicalPorts)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListLogicalDevicePorts", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetLogicalDevicePort(ctx context.Context, in *LogicalPortId, opts ...grpc.CallOption) (*LogicalPort, error) {
+	out := new(LogicalPort)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetLogicalDevicePort", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) EnableLogicalDevicePort(ctx context.Context, in *LogicalPortId, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/EnableLogicalDevicePort", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) DisableLogicalDevicePort(ctx context.Context, in *LogicalPortId, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/DisableLogicalDevicePort", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListLogicalDeviceFlows(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.Flows, error) {
+	out := new(openflow_13.Flows)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListLogicalDeviceFlows", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) UpdateLogicalDeviceFlowTable(ctx context.Context, in *openflow_13.FlowTableUpdate, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/UpdateLogicalDeviceFlowTable", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) UpdateLogicalDeviceMeterTable(ctx context.Context, in *openflow_13.MeterModUpdate, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/UpdateLogicalDeviceMeterTable", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListLogicalDeviceMeters(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.Meters, error) {
+	out := new(openflow_13.Meters)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListLogicalDeviceMeters", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListLogicalDeviceFlowGroups(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.FlowGroups, error) {
+	out := new(openflow_13.FlowGroups)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListLogicalDeviceFlowGroups", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) UpdateLogicalDeviceFlowGroupTable(ctx context.Context, in *openflow_13.FlowGroupTableUpdate, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/UpdateLogicalDeviceFlowGroupTable", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListDevices(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*Devices, error) {
+	out := new(Devices)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListDevices", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListDeviceIds(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*common.IDs, error) {
+	out := new(common.IDs)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListDeviceIds", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ReconcileDevices(ctx context.Context, in *common.IDs, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ReconcileDevices", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*Device, error) {
+	out := new(Device)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) CreateDevice(ctx context.Context, in *Device, opts ...grpc.CallOption) (*Device, error) {
+	out := new(Device)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/CreateDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) EnableDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/EnableDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) DisableDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/DisableDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) RebootDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/RebootDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) DeleteDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/DeleteDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) DownloadImage(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*common.OperationResp, error) {
+	out := new(common.OperationResp)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/DownloadImage", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetImageDownloadStatus(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*ImageDownload, error) {
+	out := new(ImageDownload)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetImageDownloadStatus", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
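+
+// Illustrative sketch of the non-blocking image workflow: start a download,
+// then poll GetImageDownloadStatus until the returned record reports a
+// terminal state. The polling interval and the downloadFinished helper are
+// assumptions for the example only.
+//
+//	if _, err := client.DownloadImage(ctx, img); err != nil {
+//		log.Fatal(err)
+//	}
+//	for {
+//		st, err := client.GetImageDownloadStatus(ctx, img)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		if downloadFinished(st) { // assumed helper inspecting st's state field
+//			break
+//		}
+//		time.Sleep(time.Second)
+//	}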
+
+func (c *volthaServiceClient) GetImageDownload(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*ImageDownload, error) {
+	out := new(ImageDownload)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetImageDownload", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListImageDownloads(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*ImageDownloads, error) {
+	out := new(ImageDownloads)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListImageDownloads", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) CancelImageDownload(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*common.OperationResp, error) {
+	out := new(common.OperationResp)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/CancelImageDownload", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ActivateImageUpdate(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*common.OperationResp, error) {
+	out := new(common.OperationResp)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ActivateImageUpdate", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) RevertImageUpdate(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*common.OperationResp, error) {
+	out := new(common.OperationResp)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/RevertImageUpdate", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListDevicePorts(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*Ports, error) {
+	out := new(Ports)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListDevicePorts", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListDevicePmConfigs(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*PmConfigs, error) {
+	out := new(PmConfigs)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListDevicePmConfigs", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) UpdateDevicePmConfigs(ctx context.Context, in *PmConfigs, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/UpdateDevicePmConfigs", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListDeviceFlows(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.Flows, error) {
+	out := new(openflow_13.Flows)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListDeviceFlows", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListDeviceFlowGroups(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.FlowGroups, error) {
+	out := new(openflow_13.FlowGroups)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListDeviceFlowGroups", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListDeviceTypes(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*DeviceTypes, error) {
+	out := new(DeviceTypes)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListDeviceTypes", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetDeviceType(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*DeviceType, error) {
+	out := new(DeviceType)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetDeviceType", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListDeviceGroups(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*DeviceGroups, error) {
+	out := new(DeviceGroups)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListDeviceGroups", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) StreamPacketsOut(ctx context.Context, opts ...grpc.CallOption) (VolthaService_StreamPacketsOutClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_VolthaService_serviceDesc.Streams[0], "/voltha.VolthaService/StreamPacketsOut", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &volthaServiceStreamPacketsOutClient{stream}
+	return x, nil
+}
+
+type VolthaService_StreamPacketsOutClient interface {
+	Send(*openflow_13.PacketOut) error
+	CloseAndRecv() (*empty.Empty, error)
+	grpc.ClientStream
+}
+
+type volthaServiceStreamPacketsOutClient struct {
+	grpc.ClientStream
+}
+
+func (x *volthaServiceStreamPacketsOutClient) Send(m *openflow_13.PacketOut) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *volthaServiceStreamPacketsOutClient) CloseAndRecv() (*empty.Empty, error) {
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	m := new(empty.Empty)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
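+
+// Client-streaming usage sketch (illustrative): send a sequence of
+// PacketOut messages, then close the send side and wait for the server's
+// single Empty reply. The packets slice is an assumption for the example.
+//
+//	stream, err := client.StreamPacketsOut(ctx)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, pkt := range packets { // packets is an assumed []*openflow_13.PacketOut
+//		if err := stream.Send(pkt); err != nil {
+//			log.Fatal(err)
+//		}
+//	}
+//	if _, err := stream.CloseAndRecv(); err != nil {
+//		log.Fatal(err)
+//	}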
+
+func (c *volthaServiceClient) ReceivePacketsIn(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (VolthaService_ReceivePacketsInClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_VolthaService_serviceDesc.Streams[1], "/voltha.VolthaService/ReceivePacketsIn", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &volthaServiceReceivePacketsInClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type VolthaService_ReceivePacketsInClient interface {
+	Recv() (*openflow_13.PacketIn, error)
+	grpc.ClientStream
+}
+
+type volthaServiceReceivePacketsInClient struct {
+	grpc.ClientStream
+}
+
+func (x *volthaServiceReceivePacketsInClient) Recv() (*openflow_13.PacketIn, error) {
+	m := new(openflow_13.PacketIn)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
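+
+// Server-streaming usage sketch (illustrative): read PacketIn messages
+// until the server closes the stream, which Recv reports as io.EOF. The
+// handlePacketIn callback is an assumption for the example.
+//
+//	stream, err := client.ReceivePacketsIn(ctx, &empty.Empty{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for {
+//		pkt, err := stream.Recv()
+//		if err == io.EOF {
+//			break // server finished the stream
+//		}
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		handlePacketIn(pkt)
+//	}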
+
+func (c *volthaServiceClient) ReceiveChangeEvents(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (VolthaService_ReceiveChangeEventsClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_VolthaService_serviceDesc.Streams[2], "/voltha.VolthaService/ReceiveChangeEvents", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &volthaServiceReceiveChangeEventsClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type VolthaService_ReceiveChangeEventsClient interface {
+	Recv() (*openflow_13.ChangeEvent, error)
+	grpc.ClientStream
+}
+
+type volthaServiceReceiveChangeEventsClient struct {
+	grpc.ClientStream
+}
+
+func (x *volthaServiceReceiveChangeEventsClient) Recv() (*openflow_13.ChangeEvent, error) {
+	m := new(openflow_13.ChangeEvent)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *volthaServiceClient) GetDeviceGroup(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*DeviceGroup, error) {
+	out := new(DeviceGroup)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetDeviceGroup", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) CreateEventFilter(ctx context.Context, in *EventFilter, opts ...grpc.CallOption) (*EventFilter, error) {
+	out := new(EventFilter)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/CreateEventFilter", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetEventFilter(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*EventFilters, error) {
+	out := new(EventFilters)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetEventFilter", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) UpdateEventFilter(ctx context.Context, in *EventFilter, opts ...grpc.CallOption) (*EventFilter, error) {
+	out := new(EventFilter)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/UpdateEventFilter", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) DeleteEventFilter(ctx context.Context, in *EventFilter, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/DeleteEventFilter", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListEventFilters(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*EventFilters, error) {
+	out := new(EventFilters)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListEventFilters", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetImages(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*Images, error) {
+	out := new(Images)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetImages", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) SelfTest(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*SelfTestResponse, error) {
+	out := new(SelfTestResponse)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/SelfTest", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetMibDeviceData(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*omci.MibDeviceData, error) {
+	out := new(omci.MibDeviceData)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetMibDeviceData", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetAlarmDeviceData(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*omci.AlarmDeviceData, error) {
+	out := new(omci.AlarmDeviceData)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetAlarmDeviceData", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) SimulateAlarm(ctx context.Context, in *SimulateAlarmRequest, opts ...grpc.CallOption) (*common.OperationResp, error) {
+	out := new(common.OperationResp)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/SimulateAlarm", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) Subscribe(ctx context.Context, in *OfAgentSubscriber, opts ...grpc.CallOption) (*OfAgentSubscriber, error) {
+	out := new(OfAgentSubscriber)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/Subscribe", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) EnablePort(ctx context.Context, in *Port, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/EnablePort", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) DisablePort(ctx context.Context, in *Port, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/DisablePort", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) StartOmciTestAction(ctx context.Context, in *OmciTestRequest, opts ...grpc.CallOption) (*TestResponse, error) {
+	out := new(TestResponse)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/StartOmciTestAction", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// VolthaServiceServer is the server API for VolthaService service.
+type VolthaServiceServer interface {
+	// Get the membership group of a Voltha Core
+	GetMembership(context.Context, *empty.Empty) (*Membership, error)
+	// Set the membership group of a Voltha Core
+	UpdateMembership(context.Context, *Membership) (*empty.Empty, error)
+	// Get high level information on the Voltha cluster
+	GetVoltha(context.Context, *empty.Empty) (*Voltha, error)
+	// List all Voltha cluster core instances
+	ListCoreInstances(context.Context, *empty.Empty) (*CoreInstances, error)
+	// Get details on a Voltha cluster instance
+	GetCoreInstance(context.Context, *common.ID) (*CoreInstance, error)
+	// List all active adapters (plugins) in the Voltha cluster
+	ListAdapters(context.Context, *empty.Empty) (*Adapters, error)
+	// List all logical devices managed by the Voltha cluster
+	ListLogicalDevices(context.Context, *empty.Empty) (*LogicalDevices, error)
+	// Get additional information on a given logical device
+	GetLogicalDevice(context.Context, *common.ID) (*LogicalDevice, error)
+	// List ports of a logical device
+	ListLogicalDevicePorts(context.Context, *common.ID) (*LogicalPorts, error)
+	// Gets a logical device port
+	GetLogicalDevicePort(context.Context, *LogicalPortId) (*LogicalPort, error)
+	// Enables a logical device port
+	EnableLogicalDevicePort(context.Context, *LogicalPortId) (*empty.Empty, error)
+	// Disables a logical device port
+	DisableLogicalDevicePort(context.Context, *LogicalPortId) (*empty.Empty, error)
+	// List all flows of a logical device
+	ListLogicalDeviceFlows(context.Context, *common.ID) (*openflow_13.Flows, error)
+	// Update flow table for logical device
+	UpdateLogicalDeviceFlowTable(context.Context, *openflow_13.FlowTableUpdate) (*empty.Empty, error)
+	// Update meter table for logical device
+	UpdateLogicalDeviceMeterTable(context.Context, *openflow_13.MeterModUpdate) (*empty.Empty, error)
+	// List all meters of a logical device
+	ListLogicalDeviceMeters(context.Context, *common.ID) (*openflow_13.Meters, error)
+	// List all flow groups of a logical device
+	ListLogicalDeviceFlowGroups(context.Context, *common.ID) (*openflow_13.FlowGroups, error)
+	// Update group table for logical device
+	UpdateLogicalDeviceFlowGroupTable(context.Context, *openflow_13.FlowGroupTableUpdate) (*empty.Empty, error)
+	// List all physical devices controlled by the Voltha cluster
+	ListDevices(context.Context, *empty.Empty) (*Devices, error)
+	// List all physical devices IDs controlled by the Voltha cluster
+	ListDeviceIds(context.Context, *empty.Empty) (*common.IDs, error)
+	// Request that a Voltha Core reconcile a set of devices based on their IDs
+	ReconcileDevices(context.Context, *common.IDs) (*empty.Empty, error)
+	// Get more information on a given physical device
+	GetDevice(context.Context, *common.ID) (*Device, error)
+	// Pre-provision a new physical device
+	CreateDevice(context.Context, *Device) (*Device, error)
+	// Enable a device.  If the device was in pre-provisioned state then it
+	// will transition to ENABLED state.  If it was in DISABLED state then it
+	// will transition to ENABLED state as well.
+	EnableDevice(context.Context, *common.ID) (*empty.Empty, error)
+	// Disable a device
+	DisableDevice(context.Context, *common.ID) (*empty.Empty, error)
+	// Reboot a device
+	RebootDevice(context.Context, *common.ID) (*empty.Empty, error)
+	// Delete a device
+	DeleteDevice(context.Context, *common.ID) (*empty.Empty, error)
+	// Request an image download to the standby partition
+	// of a device.
+	// Note that the call is expected to be non-blocking.
+	DownloadImage(context.Context, *ImageDownload) (*common.OperationResp, error)
+	// Get image download status on a device
+	// The request retrieves progress from the device and updates the db record
+	GetImageDownloadStatus(context.Context, *ImageDownload) (*ImageDownload, error)
+	// Get image download db record
+	GetImageDownload(context.Context, *ImageDownload) (*ImageDownload, error)
+	// List image download db records for a given device
+	ListImageDownloads(context.Context, *common.ID) (*ImageDownloads, error)
+	// Cancel an existing image download process on a device
+	CancelImageDownload(context.Context, *ImageDownload) (*common.OperationResp, error)
+	// Activate the specified image, promoting it from the standby
+	// partition to the active partition.
+	// Depending on the device implementation, this call
+	// may or may not cause device reboot.
+	// If the call does not trigger a reboot, an explicit reboot is
+	// required for the activated image to start running on the device.
+	// Note that the call is expected to be non-blocking.
+	ActivateImageUpdate(context.Context, *ImageDownload) (*common.OperationResp, error)
+	// Revert the specified image from the standby partition
+	// and fall back to the previous image on the active partition.
+	// Depending on the device implementation, this call
+	// may or may not cause device reboot.
+	// If the call does not trigger a reboot, an explicit reboot is
+	// required for the previous image to start running on the device.
+	// Note that the call is expected to be non-blocking.
+	RevertImageUpdate(context.Context, *ImageDownload) (*common.OperationResp, error)
+	// List ports of a device
+	ListDevicePorts(context.Context, *common.ID) (*Ports, error)
+	// List pm config of a device
+	ListDevicePmConfigs(context.Context, *common.ID) (*PmConfigs, error)
+	// Update the pm config of a device
+	UpdateDevicePmConfigs(context.Context, *PmConfigs) (*empty.Empty, error)
+	// List all flows of a device
+	ListDeviceFlows(context.Context, *common.ID) (*openflow_13.Flows, error)
+	// List all flow groups of a device
+	ListDeviceFlowGroups(context.Context, *common.ID) (*openflow_13.FlowGroups, error)
+	// List device types known to Voltha
+	ListDeviceTypes(context.Context, *empty.Empty) (*DeviceTypes, error)
+	// Get additional information on a device type
+	GetDeviceType(context.Context, *common.ID) (*DeviceType, error)
+	// List all device sharding groups
+	ListDeviceGroups(context.Context, *empty.Empty) (*DeviceGroups, error)
+	// Stream control packets to the dataplane
+	StreamPacketsOut(VolthaService_StreamPacketsOutServer) error
+	// Receive control packet stream
+	ReceivePacketsIn(*empty.Empty, VolthaService_ReceivePacketsInServer) error
+	ReceiveChangeEvents(*empty.Empty, VolthaService_ReceiveChangeEventsServer) error
+	// Get additional information on a device group
+	GetDeviceGroup(context.Context, *common.ID) (*DeviceGroup, error)
+	CreateEventFilter(context.Context, *EventFilter) (*EventFilter, error)
+	// Get all filters present for a device
+	GetEventFilter(context.Context, *common.ID) (*EventFilters, error)
+	UpdateEventFilter(context.Context, *EventFilter) (*EventFilter, error)
+	DeleteEventFilter(context.Context, *EventFilter) (*empty.Empty, error)
+	// Get all the filters present
+	ListEventFilters(context.Context, *empty.Empty) (*EventFilters, error)
+	GetImages(context.Context, *common.ID) (*Images, error)
+	SelfTest(context.Context, *common.ID) (*SelfTestResponse, error)
+	// OpenOMCI MIB information
+	GetMibDeviceData(context.Context, *common.ID) (*omci.MibDeviceData, error)
+	// OpenOMCI ALARM information
+	GetAlarmDeviceData(context.Context, *common.ID) (*omci.AlarmDeviceData, error)
+	// Simulate an Alarm
+	SimulateAlarm(context.Context, *SimulateAlarmRequest) (*common.OperationResp, error)
+	Subscribe(context.Context, *OfAgentSubscriber) (*OfAgentSubscriber, error)
+	EnablePort(context.Context, *Port) (*empty.Empty, error)
+	DisablePort(context.Context, *Port) (*empty.Empty, error)
+	// OMCI test start/stop CLI implementation
+	StartOmciTestAction(context.Context, *OmciTestRequest) (*TestResponse, error)
+}
+
+func RegisterVolthaServiceServer(s *grpc.Server, srv VolthaServiceServer) {
+	s.RegisterService(&_VolthaService_serviceDesc, srv)
+}
+
+func _VolthaService_GetMembership_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetMembership(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetMembership",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetMembership(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_UpdateMembership_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Membership)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).UpdateMembership(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/UpdateMembership",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).UpdateMembership(ctx, req.(*Membership))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetVoltha_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetVoltha(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetVoltha",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetVoltha(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListCoreInstances_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListCoreInstances(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListCoreInstances",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListCoreInstances(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetCoreInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetCoreInstance(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetCoreInstance",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetCoreInstance(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListAdapters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListAdapters(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListAdapters",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListAdapters(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListLogicalDevices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListLogicalDevices(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListLogicalDevices",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListLogicalDevices(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetLogicalDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetLogicalDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetLogicalDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetLogicalDevice(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListLogicalDevicePorts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListLogicalDevicePorts(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListLogicalDevicePorts",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListLogicalDevicePorts(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetLogicalDevicePort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LogicalPortId)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetLogicalDevicePort(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetLogicalDevicePort",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetLogicalDevicePort(ctx, req.(*LogicalPortId))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_EnableLogicalDevicePort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LogicalPortId)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).EnableLogicalDevicePort(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/EnableLogicalDevicePort",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).EnableLogicalDevicePort(ctx, req.(*LogicalPortId))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_DisableLogicalDevicePort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LogicalPortId)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).DisableLogicalDevicePort(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/DisableLogicalDevicePort",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).DisableLogicalDevicePort(ctx, req.(*LogicalPortId))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListLogicalDeviceFlows_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListLogicalDeviceFlows(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListLogicalDeviceFlows",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListLogicalDeviceFlows(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_UpdateLogicalDeviceFlowTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(openflow_13.FlowTableUpdate)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).UpdateLogicalDeviceFlowTable(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/UpdateLogicalDeviceFlowTable",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).UpdateLogicalDeviceFlowTable(ctx, req.(*openflow_13.FlowTableUpdate))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_UpdateLogicalDeviceMeterTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(openflow_13.MeterModUpdate)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).UpdateLogicalDeviceMeterTable(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/UpdateLogicalDeviceMeterTable",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).UpdateLogicalDeviceMeterTable(ctx, req.(*openflow_13.MeterModUpdate))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListLogicalDeviceMeters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListLogicalDeviceMeters(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListLogicalDeviceMeters",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListLogicalDeviceMeters(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListLogicalDeviceFlowGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListLogicalDeviceFlowGroups(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListLogicalDeviceFlowGroups",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListLogicalDeviceFlowGroups(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_UpdateLogicalDeviceFlowGroupTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(openflow_13.FlowGroupTableUpdate)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).UpdateLogicalDeviceFlowGroupTable(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/UpdateLogicalDeviceFlowGroupTable",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).UpdateLogicalDeviceFlowGroupTable(ctx, req.(*openflow_13.FlowGroupTableUpdate))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListDevices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListDevices(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListDevices",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListDevices(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListDeviceIds_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListDeviceIds(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListDeviceIds",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListDeviceIds(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ReconcileDevices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.IDs)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ReconcileDevices(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ReconcileDevices",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ReconcileDevices(ctx, req.(*common.IDs))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetDevice(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_CreateDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Device)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).CreateDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/CreateDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).CreateDevice(ctx, req.(*Device))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_EnableDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).EnableDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/EnableDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).EnableDevice(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_DisableDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).DisableDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/DisableDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).DisableDevice(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_RebootDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).RebootDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/RebootDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).RebootDevice(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_DeleteDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).DeleteDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/DeleteDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).DeleteDevice(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_DownloadImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ImageDownload)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).DownloadImage(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/DownloadImage",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).DownloadImage(ctx, req.(*ImageDownload))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetImageDownloadStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ImageDownload)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetImageDownloadStatus(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetImageDownloadStatus",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetImageDownloadStatus(ctx, req.(*ImageDownload))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetImageDownload_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ImageDownload)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetImageDownload(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetImageDownload",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetImageDownload(ctx, req.(*ImageDownload))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListImageDownloads_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListImageDownloads(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListImageDownloads",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListImageDownloads(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_CancelImageDownload_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ImageDownload)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).CancelImageDownload(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/CancelImageDownload",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).CancelImageDownload(ctx, req.(*ImageDownload))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ActivateImageUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ImageDownload)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ActivateImageUpdate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ActivateImageUpdate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ActivateImageUpdate(ctx, req.(*ImageDownload))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_RevertImageUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ImageDownload)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).RevertImageUpdate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/RevertImageUpdate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).RevertImageUpdate(ctx, req.(*ImageDownload))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListDevicePorts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListDevicePorts(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListDevicePorts",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListDevicePorts(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListDevicePmConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListDevicePmConfigs(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListDevicePmConfigs",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListDevicePmConfigs(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_UpdateDevicePmConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(PmConfigs)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).UpdateDevicePmConfigs(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/UpdateDevicePmConfigs",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).UpdateDevicePmConfigs(ctx, req.(*PmConfigs))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListDeviceFlows_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListDeviceFlows(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListDeviceFlows",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListDeviceFlows(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListDeviceFlowGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListDeviceFlowGroups(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListDeviceFlowGroups",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListDeviceFlowGroups(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListDeviceTypes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListDeviceTypes(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListDeviceTypes",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListDeviceTypes(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetDeviceType_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetDeviceType(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetDeviceType",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetDeviceType(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListDeviceGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListDeviceGroups(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListDeviceGroups",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListDeviceGroups(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_StreamPacketsOut_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(VolthaServiceServer).StreamPacketsOut(&volthaServiceStreamPacketsOutServer{stream})
+}
+
+type VolthaService_StreamPacketsOutServer interface {
+	SendAndClose(*empty.Empty) error
+	Recv() (*openflow_13.PacketOut, error)
+	grpc.ServerStream
+}
+
+type volthaServiceStreamPacketsOutServer struct {
+	grpc.ServerStream
+}
+
+func (x *volthaServiceStreamPacketsOutServer) SendAndClose(m *empty.Empty) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *volthaServiceStreamPacketsOutServer) Recv() (*openflow_13.PacketOut, error) {
+	m := new(openflow_13.PacketOut)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func _VolthaService_ReceivePacketsIn_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(empty.Empty)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(VolthaServiceServer).ReceivePacketsIn(m, &volthaServiceReceivePacketsInServer{stream})
+}
+
+type VolthaService_ReceivePacketsInServer interface {
+	Send(*openflow_13.PacketIn) error
+	grpc.ServerStream
+}
+
+type volthaServiceReceivePacketsInServer struct {
+	grpc.ServerStream
+}
+
+func (x *volthaServiceReceivePacketsInServer) Send(m *openflow_13.PacketIn) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func _VolthaService_ReceiveChangeEvents_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(empty.Empty)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(VolthaServiceServer).ReceiveChangeEvents(m, &volthaServiceReceiveChangeEventsServer{stream})
+}
+
+type VolthaService_ReceiveChangeEventsServer interface {
+	Send(*openflow_13.ChangeEvent) error
+	grpc.ServerStream
+}
+
+type volthaServiceReceiveChangeEventsServer struct {
+	grpc.ServerStream
+}
+
+func (x *volthaServiceReceiveChangeEventsServer) Send(m *openflow_13.ChangeEvent) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func _VolthaService_GetDeviceGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetDeviceGroup(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetDeviceGroup",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetDeviceGroup(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_CreateEventFilter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(EventFilter)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).CreateEventFilter(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/CreateEventFilter",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).CreateEventFilter(ctx, req.(*EventFilter))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetEventFilter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetEventFilter(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetEventFilter",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetEventFilter(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_UpdateEventFilter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(EventFilter)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).UpdateEventFilter(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/UpdateEventFilter",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).UpdateEventFilter(ctx, req.(*EventFilter))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_DeleteEventFilter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(EventFilter)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).DeleteEventFilter(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/DeleteEventFilter",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).DeleteEventFilter(ctx, req.(*EventFilter))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListEventFilters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListEventFilters(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListEventFilters",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListEventFilters(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetImages_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetImages(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetImages",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetImages(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_SelfTest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).SelfTest(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/SelfTest",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).SelfTest(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetMibDeviceData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetMibDeviceData(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetMibDeviceData",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetMibDeviceData(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetAlarmDeviceData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetAlarmDeviceData(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetAlarmDeviceData",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetAlarmDeviceData(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_SimulateAlarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(SimulateAlarmRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).SimulateAlarm(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/SimulateAlarm",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).SimulateAlarm(ctx, req.(*SimulateAlarmRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_Subscribe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(OfAgentSubscriber)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).Subscribe(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/Subscribe",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).Subscribe(ctx, req.(*OfAgentSubscriber))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_EnablePort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Port)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).EnablePort(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/EnablePort",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).EnablePort(ctx, req.(*Port))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_DisablePort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Port)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).DisablePort(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/DisablePort",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).DisablePort(ctx, req.(*Port))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_StartOmciTestAction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(OmciTestRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).StartOmciTestAction(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/StartOmciTestAction",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).StartOmciTestAction(ctx, req.(*OmciTestRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _VolthaService_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "voltha.VolthaService",
+	HandlerType: (*VolthaServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "GetMembership",
+			Handler:    _VolthaService_GetMembership_Handler,
+		},
+		{
+			MethodName: "UpdateMembership",
+			Handler:    _VolthaService_UpdateMembership_Handler,
+		},
+		{
+			MethodName: "GetVoltha",
+			Handler:    _VolthaService_GetVoltha_Handler,
+		},
+		{
+			MethodName: "ListCoreInstances",
+			Handler:    _VolthaService_ListCoreInstances_Handler,
+		},
+		{
+			MethodName: "GetCoreInstance",
+			Handler:    _VolthaService_GetCoreInstance_Handler,
+		},
+		{
+			MethodName: "ListAdapters",
+			Handler:    _VolthaService_ListAdapters_Handler,
+		},
+		{
+			MethodName: "ListLogicalDevices",
+			Handler:    _VolthaService_ListLogicalDevices_Handler,
+		},
+		{
+			MethodName: "GetLogicalDevice",
+			Handler:    _VolthaService_GetLogicalDevice_Handler,
+		},
+		{
+			MethodName: "ListLogicalDevicePorts",
+			Handler:    _VolthaService_ListLogicalDevicePorts_Handler,
+		},
+		{
+			MethodName: "GetLogicalDevicePort",
+			Handler:    _VolthaService_GetLogicalDevicePort_Handler,
+		},
+		{
+			MethodName: "EnableLogicalDevicePort",
+			Handler:    _VolthaService_EnableLogicalDevicePort_Handler,
+		},
+		{
+			MethodName: "DisableLogicalDevicePort",
+			Handler:    _VolthaService_DisableLogicalDevicePort_Handler,
+		},
+		{
+			MethodName: "ListLogicalDeviceFlows",
+			Handler:    _VolthaService_ListLogicalDeviceFlows_Handler,
+		},
+		{
+			MethodName: "UpdateLogicalDeviceFlowTable",
+			Handler:    _VolthaService_UpdateLogicalDeviceFlowTable_Handler,
+		},
+		{
+			MethodName: "UpdateLogicalDeviceMeterTable",
+			Handler:    _VolthaService_UpdateLogicalDeviceMeterTable_Handler,
+		},
+		{
+			MethodName: "ListLogicalDeviceMeters",
+			Handler:    _VolthaService_ListLogicalDeviceMeters_Handler,
+		},
+		{
+			MethodName: "ListLogicalDeviceFlowGroups",
+			Handler:    _VolthaService_ListLogicalDeviceFlowGroups_Handler,
+		},
+		{
+			MethodName: "UpdateLogicalDeviceFlowGroupTable",
+			Handler:    _VolthaService_UpdateLogicalDeviceFlowGroupTable_Handler,
+		},
+		{
+			MethodName: "ListDevices",
+			Handler:    _VolthaService_ListDevices_Handler,
+		},
+		{
+			MethodName: "ListDeviceIds",
+			Handler:    _VolthaService_ListDeviceIds_Handler,
+		},
+		{
+			MethodName: "ReconcileDevices",
+			Handler:    _VolthaService_ReconcileDevices_Handler,
+		},
+		{
+			MethodName: "GetDevice",
+			Handler:    _VolthaService_GetDevice_Handler,
+		},
+		{
+			MethodName: "CreateDevice",
+			Handler:    _VolthaService_CreateDevice_Handler,
+		},
+		{
+			MethodName: "EnableDevice",
+			Handler:    _VolthaService_EnableDevice_Handler,
+		},
+		{
+			MethodName: "DisableDevice",
+			Handler:    _VolthaService_DisableDevice_Handler,
+		},
+		{
+			MethodName: "RebootDevice",
+			Handler:    _VolthaService_RebootDevice_Handler,
+		},
+		{
+			MethodName: "DeleteDevice",
+			Handler:    _VolthaService_DeleteDevice_Handler,
+		},
+		{
+			MethodName: "DownloadImage",
+			Handler:    _VolthaService_DownloadImage_Handler,
+		},
+		{
+			MethodName: "GetImageDownloadStatus",
+			Handler:    _VolthaService_GetImageDownloadStatus_Handler,
+		},
+		{
+			MethodName: "GetImageDownload",
+			Handler:    _VolthaService_GetImageDownload_Handler,
+		},
+		{
+			MethodName: "ListImageDownloads",
+			Handler:    _VolthaService_ListImageDownloads_Handler,
+		},
+		{
+			MethodName: "CancelImageDownload",
+			Handler:    _VolthaService_CancelImageDownload_Handler,
+		},
+		{
+			MethodName: "ActivateImageUpdate",
+			Handler:    _VolthaService_ActivateImageUpdate_Handler,
+		},
+		{
+			MethodName: "RevertImageUpdate",
+			Handler:    _VolthaService_RevertImageUpdate_Handler,
+		},
+		{
+			MethodName: "ListDevicePorts",
+			Handler:    _VolthaService_ListDevicePorts_Handler,
+		},
+		{
+			MethodName: "ListDevicePmConfigs",
+			Handler:    _VolthaService_ListDevicePmConfigs_Handler,
+		},
+		{
+			MethodName: "UpdateDevicePmConfigs",
+			Handler:    _VolthaService_UpdateDevicePmConfigs_Handler,
+		},
+		{
+			MethodName: "ListDeviceFlows",
+			Handler:    _VolthaService_ListDeviceFlows_Handler,
+		},
+		{
+			MethodName: "ListDeviceFlowGroups",
+			Handler:    _VolthaService_ListDeviceFlowGroups_Handler,
+		},
+		{
+			MethodName: "ListDeviceTypes",
+			Handler:    _VolthaService_ListDeviceTypes_Handler,
+		},
+		{
+			MethodName: "GetDeviceType",
+			Handler:    _VolthaService_GetDeviceType_Handler,
+		},
+		{
+			MethodName: "ListDeviceGroups",
+			Handler:    _VolthaService_ListDeviceGroups_Handler,
+		},
+		{
+			MethodName: "GetDeviceGroup",
+			Handler:    _VolthaService_GetDeviceGroup_Handler,
+		},
+		{
+			MethodName: "CreateEventFilter",
+			Handler:    _VolthaService_CreateEventFilter_Handler,
+		},
+		{
+			MethodName: "GetEventFilter",
+			Handler:    _VolthaService_GetEventFilter_Handler,
+		},
+		{
+			MethodName: "UpdateEventFilter",
+			Handler:    _VolthaService_UpdateEventFilter_Handler,
+		},
+		{
+			MethodName: "DeleteEventFilter",
+			Handler:    _VolthaService_DeleteEventFilter_Handler,
+		},
+		{
+			MethodName: "ListEventFilters",
+			Handler:    _VolthaService_ListEventFilters_Handler,
+		},
+		{
+			MethodName: "GetImages",
+			Handler:    _VolthaService_GetImages_Handler,
+		},
+		{
+			MethodName: "SelfTest",
+			Handler:    _VolthaService_SelfTest_Handler,
+		},
+		{
+			MethodName: "GetMibDeviceData",
+			Handler:    _VolthaService_GetMibDeviceData_Handler,
+		},
+		{
+			MethodName: "GetAlarmDeviceData",
+			Handler:    _VolthaService_GetAlarmDeviceData_Handler,
+		},
+		{
+			MethodName: "SimulateAlarm",
+			Handler:    _VolthaService_SimulateAlarm_Handler,
+		},
+		{
+			MethodName: "Subscribe",
+			Handler:    _VolthaService_Subscribe_Handler,
+		},
+		{
+			MethodName: "EnablePort",
+			Handler:    _VolthaService_EnablePort_Handler,
+		},
+		{
+			MethodName: "DisablePort",
+			Handler:    _VolthaService_DisablePort_Handler,
+		},
+		{
+			MethodName: "StartOmciTestAction",
+			Handler:    _VolthaService_StartOmciTestAction_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "StreamPacketsOut",
+			Handler:       _VolthaService_StreamPacketsOut_Handler,
+			ClientStreams: true,
+		},
+		{
+			StreamName:    "ReceivePacketsIn",
+			Handler:       _VolthaService_ReceivePacketsIn_Handler,
+			ServerStreams: true,
+		},
+		{
+			StreamName:    "ReceiveChangeEvents",
+			Handler:       _VolthaService_ReceiveChangeEvents_Handler,
+			ServerStreams: true,
+		},
+	},
+	Metadata: "voltha_protos/voltha.proto",
+}
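The generated bindings above are plumbing only: the client type invokes `/voltha.VolthaService/...` methods over a `*grpc.ClientConn`, and `RegisterVolthaServiceServer` attaches a `VolthaServiceServer` implementation to a `*grpc.Server`. Note that this generated file contains no `UnimplementedVolthaServiceServer` embed, so a server type must implement every method of the interface. For orientation, here is a minimal client-side sketch; it is not part of this checkin, the endpoint address and voltha-protos import path are assumptions, and `NewVolthaServiceClient` is the standard constructor protoc-gen-go emits alongside the client type above.

```
package main

import (
	"context"
	"io"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes/empty"
	"google.golang.org/grpc"

	// Import path is an assumption; point it at the package holding
	// the generated voltha bindings in your tree.
	voltha "github.com/opencord/voltha-protos/go/voltha"
)

func main() {
	// Placeholder address for a VOLTHA northbound API endpoint.
	conn, err := grpc.Dial("voltha-api:55555", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := voltha.NewVolthaServiceClient(conn)

	// Unary RPC: fetch high-level cluster information.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	v, err := client.GetVoltha(ctx, &empty.Empty{})
	if err != nil {
		log.Fatalf("GetVoltha: %v", err)
	}
	log.Printf("voltha: %v", v)

	// Server-streaming RPC: consume OpenFlow change events until EOF.
	stream, err := client.ReceiveChangeEvents(context.Background(), &empty.Empty{})
	if err != nil {
		log.Fatalf("ReceiveChangeEvents: %v", err)
	}
	for {
		ev, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("recv: %v", err)
		}
		log.Printf("change event: %v", ev)
	}
}
```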
diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore
new file mode 100644
index 0000000..5e98735
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/.gitignore
@@ -0,0 +1,34 @@
+# Created by https://www.gitignore.io/api/macos
+
+### macOS ###
+*.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+# End of https://www.gitignore.io/api/macos
+
+cmd/*/*exe
+.idea
\ No newline at end of file
diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml
new file mode 100644
index 0000000..fd6c6db
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/.travis.yml
@@ -0,0 +1,24 @@
+language: go
+
+env:
+  - GO111MODULE=off
+
+go:
+  - 1.9.x
+  - 1.10.x
+  - 1.11.x
+  - 1.12.x
+  - master
+
+matrix:
+ fast_finish: true
+ allow_failures:
+   - go: master
+
+sudo: false
+
+script: 
+ - go test -v -cpu=2
+ - go test -v -cpu=2 -race
+ - go test -v -cpu=2 -tags noasm
+ - go test -v -cpu=2 -race -tags noasm
diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE
new file mode 100644
index 0000000..bd899d8
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2015, Pierre Curto
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of xxHash nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md
new file mode 100644
index 0000000..be1f52a
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/README.md
@@ -0,0 +1,106 @@
+# lz4 : LZ4 compression in pure Go
+
+[![GoDoc](https://godoc.org/github.com/pierrec/lz4?status.svg)](https://godoc.org/github.com/pierrec/lz4)
+[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4)
+[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4)
+[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags)
+
+## Overview
+
+This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low-level compress and uncompress functions for LZ4 data blocks.
+The implementation is based on the [reference C implementation](https://github.com/lz4/lz4).
+
+## Install
+
+Assuming you have the Go toolchain installed:
+
+```
+go get github.com/pierrec/lz4
+```
+
+There is a command line interface tool to compress and decompress LZ4 files.
+
+```
+go install github.com/pierrec/lz4/cmd/lz4c
+```
+
+Usage
+
+```
+Usage of lz4c:
+  -version
+        print the program version
+
+Subcommands:
+Compress the given files or from stdin to stdout.
+compress [arguments] [<file name> ...]
+  -bc
+        enable block checksum
+  -l int
+        compression level (0=fastest)
+  -sc
+        disable stream checksum
+  -size string
+        block max size [64K,256K,1M,4M] (default "4M")
+
+Uncompress the given files or from stdin to stdout.
+uncompress [arguments] [<file name> ...]
+
+```
+
+
+## Example
+
+```
+// Compress and uncompress an input string.
+s := "hello world"
+r := strings.NewReader(s)
+
+// The pipe will uncompress the data from the writer.
+pr, pw := io.Pipe()
+zw := lz4.NewWriter(pw)
+zr := lz4.NewReader(pr)
+
+go func() {
+	// Compress the input string.
+	_, _ = io.Copy(zw, r)
+	_ = zw.Close() // Make sure the writer is closed
+	_ = pw.Close() // Terminate the pipe
+}()
+
+_, _ = io.Copy(os.Stdout, zr)
+
+// Output:
+// hello world
+```
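+
+The low-level block functions can also be used directly. A minimal sketch
+(error handling elided; the uncompressed size is assumed to be known to the
+caller, and the hash table needs at least 64K entries):
+
+```
+data := []byte("hello world hello world hello world")
+ht := make([]int, 64<<10) // hash table for CompressBlock
+buf := make([]byte, lz4.CompressBlockBound(len(data)))
+n, _ := lz4.CompressBlock(data, buf, ht)
+// n == 0 with a nil error means the data was incompressible.
+
+out := make([]byte, len(data))
+_, _ = lz4.UncompressBlock(buf[:n], out)
+```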
+
+## Contributing
+
+Contributions are very welcome, whether for bug fixes, performance improvements, or other enhancements!
+
+- Open an issue with a proper description
+- Send a pull request with appropriate test case(s)
+
+## Contributors
+
+Thanks to all contributors so far:
+
+- [@klauspost](https://github.com/klauspost)
+- [@heidawei](https://github.com/heidawei)
+- [@x4m](https://github.com/x4m)
+- [@Zariel](https://github.com/Zariel)
+- [@edwingeng](https://github.com/edwingeng)
+- [@danielmoy-google](https://github.com/danielmoy-google)
+- [@honda-tatsuya](https://github.com/honda-tatsuya)
+- [@h8liu](https://github.com/h8liu)
+- [@sbinet](https://github.com/sbinet)
+- [@fingon](https://github.com/fingon)
+- [@emfree](https://github.com/emfree)
+- [@lhemala](https://github.com/lhemala)
+- [@connor4312](https://github.com/connor4312)
+- [@oov](https://github.com/oov)
+- [@arya](https://github.com/arya)
+- [@ikkeps](https://github.com/ikkeps)
+
+Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder.
+Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code.
diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go
new file mode 100644
index 0000000..5755cda
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/block.go
@@ -0,0 +1,387 @@
+package lz4
+
+import (
+	"encoding/binary"
+	"fmt"
+	"math/bits"
+)
+
+// blockHash hashes the lower 6 bytes into a value < htSize.
+func blockHash(x uint64) uint32 {
+	const prime6bytes = 227718039650203
+	return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
+}
+
+// CompressBlockBound returns the maximum compressed size of a buffer of size n, when the data is not compressible.
+func CompressBlockBound(n int) int {
+	return n + n/255 + 16
+}
+
+// UncompressBlock uncompresses the source buffer into the destination one,
+// and returns the uncompressed size.
+//
+// The destination buffer must be sized appropriately.
+//
+// An error is returned if the source data is invalid or the destination buffer is too small.
+func UncompressBlock(src, dst []byte) (int, error) {
+	if len(src) == 0 {
+		return 0, nil
+	}
+	if di := decodeBlock(dst, src); di >= 0 {
+		return di, nil
+	}
+	return 0, ErrInvalidSourceShortBuffer
+}
+
+// CompressBlock compresses the source buffer into the destination one.
+// This is the fast version of LZ4 compression and also the default one.
+// The hashTable must have at least htSize (64K) entries.
+//
+// The size of the compressed data is returned. If it is 0 and err is nil, then the data is incompressible.
+//
+// An error is returned if the destination buffer is too small.
+func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
+	defer recoverBlock(&err)
+
+	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+// This significantly speeds up incompressible data and usually has very small impact on compression.
+	// bytes to skip =  1 + (bytes since last match >> adaptSkipLog)
+	const adaptSkipLog = 7
+	sn, dn := len(src)-mfLimit, len(dst)
+	if sn <= 0 || dn == 0 {
+		return 0, nil
+	}
+	if len(hashTable) < htSize {
+		return 0, fmt.Errorf("hash table too small, should be at least %d in size", htSize)
+	}
+	// Prove to the compiler the table has at least htSize elements.
+	// The compiler can see that "uint32() >> hashShift" cannot be out of bounds.
+	hashTable = hashTable[:htSize]
+
+	// si: Current position of the search.
+	// anchor: Position of the current literals.
+	var si, anchor int
+
+	// Fast scan strategy: the hash table only stores the last position of 4-byte sequences.
+	for si < sn {
+		// Hash the next 6 bytes (sequence)...
+		match := binary.LittleEndian.Uint64(src[si:])
+		h := blockHash(match)
+		h2 := blockHash(match >> 8)
+
+		// We check for a match at si, si+1 and si+2 and pick the first one we get.
+		// Checking all 3 only requires loading the source once.
+		ref := hashTable[h]
+		ref2 := hashTable[h2]
+		hashTable[h] = si
+		hashTable[h2] = si + 1
+		offset := si - ref
+
+		// If offset <= 0 we got an old entry in the hash table.
+		if offset <= 0 || offset >= winSize || // Out of window.
+			uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches.
+			// No match. Start calculating another hash.
+			// The processor can usually do this out-of-order.
+			h = blockHash(match >> 16)
+			ref = hashTable[h]
+
+			// Check the second match at si+1
+			si += 1
+			offset = si - ref2
+
+			if offset <= 0 || offset >= winSize ||
+				uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
+				// No match. Check the third match at si+2
+				si += 1
+				offset = si - ref
+				hashTable[h] = si
+
+				if offset <= 0 || offset >= winSize ||
+					uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) {
+					// Skip one extra byte (at si+3) before we check 3 matches again.
+					si += 2 + (si-anchor)>>adaptSkipLog
+					continue
+				}
+			}
+		}
+
+		// Match found.
+		lLen := si - anchor // Literal length.
+		// We already matched 4 bytes.
+		mLen := 4
+
+		// Extend backwards if we can, reducing literals.
+		tOff := si - offset - 1
+		for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] {
+			si--
+			tOff--
+			lLen--
+			mLen++
+		}
+
+		// Add the match length, so we continue search at the end.
+		// Use mLen to store the offset base.
+		si, mLen = si+mLen, si+minMatch
+
+		// Find the longest match by searching in batches of 8 bytes.
+		for si < sn {
+			x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
+			if x == 0 {
+				si += 8
+			} else {
+				// Stop at the first non-zero byte.
+				si += bits.TrailingZeros64(x) >> 3
+				break
+			}
+		}
+
+		mLen = si - mLen
+		if mLen < 0xF {
+			dst[di] = byte(mLen)
+		} else {
+			dst[di] = 0xF
+		}
+
+		// Encode literals length.
+		if lLen < 0xF {
+			dst[di] |= byte(lLen << 4)
+		} else {
+			dst[di] |= 0xF0
+			di++
+			l := lLen - 0xF
+			for ; l >= 0xFF; l -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(l)
+		}
+		di++
+
+		// Literals.
+		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+		di += lLen + 2
+		anchor = si
+
+		// Encode offset.
+		_ = dst[di] // Bound check elimination.
+		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+		// Encode match length part 2.
+		if mLen >= 0xF {
+			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(mLen)
+			di++
+		}
+		// Check if we can load next values.
+		if si >= sn {
+			break
+		}
+		// Hash match end-2
+		h = blockHash(binary.LittleEndian.Uint64(src[si-2:]))
+		hashTable[h] = si - 2
+	}
+
+	if anchor == 0 {
+		// Incompressible.
+		return 0, nil
+	}
+
+	// Last literals.
+	lLen := len(src) - anchor
+	if lLen < 0xF {
+		dst[di] = byte(lLen << 4)
+	} else {
+		dst[di] = 0xF0
+		di++
+		for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF {
+			dst[di] = 0xFF
+			di++
+		}
+		dst[di] = byte(lLen)
+	}
+	di++
+
+	// Write the last literals.
+	if di >= anchor {
+		// Incompressible.
+		return 0, nil
+	}
+	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+	return di, nil
+}
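+
+// A round-trip usage sketch (comment only; src is the caller's input):
+//
+//	ht := make([]int, htSize)
+//	buf := make([]byte, CompressBlockBound(len(src)))
+//	n, err := CompressBlock(src, buf, ht)
+//	// n == 0 with a nil err means src was incompressible and must be stored as-is.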
+
+// blockHashHC hashes 4 bytes into a value < winSize.
+func blockHashHC(x uint32) uint32 {
+	const hasher uint32 = 2654435761 // Knuth multiplicative hash.
+	return x * hasher >> (32 - winSizeLog)
+}
+
+// CompressBlockHC compresses the source buffer src into the destination dst
+// with max search depth (use 0 or negative value for no max).
+//
+// CompressBlockHC's compression ratio is better than CompressBlock's, but it is also slower.
+//
+// The size of the compressed data is returned. If it is 0 and err is nil, then the data is not compressible.
+//
+// An error is returned if the destination buffer is too small.
+func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
+	defer recoverBlock(&err)
+
+	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+	// This significantly speeds up incompressible data and usually has very small impact on compression.
+	// bytes to skip =  1 + (bytes since last match >> adaptSkipLog)
+	const adaptSkipLog = 7
+
+	sn, dn := len(src)-mfLimit, len(dst)
+	if sn <= 0 || dn == 0 {
+		return 0, nil
+	}
+	var si int
+
+	// hashTable: stores the last position found for a given hash
+	// chainTable: stores previous positions for a given hash
+	var hashTable, chainTable [winSize]int
+
+	if depth <= 0 {
+		depth = winSize
+	}
+
+	anchor := si
+	for si < sn {
+		// Hash the next 4 bytes (sequence).
+		match := binary.LittleEndian.Uint32(src[si:])
+		h := blockHashHC(match)
+
+		// Follow the chain until out of window and give the longest match.
+		mLen := 0
+		offset := 0
+		for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] {
+			// The first byte (mLen==0) or the next byte (mLen>=minMatch) at the current
+			// match length must match to improve on the match length.
+			if src[next+mLen] != src[si+mLen] {
+				continue
+			}
+			ml := 0
+			// Compare the current position with a previous with the same hash.
+			for ml < sn-si {
+				x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:])
+				if x == 0 {
+					ml += 8
+				} else {
+					// Stop at the first non-zero byte.
+					ml += bits.TrailingZeros64(x) >> 3
+					break
+				}
+			}
+			if ml < minMatch || ml <= mLen {
+				// Match too small (<minMatch) or smaller than the current match.
+				continue
+			}
+			// Found a longer match, keep its position and length.
+			mLen = ml
+			offset = si - next
+			// Try another previous position with the same hash.
+			try--
+		}
+		chainTable[si&winMask] = hashTable[h]
+		hashTable[h] = si
+
+		// No match found.
+		if mLen == 0 {
+			si += 1 + (si-anchor)>>adaptSkipLog
+			continue
+		}
+
+		// Match found.
+		// Update hash/chain tables with overlapping bytes:
+		// si already hashed, add everything from si+1 up to the match length.
+		winStart := si + 1
+		if ws := si + mLen - winSize; ws > winStart {
+			winStart = ws
+		}
+		for si, ml := winStart, si+mLen; si < ml; {
+			match >>= 8
+			match |= uint32(src[si+3]) << 24
+			h := blockHashHC(match)
+			chainTable[si&winMask] = hashTable[h]
+			hashTable[h] = si
+			si++
+		}
+
+		lLen := si - anchor
+		si += mLen
+		mLen -= minMatch // Match length does not include minMatch.
+
+		if mLen < 0xF {
+			dst[di] = byte(mLen)
+		} else {
+			dst[di] = 0xF
+		}
+
+		// Encode literals length.
+		if lLen < 0xF {
+			dst[di] |= byte(lLen << 4)
+		} else {
+			dst[di] |= 0xF0
+			di++
+			l := lLen - 0xF
+			for ; l >= 0xFF; l -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(l)
+		}
+		di++
+
+		// Literals.
+		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+		di += lLen
+		anchor = si
+
+		// Encode offset.
+		di += 2
+		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+		// Encode match length part 2.
+		if mLen >= 0xF {
+			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(mLen)
+			di++
+		}
+	}
+
+	if anchor == 0 {
+		// Incompressible.
+		return 0, nil
+	}
+
+	// Last literals.
+	lLen := len(src) - anchor
+	if lLen < 0xF {
+		dst[di] = byte(lLen << 4)
+	} else {
+		dst[di] = 0xF0
+		di++
+		lLen -= 0xF
+		for ; lLen >= 0xFF; lLen -= 0xFF {
+			dst[di] = 0xFF
+			di++
+		}
+		dst[di] = byte(lLen)
+	}
+	di++
+
+	// Write the last literals.
+	if di >= anchor {
+		// Incompressible.
+		return 0, nil
+	}
+	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+	return di, nil
+}
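+
+// Usage sketch (comment only): the depth parameter bounds the chain search:
+//
+//	n, err := CompressBlockHC(src, dst, 0)  // 0 => maximum depth (winSize)
+//	n, err = CompressBlockHC(src, dst, 16)  // shallower search: faster, worse ratio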
diff --git a/vendor/github.com/pierrec/lz4/debug.go b/vendor/github.com/pierrec/lz4/debug.go
new file mode 100644
index 0000000..bc5e78d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/debug.go
@@ -0,0 +1,23 @@
+// +build lz4debug
+
+package lz4
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
+const debugFlag = true
+
+func debug(args ...interface{}) {
+	_, file, line, _ := runtime.Caller(1)
+	file = filepath.Base(file)
+
+	f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0])
+	if f[len(f)-1] != '\n' {
+		f += "\n"
+	}
+	fmt.Fprintf(os.Stderr, f, args[1:]...)
+}
diff --git a/vendor/github.com/pierrec/lz4/debug_stub.go b/vendor/github.com/pierrec/lz4/debug_stub.go
new file mode 100644
index 0000000..44211ad
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/debug_stub.go
@@ -0,0 +1,7 @@
+// +build !lz4debug
+
+package lz4
+
+const debugFlag = false
+
+func debug(args ...interface{}) {}
diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.go b/vendor/github.com/pierrec/lz4/decode_amd64.go
new file mode 100644
index 0000000..43cc14f
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/decode_amd64.go
@@ -0,0 +1,8 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package lz4
+
+//go:noescape
+func decodeBlock(dst, src []byte) int
diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.s b/vendor/github.com/pierrec/lz4/decode_amd64.s
new file mode 100644
index 0000000..20fef39
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/decode_amd64.s
@@ -0,0 +1,375 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// AX scratch
+// BX scratch
+// CX scratch
+// DX token
+//
+// DI &dst
+// SI &src
+// R8 &dst + len(dst)
+// R9 &src + len(src)
+// R11 &dst
+// R12 short output end
+// R13 short input end
+// func decodeBlock(dst, src []byte) int
+// using 50 bytes of stack currently
+TEXT ·decodeBlock(SB), NOSPLIT, $64-56
+	MOVQ dst_base+0(FP), DI
+	MOVQ DI, R11
+	MOVQ dst_len+8(FP), R8
+	ADDQ DI, R8
+
+	MOVQ src_base+24(FP), SI
+	MOVQ src_len+32(FP), R9
+	ADDQ SI, R9
+
+	// shortcut ends
+	// short output end
+	MOVQ R8, R12
+	SUBQ $32, R12
+	// short input end
+	MOVQ R9, R13
+	SUBQ $16, R13
+
+loop:
+	// for si < len(src)
+	CMPQ SI, R9
+	JGE end
+
+	// token := uint32(src[si])
+	MOVBQZX (SI), DX
+	INCQ SI
+
+	// lit_len = token >> 4
+	// if lit_len > 0
+	// CX = lit_len
+	MOVQ DX, CX
+	SHRQ $4, CX
+
+	// if lit_len != 0xF
+	CMPQ CX, $0xF
+	JEQ lit_len_loop_pre
+	CMPQ DI, R12
+	JGE lit_len_loop_pre
+	CMPQ SI, R13
+	JGE lit_len_loop_pre
+
+	// copy shortcut
+
+	// A two-stage shortcut for the most common case:
+	// 1) If the literal length is 0..14, and there is enough space,
+	// enter the shortcut and copy 16 bytes on behalf of the literals
+	// (in the fast mode, only 8 bytes can be safely copied this way).
+	// 2) Further if the match length is 4..18, copy 18 bytes in a similar
+	// manner; but we ensure that there's enough space in the output for
+	// those 18 bytes earlier, upon entering the shortcut (in other words,
+	// there is a combined check for both stages).
+
+	// copy literal
+	MOVOU (SI), X0
+	MOVOU X0, (DI)
+	ADDQ CX, DI
+	ADDQ CX, SI
+
+	MOVQ DX, CX
+	ANDQ $0xF, CX
+
+	// The second stage: prepare for match copying, decode full info.
+	// If it doesn't work out, the info won't be wasted.
+	// offset := uint16(data[:2])
+	MOVWQZX (SI), DX
+	ADDQ $2, SI
+
+	MOVQ DI, AX
+	SUBQ DX, AX
+	CMPQ AX, DI
+	JGT err_short_buf
+
+	// if we can't do the second stage then jump straight to read the
+	// match length, we already have the offset.
+	CMPQ CX, $0xF
+	JEQ match_len_loop_pre
+	CMPQ DX, $8
+	JLT match_len_loop_pre
+	CMPQ AX, R11
+	JLT err_short_buf
+
+	// memcpy(op + 0, match + 0, 8);
+	MOVQ (AX), BX
+	MOVQ BX, (DI)
+	// memcpy(op + 8, match + 8, 8);
+	MOVQ 8(AX), BX
+	MOVQ BX, 8(DI)
+	// memcpy(op +16, match +16, 2);
+	MOVW 16(AX), BX
+	MOVW BX, 16(DI)
+
+	ADDQ $4, DI // minmatch
+	ADDQ CX, DI
+
+	// shortcut complete, load next token
+	JMP loop
+
+lit_len_loop_pre:
+	// if lit_len > 0
+	CMPQ CX, $0
+	JEQ offset
+	CMPQ CX, $0xF
+	JNE copy_literal
+
+lit_len_loop:
+	// for src[si] == 0xFF
+	CMPB (SI), $0xFF
+	JNE lit_len_finalise
+
+	// bounds check src[si+1]
+	MOVQ SI, AX
+	ADDQ $1, AX
+	CMPQ AX, R9
+	JGT err_short_buf
+
+	// lit_len += 0xFF
+	ADDQ $0xFF, CX
+	INCQ SI
+	JMP lit_len_loop
+
+lit_len_finalise:
+	// lit_len += int(src[si])
+	// si++
+	MOVBQZX (SI), AX
+	ADDQ AX, CX
+	INCQ SI
+
+copy_literal:
+	// bounds check src and dst
+	MOVQ SI, AX
+	ADDQ CX, AX
+	CMPQ AX, R9
+	JGT err_short_buf
+
+	MOVQ DI, AX
+	ADDQ CX, AX
+	CMPQ AX, R8
+	JGT err_short_buf
+
+	// What's a good cutoff to call memmove?
+	CMPQ CX, $16
+	JGT memmove_lit
+
+	// if len(dst[di:]) < 16
+	MOVQ R8, AX
+	SUBQ DI, AX
+	CMPQ AX, $16
+	JLT memmove_lit
+
+	// if len(src[si:]) < 16
+	MOVQ R9, AX
+	SUBQ SI, AX
+	CMPQ AX, $16
+	JLT memmove_lit
+
+	MOVOU (SI), X0
+	MOVOU X0, (DI)
+
+	JMP finish_lit_copy
+
+memmove_lit:
+	// memmove(to, from, len)
+	MOVQ DI, 0(SP)
+	MOVQ SI, 8(SP)
+	MOVQ CX, 16(SP)
+	// spill
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+	MOVQ CX, 40(SP) // need len to inc SI, DI after
+	MOVB DX, 48(SP)
+	CALL runtime·memmove(SB)
+
+	// restore registers
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+	MOVQ 40(SP), CX
+	MOVB 48(SP), DX
+
+	// recalc initial values
+	MOVQ dst_base+0(FP), R8
+	MOVQ R8, R11
+	ADDQ dst_len+8(FP), R8
+	MOVQ src_base+24(FP), R9
+	ADDQ src_len+32(FP), R9
+	MOVQ R8, R12
+	SUBQ $32, R12
+	MOVQ R9, R13
+	SUBQ $16, R13
+
+finish_lit_copy:
+	ADDQ CX, SI
+	ADDQ CX, DI
+
+	CMPQ SI, R9
+	JGE end
+
+offset:
+	// CX := mLen
+	// free up DX to use for offset
+	MOVQ DX, CX
+
+	MOVQ SI, AX
+	ADDQ $2, AX
+	CMPQ AX, R9
+	JGT err_short_buf
+
+	// offset
+	// DX := int(src[si]) | int(src[si+1])<<8
+	MOVWQZX (SI), DX
+	ADDQ $2, SI
+
+	// 0 offset is invalid
+	CMPQ DX, $0
+	JEQ err_corrupt
+
+	ANDB $0xF, CX
+
+match_len_loop_pre:
+	// if mlen != 0xF
+	CMPB CX, $0xF
+	JNE copy_match
+
+match_len_loop:
+	// for src[si] == 0xFF
+	// match_len += 0xFF
+	CMPB (SI), $0xFF
+	JNE match_len_finalise
+
+	// bounds check src[si+1]
+	MOVQ SI, AX
+	ADDQ $1, AX
+	CMPQ AX, R9
+	JGT err_short_buf
+
+	ADDQ $0xFF, CX
+	INCQ SI
+	JMP match_len_loop
+
+match_len_finalise:
+	// match_len += int(src[si])
+	// si++
+	MOVBQZX (SI), AX
+	ADDQ AX, CX
+	INCQ SI
+
+copy_match:
+	// mLen += minMatch
+	ADDQ $4, CX
+
+	// check we have match_len bytes left in dst
+	// di+match_len < len(dst)
+	MOVQ DI, AX
+	ADDQ CX, AX
+	CMPQ AX, R8
+	JGT err_short_buf
+
+	// DX = offset
+	// CX = match_len
+	// BX = &dst + (di - offset)
+	MOVQ DI, BX
+	SUBQ DX, BX
+
+	// check BX is within dst
+	// if BX < &dst
+	CMPQ BX, R11
+	JLT err_short_buf
+
+	// if offset + match_len < di
+	MOVQ BX, AX
+	ADDQ CX, AX
+	CMPQ DI, AX
+	JGT copy_interior_match
+
+	// AX := len(dst[:di])
+	// MOVQ DI, AX
+	// SUBQ R11, AX
+
+	// copy 16 bytes at a time
+	// if di-offset < 16 copy 16-(di-offset) bytes to di
+	// then do the remaining
+
+copy_match_loop:
+	// for match_len >= 0
+	// dst[di] = dst[i]
+	// di++
+	// i++
+	MOVB (BX), AX
+	MOVB AX, (DI)
+	INCQ DI
+	INCQ BX
+	DECQ CX
+
+	CMPQ CX, $0
+	JGT copy_match_loop
+
+	JMP loop
+
+copy_interior_match:
+	CMPQ CX, $16
+	JGT memmove_match
+
+	// if len(dst[di:]) < 16
+	MOVQ R8, AX
+	SUBQ DI, AX
+	CMPQ AX, $16
+	JLT memmove_match
+
+	MOVOU (BX), X0
+	MOVOU X0, (DI)
+
+	ADDQ CX, DI
+	JMP loop
+
+memmove_match:
+	// memmove(to, from, len)
+	MOVQ DI, 0(SP)
+	MOVQ BX, 8(SP)
+	MOVQ CX, 16(SP)
+	// spill
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+	MOVQ CX, 40(SP) // need len to inc SI, DI after
+	CALL runtime·memmove(SB)
+
+	// restore registers
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+	MOVQ 40(SP), CX
+
+	// recalc initial values
+	MOVQ dst_base+0(FP), R8
+	MOVQ R8, R11 // TODO: make these sensible numbers
+	ADDQ dst_len+8(FP), R8
+	MOVQ src_base+24(FP), R9
+	ADDQ src_len+32(FP), R9
+	MOVQ R8, R12
+	SUBQ $32, R12
+	MOVQ R9, R13
+	SUBQ $16, R13
+
+	ADDQ CX, DI
+	JMP loop
+
+err_corrupt:
+	MOVQ $-1, ret+48(FP)
+	RET
+
+err_short_buf:
+	MOVQ $-2, ret+48(FP)
+	RET
+
+end:
+	SUBQ R11, DI
+	MOVQ DI, ret+48(FP)
+	RET
diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go
new file mode 100644
index 0000000..919888e
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/decode_other.go
@@ -0,0 +1,98 @@
+// +build !amd64 appengine !gc noasm
+
+package lz4
+
+func decodeBlock(dst, src []byte) (ret int) {
+	const hasError = -2
+	defer func() {
+		if recover() != nil {
+			ret = hasError
+		}
+	}()
+
+	var si, di int
+	for {
+		// Literals and match lengths (token).
+		b := int(src[si])
+		si++
+
+		// Literals.
+		if lLen := b >> 4; lLen > 0 {
+			switch {
+			case lLen < 0xF && si+16 < len(src):
+				// Shortcut 1
+				// if we have enough room in src and dst, and the literals length
+				// is small enough (0..14) then copy all 16 bytes, even if not all
+				// are part of the literals.
+				copy(dst[di:], src[si:si+16])
+				si += lLen
+				di += lLen
+				if mLen := b & 0xF; mLen < 0xF {
+					// Shortcut 2
+					// if the match length (4..18) fits within the literals, then copy
+					// all 18 bytes, even if not all are part of the literals.
+					mLen += 4
+					if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset {
+						i := di - offset
+						end := i + 18
+						if end > len(dst) {
+							// The remaining buffer may not hold 18 bytes.
+							// See https://github.com/pierrec/lz4/issues/51.
+							end = len(dst)
+						}
+						copy(dst[di:], dst[i:end])
+						si += 2
+						di += mLen
+						continue
+					}
+				}
+			case lLen == 0xF:
+				for src[si] == 0xFF {
+					lLen += 0xFF
+					si++
+				}
+				lLen += int(src[si])
+				si++
+				fallthrough
+			default:
+				copy(dst[di:di+lLen], src[si:si+lLen])
+				si += lLen
+				di += lLen
+			}
+		}
+		if si >= len(src) {
+			return di
+		}
+
+		offset := int(src[si]) | int(src[si+1])<<8
+		if offset == 0 {
+			return hasError
+		}
+		si += 2
+
+		// Match.
+		mLen := b & 0xF
+		if mLen == 0xF {
+			for src[si] == 0xFF {
+				mLen += 0xFF
+				si++
+			}
+			mLen += int(src[si])
+			si++
+		}
+		mLen += minMatch
+
+		// Copy the match.
+		expanded := dst[di-offset:]
+		if mLen > offset {
+			// Efficiently copy the match dst[di-offset:di] into the dst slice.
+			bytesToCopy := offset * (mLen / offset)
+			for n := offset; n <= bytesToCopy+offset; n *= 2 {
+				copy(expanded[n:], expanded[:n])
+			}
+			di += bytesToCopy
+			mLen -= bytesToCopy
+		}
+		di += copy(dst[di:di+mLen], expanded[:mLen])
+	}
+}
diff --git a/vendor/github.com/pierrec/lz4/errors.go b/vendor/github.com/pierrec/lz4/errors.go
new file mode 100644
index 0000000..1c45d18
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/errors.go
@@ -0,0 +1,30 @@
+package lz4
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	rdebug "runtime/debug"
+)
+
+var (
+	// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
+	// block is corrupted or the destination buffer is not large enough for the uncompressed data.
+	ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short")
+	// ErrInvalid is returned when reading an invalid LZ4 archive.
+	ErrInvalid = errors.New("lz4: bad magic number")
+	// ErrBlockDependency is returned when attempting to decompress an archive created with block dependency.
+	ErrBlockDependency = errors.New("lz4: block dependency not supported")
+	// ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position.
+	ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent")
+)
+
+func recoverBlock(e *error) {
+	if r := recover(); r != nil && *e == nil {
+		if debugFlag {
+			fmt.Fprintln(os.Stderr, r)
+			rdebug.PrintStack()
+		}
+		*e = ErrInvalidSourceShortBuffer
+	}
+}
diff --git a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
new file mode 100644
index 0000000..7a76a6b
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
@@ -0,0 +1,223 @@
+// Package xxh32 implements the very fast XXH hashing algorithm (32-bit version).
+// (https://github.com/Cyan4973/XXH/)
+package xxh32
+
+import (
+	"encoding/binary"
+)
+
+const (
+	prime1 uint32 = 2654435761
+	prime2 uint32 = 2246822519
+	prime3 uint32 = 3266489917
+	prime4 uint32 = 668265263
+	prime5 uint32 = 374761393
+
+	primeMask   = 0xFFFFFFFF
+	prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984
+	prime1minus = uint32((-int64(prime1)) & primeMask)                  // 1640531535
+)
+
+// XXHZero represents an xxhash32 object with seed 0.
+type XXHZero struct {
+	v1       uint32
+	v2       uint32
+	v3       uint32
+	v4       uint32
+	totalLen uint64
+	buf      [16]byte
+	bufused  int
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (xxh XXHZero) Sum(b []byte) []byte {
+	h32 := xxh.Sum32()
+	return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
+}
+
+// Reset resets the Hash to its initial state.
+func (xxh *XXHZero) Reset() {
+	xxh.v1 = prime1plus2
+	xxh.v2 = prime2
+	xxh.v3 = 0
+	xxh.v4 = prime1minus
+	xxh.totalLen = 0
+	xxh.bufused = 0
+}
+
+// Size returns the number of bytes returned by Sum().
+func (xxh *XXHZero) Size() int {
+	return 4
+}
+
+// BlockSize gives the minimum number of bytes accepted by Write().
+func (xxh *XXHZero) BlockSize() int {
+	return 1
+}
+
+// Write adds input bytes to the Hash.
+// It never returns an error.
+func (xxh *XXHZero) Write(input []byte) (int, error) {
+	if xxh.totalLen == 0 {
+		xxh.Reset()
+	}
+	n := len(input)
+	m := xxh.bufused
+
+	xxh.totalLen += uint64(n)
+
+	r := len(xxh.buf) - m
+	if n < r {
+		copy(xxh.buf[m:], input)
+		xxh.bufused += len(input)
+		return n, nil
+	}
+
+	p := 0
+	// Causes compiler to work directly from registers instead of stack:
+	v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4
+	if m > 0 {
+		// some data left from previous update
+		copy(xxh.buf[xxh.bufused:], input[:r])
+		xxh.bufused += len(input) - r
+
+		// fast rotl(13)
+		buf := xxh.buf[:16] // BCE hint.
+		v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1
+		v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1
+		v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1
+		v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1
+		p = r
+		xxh.bufused = 0
+	}
+
+	for n := n - 16; p <= n; p += 16 {
+		sub := input[p:][:16] //BCE hint for compiler
+		v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
+		v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
+		v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
+		v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
+	}
+	xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4
+
+	copy(xxh.buf[xxh.bufused:], input[p:])
+	xxh.bufused += len(input) - p
+
+	return n, nil
+}
+
+// Sum32 returns the 32-bit hash value.
+func (xxh *XXHZero) Sum32() uint32 {
+	h32 := uint32(xxh.totalLen)
+	if h32 >= 16 {
+		h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4)
+	} else {
+		h32 += prime5
+	}
+
+	p := 0
+	n := xxh.bufused
+	buf := xxh.buf
+	for n := n - 4; p <= n; p += 4 {
+		h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3
+		h32 = rol17(h32) * prime4
+	}
+	for ; p < n; p++ {
+		h32 += uint32(buf[p]) * prime5
+		h32 = rol11(h32) * prime1
+	}
+
+	h32 ^= h32 >> 15
+	h32 *= prime2
+	h32 ^= h32 >> 13
+	h32 *= prime3
+	h32 ^= h32 >> 16
+
+	return h32
+}
+
+// ChecksumZero returns the 32-bit hash value.
+func ChecksumZero(input []byte) uint32 {
+	n := len(input)
+	h32 := uint32(n)
+
+	if n < 16 {
+		h32 += prime5
+	} else {
+		v1 := prime1plus2
+		v2 := prime2
+		v3 := uint32(0)
+		v4 := prime1minus
+		p := 0
+		for n := n - 16; p <= n; p += 16 {
+			sub := input[p:][:16] //BCE hint for compiler
+			v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
+			v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
+			v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
+			v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
+		}
+		input = input[p:]
+		n -= p
+		h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+	}
+
+	p := 0
+	for n := n - 4; p <= n; p += 4 {
+		h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3
+		h32 = rol17(h32) * prime4
+	}
+	for p < n {
+		h32 += uint32(input[p]) * prime5
+		h32 = rol11(h32) * prime1
+		p++
+	}
+
+	h32 ^= h32 >> 15
+	h32 *= prime2
+	h32 ^= h32 >> 13
+	h32 *= prime3
+	h32 ^= h32 >> 16
+
+	return h32
+}
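+
+// Usage sketch (comment only): one-shot hashing with seed 0; for streaming
+// input use XXHZero with Write/Sum32 instead:
+//
+//	sum := ChecksumZero([]byte("hello"))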
+
+// Uint32Zero hashes x with seed 0.
+func Uint32Zero(x uint32) uint32 {
+	h := prime5 + 4 + x*prime3
+	h = rol17(h) * prime4
+	h ^= h >> 15
+	h *= prime2
+	h ^= h >> 13
+	h *= prime3
+	h ^= h >> 16
+	return h
+}
+
+func rol1(u uint32) uint32 {
+	return u<<1 | u>>31
+}
+
+func rol7(u uint32) uint32 {
+	return u<<7 | u>>25
+}
+
+func rol11(u uint32) uint32 {
+	return u<<11 | u>>21
+}
+
+func rol12(u uint32) uint32 {
+	return u<<12 | u>>20
+}
+
+func rol13(u uint32) uint32 {
+	return u<<13 | u>>19
+}
+
+func rol17(u uint32) uint32 {
+	return u<<17 | u>>15
+}
+
+func rol18(u uint32) uint32 {
+	return u<<18 | u>>14
+}
diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go
new file mode 100644
index 0000000..cdbf961
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/lz4.go
@@ -0,0 +1,66 @@
+// Package lz4 implements reading and writing lz4 compressed data (a frame),
+// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html.
+//
+// Although the block level compression and decompression functions are exposed and are fully compatible
+// with the lz4 block format definition, they are low level and should not be used directly.
+// For a complete description of an lz4 compressed block, see:
+// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
+//
+// See https://github.com/Cyan4973/lz4 for the reference C implementation.
+//
+package lz4
+
+const (
+	// Extension is the LZ4 frame file name extension
+	Extension = ".lz4"
+	// Version is the LZ4 frame format version
+	Version = 1
+
+	frameMagic     uint32 = 0x184D2204
+	frameSkipMagic uint32 = 0x184D2A50
+
+	// The following constants are used to setup the compression algorithm.
+	minMatch            = 4  // the minimum size of a match sequence (4 bytes)
+	winSizeLog          = 16 // LZ4 64Kb window size limit
+	winSize             = 1 << winSizeLog
+	winMask             = winSize - 1 // 64Kb window of previous data for dependent blocks
+	compressedBlockFlag = 1 << 31
+	compressedBlockMask = compressedBlockFlag - 1
+
+	// hashLog determines the size of the hash table used to quickly find a previous match position.
+	// Its value influences the compression speed and memory usage, the lower the faster,
+	// but at the expense of the compression ratio.
+	// 16 seems to be the best compromise for fast compression.
+	hashLog = 16
+	htSize  = 1 << hashLog
+
+	mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes.
+)
+
+// Map the block max size ID to its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb.
+const (
+	blockSize64K  = 64 << 10
+	blockSize256K = 256 << 10
+	blockSize1M   = 1 << 20
+	blockSize4M   = 4 << 20
+)
+
+var (
+	bsMapID    = map[byte]int{4: blockSize64K, 5: blockSize256K, 6: blockSize1M, 7: blockSize4M}
+	bsMapValue = map[int]byte{blockSize64K: 4, blockSize256K: 5, blockSize1M: 6, blockSize4M: 7}
+)
+
+// Header describes the various flags that can be set on a Writer or obtained from a Reader.
+// The default values match those of the LZ4 frame format definition
+// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
+//
+// NB. In a Reader, in case of concatenated frames, the Header values may change between Read() calls.
+// It is the caller's responsibility to check them if necessary.
+type Header struct {
+	BlockChecksum    bool   // Compressed blocks checksum flag.
+	NoChecksum       bool   // Frame checksum flag.
+	BlockMaxSize     int    // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB.
+	Size             uint64 // Frame total size. It is _not_ computed by the Writer.
+	CompressionLevel int    // Compression level (higher is better, use 0 for fastest compression).
+	done             bool   // Header processed flag (Read or Write and checked).
+}
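+
+// Configuration sketch (comment only): Header fields are set through the
+// embedding Writer (or read back from a Reader) before first use:
+//
+//	zw := NewWriter(w)
+//	zw.Header.BlockMaxSize = 256 << 10 // 256Kb blocks instead of the 4Mb default.
+//	zw.Header.BlockChecksum = true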
diff --git a/vendor/github.com/pierrec/lz4/lz4_go1.10.go b/vendor/github.com/pierrec/lz4/lz4_go1.10.go
new file mode 100644
index 0000000..9a0fb00
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/lz4_go1.10.go
@@ -0,0 +1,29 @@
+//+build go1.10
+
+package lz4
+
+import (
+	"fmt"
+	"strings"
+)
+
+func (h Header) String() string {
+	var s strings.Builder
+
+	s.WriteString(fmt.Sprintf("%T{", h))
+	if h.BlockChecksum {
+		s.WriteString("BlockChecksum: true ")
+	}
+	if h.NoChecksum {
+		s.WriteString("NoChecksum: true ")
+	}
+	if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
+		s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
+	}
+	if l := h.CompressionLevel; l != 0 {
+		s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
+	}
+	s.WriteByte('}')
+
+	return s.String()
+}
diff --git a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go
new file mode 100644
index 0000000..12c761a
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go
@@ -0,0 +1,29 @@
+//+build !go1.10
+
+package lz4
+
+import (
+	"bytes"
+	"fmt"
+)
+
+func (h Header) String() string {
+	var s bytes.Buffer
+
+	s.WriteString(fmt.Sprintf("%T{", h))
+	if h.BlockChecksum {
+		s.WriteString("BlockChecksum: true ")
+	}
+	if h.NoChecksum {
+		s.WriteString("NoChecksum: true ")
+	}
+	if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
+		s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
+	}
+	if l := h.CompressionLevel; l != 0 {
+		s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
+	}
+	s.WriteByte('}')
+
+	return s.String()
+}
diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go
new file mode 100644
index 0000000..126b792
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/reader.go
@@ -0,0 +1,335 @@
+package lz4
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"io/ioutil"
+
+	"github.com/pierrec/lz4/internal/xxh32"
+)
+
+// Reader implements the LZ4 frame decoder.
+// The Header is set after the first call to Read().
+// The Header may change between Read() calls in case of concatenated frames.
+type Reader struct {
+	Header
+	// Handler called when a block has been successfully read.
+	// It provides the number of bytes read.
+	OnBlockDone func(size int)
+
+	buf      [8]byte       // Scrap buffer.
+	pos      int64         // Current position in src.
+	src      io.Reader     // Source.
+	zdata    []byte        // Compressed data.
+	data     []byte        // Uncompressed data.
+	idx      int           // Index of unread bytes into data.
+	checksum xxh32.XXHZero // Frame hash.
+	skip     int64         // Bytes to skip before next read.
+	dpos     int64         // Position in dest
+}
+
+// NewReader returns a new LZ4 frame decoder.
+// No access to the underlying io.Reader is performed.
+func NewReader(src io.Reader) *Reader {
+	r := &Reader{src: src}
+	return r
+}
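+
+// Usage sketch (comment only; f is an assumed io.Reader over an LZ4 frame,
+// os.Stdout an illustrative destination):
+//
+//	zr := NewReader(f)
+//	_, err := io.Copy(os.Stdout, zr)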
+
+// readHeader checks the frame magic number and parses the frame descriptor.
+// Skippable frames are supported even as a first frame, although the LZ4
+// specification recommends that skippable frames not be used as first frames.
+func (z *Reader) readHeader(first bool) error {
+	defer z.checksum.Reset()
+
+	buf := z.buf[:]
+	for {
+		magic, err := z.readUint32()
+		if err != nil {
+			z.pos += 4
+			if !first && err == io.ErrUnexpectedEOF {
+				return io.EOF
+			}
+			return err
+		}
+		if magic == frameMagic {
+			break
+		}
+		if magic>>8 != frameSkipMagic>>8 {
+			return ErrInvalid
+		}
+		skipSize, err := z.readUint32()
+		if err != nil {
+			return err
+		}
+		z.pos += 4
+		m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize))
+		if err != nil {
+			return err
+		}
+		z.pos += m
+	}
+
+	// Header.
+	if _, err := io.ReadFull(z.src, buf[:2]); err != nil {
+		return err
+	}
+	z.pos += 8
+
+	b := buf[0]
+	if v := b >> 6; v != Version {
+		return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version)
+	}
+	if b>>5&1 == 0 {
+		return ErrBlockDependency
+	}
+	z.BlockChecksum = b>>4&1 > 0
+	frameSize := b>>3&1 > 0
+	z.NoChecksum = b>>2&1 == 0
+
+	bmsID := buf[1] >> 4 & 0x7
+	bSize, ok := bsMapID[bmsID]
+	if !ok {
+		return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID)
+	}
+	z.BlockMaxSize = bSize
+
+	// Allocate the compressed/uncompressed buffers.
+	// The compressed buffer cannot exceed the uncompressed one.
+	if n := 2 * bSize; cap(z.zdata) < n {
+		z.zdata = make([]byte, n, n)
+	}
+	if debugFlag {
+		debug("header block max size id=%d size=%d", bmsID, bSize)
+	}
+	z.zdata = z.zdata[:bSize]
+	z.data = z.zdata[:cap(z.zdata)][bSize:]
+	z.idx = len(z.data)
+
+	_, _ = z.checksum.Write(buf[0:2])
+
+	if frameSize {
+		buf := buf[:8]
+		if _, err := io.ReadFull(z.src, buf); err != nil {
+			return err
+		}
+		z.Size = binary.LittleEndian.Uint64(buf)
+		z.pos += 8
+		_, _ = z.checksum.Write(buf)
+	}
+
+	// Header checksum.
+	if _, err := io.ReadFull(z.src, buf[:1]); err != nil {
+		return err
+	}
+	z.pos++
+	if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] {
+		return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h)
+	}
+
+	z.Header.done = true
+	if debugFlag {
+		debug("header read: %v", z.Header)
+	}
+
+	return nil
+}
+
+// Read decompresses data from the underlying source into the supplied buffer.
+//
+// Since there can be multiple streams concatenated, Header values may
+// change between calls to Read(). If that is the case, no data is actually read from
+// the underlying io.Reader, to allow for potential input buffer resizing.
+func (z *Reader) Read(buf []byte) (int, error) {
+	if debugFlag {
+		debug("Read buf len=%d", len(buf))
+	}
+	if !z.Header.done {
+		if err := z.readHeader(true); err != nil {
+			return 0, err
+		}
+		if debugFlag {
+			debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d",
+				len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx)
+		}
+	}
+
+	if len(buf) == 0 {
+		return 0, nil
+	}
+
+	if z.idx == len(z.data) {
+		// No data ready for reading, process the next block.
+		if debugFlag {
+			debug("reading block from writer")
+		}
+		// Reset uncompressed buffer
+		z.data = z.zdata[:cap(z.zdata)][len(z.zdata):]
+
+		// Block length: 0 = end of frame, highest bit set: uncompressed.
+		bLen, err := z.readUint32()
+		if err != nil {
+			return 0, err
+		}
+		z.pos += 4
+
+		if bLen == 0 {
+			// End of frame reached.
+			if !z.NoChecksum {
+				// Validate the frame checksum.
+				checksum, err := z.readUint32()
+				if err != nil {
+					return 0, err
+				}
+				if debugFlag {
+					debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum)
+				}
+				z.pos += 4
+				if h := z.checksum.Sum32(); checksum != h {
+					return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum)
+				}
+			}
+
+			// Get ready for the next concatenated frame and keep the position.
+			pos := z.pos
+			z.Reset(z.src)
+			z.pos = pos
+
+			// Since multiple frames can be concatenated, check for more.
+			return 0, z.readHeader(false)
+		}
+
+		if debugFlag {
+			debug("raw block size %d", bLen)
+		}
+		if bLen&compressedBlockFlag > 0 {
+			// Uncompressed block.
+			bLen &= compressedBlockMask
+			if debugFlag {
+				debug("uncompressed block size %d", bLen)
+			}
+			if int(bLen) > cap(z.data) {
+				return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
+			}
+			z.data = z.data[:bLen]
+			if _, err := io.ReadFull(z.src, z.data); err != nil {
+				return 0, err
+			}
+			z.pos += int64(bLen)
+			if z.OnBlockDone != nil {
+				z.OnBlockDone(int(bLen))
+			}
+
+			if z.BlockChecksum {
+				checksum, err := z.readUint32()
+				if err != nil {
+					return 0, err
+				}
+				z.pos += 4
+
+				if h := xxh32.ChecksumZero(z.data); h != checksum {
+					return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
+				}
+			}
+
+		} else {
+			// Compressed block.
+			if debugFlag {
+				debug("compressed block size %d", bLen)
+			}
+			if int(bLen) > cap(z.data) {
+				return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
+			}
+			zdata := z.zdata[:bLen]
+			if _, err := io.ReadFull(z.src, zdata); err != nil {
+				return 0, err
+			}
+			z.pos += int64(bLen)
+
+			if z.BlockChecksum {
+				checksum, err := z.readUint32()
+				if err != nil {
+					return 0, err
+				}
+				z.pos += 4
+
+				if h := xxh32.ChecksumZero(zdata); h != checksum {
+					return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
+				}
+			}
+
+			n, err := UncompressBlock(zdata, z.data)
+			if err != nil {
+				return 0, err
+			}
+			z.data = z.data[:n]
+			if z.OnBlockDone != nil {
+				z.OnBlockDone(n)
+			}
+		}
+
+		if !z.NoChecksum {
+			_, _ = z.checksum.Write(z.data)
+			if debugFlag {
+				debug("current frame checksum %x", z.checksum.Sum32())
+			}
+		}
+		z.idx = 0
+	}
+
+	if z.skip > int64(len(z.data[z.idx:])) {
+		z.skip -= int64(len(z.data[z.idx:]))
+		z.dpos += int64(len(z.data[z.idx:]))
+		z.idx = len(z.data)
+		return 0, nil
+	}
+
+	z.idx += int(z.skip)
+	z.dpos += z.skip
+	z.skip = 0
+
+	n := copy(buf, z.data[z.idx:])
+	z.idx += n
+	z.dpos += int64(n)
+	if debugFlag {
+		debug("copied %d bytes to input", n)
+	}
+
+	return n, nil
+}
+
+// Seek implements io.Seeker, but supports seeking forward from the current
+// position only. Any other seek will return an error. Allows skipping output
+// bytes which aren't needed, which in some scenarios is faster than reading
+// and discarding them.
+// Note this may cause future calls to Read() to read 0 bytes if all of the
+// data they would have returned is skipped.
+func (z *Reader) Seek(offset int64, whence int) (int64, error) {
+	if offset < 0 || whence != io.SeekCurrent {
+		return z.dpos + z.skip, ErrUnsupportedSeek
+	}
+	z.skip += offset
+	return z.dpos + z.skip, nil
+}
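+
+// Skip sketch (comment only): discard the next 1Mb of uncompressed output
+// without copying it out:
+//
+//	_, err := zr.Seek(1<<20, io.SeekCurrent)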
+
+// Reset discards the Reader's state and makes it equivalent to its
+// initial state from NewReader, but reading from r instead.
+// This permits reusing a Reader rather than allocating a new one.
+func (z *Reader) Reset(r io.Reader) {
+	z.Header = Header{}
+	z.pos = 0
+	z.src = r
+	z.zdata = z.zdata[:0]
+	z.data = z.data[:0]
+	z.idx = 0
+	z.checksum.Reset()
+}
+
+// readUint32 reads a uint32 into the supplied buffer.
+// The idea is to reuse the already allocated buffers, avoiding additional allocations.
+func (z *Reader) readUint32() (uint32, error) {
+	buf := z.buf[:4]
+	_, err := io.ReadFull(z.src, buf)
+	x := binary.LittleEndian.Uint32(buf)
+	return x, err
+}
diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go
new file mode 100644
index 0000000..2cc8d95
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/writer.go
@@ -0,0 +1,275 @@
+package lz4
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+
+	"github.com/pierrec/lz4/internal/xxh32"
+)
+
+// Writer implements the LZ4 frame encoder.
+type Writer struct {
+	Header
+	// Handler called when a block has been successfully written out.
+	// It provides the number of bytes written.
+	OnBlockDone func(size int)
+
+	buf       [19]byte      // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
+	dst       io.Writer     // Destination.
+	checksum  xxh32.XXHZero // Frame checksum.
+	zdata     []byte        // Compressed data.
+	data      []byte        // Data to be compressed.
+	idx       int           // Index into data.
+	hashtable [winSize]int  // Hash table used in CompressBlock().
+}
+
+// NewWriter returns a new LZ4 frame encoder.
+// No access to the underlying io.Writer is performed.
+// The supplied Header is checked at the first Write.
+// It is OK to change it before the first Write; after that, it must not be changed until a Reset() is performed.
+func NewWriter(dst io.Writer) *Writer {
+	return &Writer{dst: dst}
+}
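+
+// Usage sketch (comment only; w is an assumed io.Writer, src an io.Reader):
+//
+//	zw := NewWriter(w)
+//	_, err := io.Copy(zw, src)
+//	err = zw.Close() // Close flushes and writes the end-of-frame marker.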
+
+// writeHeader builds and writes the header (magic+header) to the underlying io.Writer.
+func (z *Writer) writeHeader() error {
+	// Default to 4Mb if BlockMaxSize is not set.
+	if z.Header.BlockMaxSize == 0 {
+		z.Header.BlockMaxSize = bsMapID[7]
+	}
+	// The only option that needs to be validated.
+	bSize := z.Header.BlockMaxSize
+	bSizeID, ok := bsMapValue[bSize]
+	if !ok {
+		return fmt.Errorf("lz4: invalid block max size: %d", bSize)
+	}
+	// Allocate the compressed/uncompressed buffers.
+	// The compressed buffer cannot exceed the uncompressed one.
+	if cap(z.zdata) < bSize {
+		// Only allocate if there is not enough capacity.
+		// Allocate both buffers at once.
+		z.zdata = make([]byte, 2*bSize)
+	}
+	z.data = z.zdata[:bSize]                 // Uncompressed buffer is the first half.
+	z.zdata = z.zdata[:cap(z.zdata)][bSize:] // Compressed buffer is the second half.
+	z.idx = 0
+
+	// Size is optional.
+	buf := z.buf[:]
+
+	// Set the fixed size data: magic number, block max size and flags.
+	binary.LittleEndian.PutUint32(buf[0:], frameMagic)
+	flg := byte(Version << 6)
+	flg |= 1 << 5 // No block dependency.
+	if z.Header.BlockChecksum {
+		flg |= 1 << 4
+	}
+	if z.Header.Size > 0 {
+		flg |= 1 << 3
+	}
+	if !z.Header.NoChecksum {
+		flg |= 1 << 2
+	}
+	buf[4] = flg
+	buf[5] = bSizeID << 4
+
+	// Current buffer size: magic(4) + flags(1) + block max size (1).
+	n := 6
+	// Optional items.
+	if z.Header.Size > 0 {
+		binary.LittleEndian.PutUint64(buf[n:], z.Header.Size)
+		n += 8
+	}
+
+	// The header checksum includes the flags, block max size and optional Size.
+	buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF)
+	z.checksum.Reset()
+
+	// Header ready, write it out.
+	if _, err := z.dst.Write(buf[0 : n+1]); err != nil {
+		return err
+	}
+	z.Header.done = true
+	if debugFlag {
+		debug("wrote header %v", z.Header)
+	}
+
+	return nil
+}
+
+// Write compresses data from the supplied buffer into the underlying io.Writer.
+// Write does not return until the data has been written.
+func (z *Writer) Write(buf []byte) (int, error) {
+	if !z.Header.done {
+		if err := z.writeHeader(); err != nil {
+			return 0, err
+		}
+	}
+	if debugFlag {
+		debug("input buffer len=%d index=%d", len(buf), z.idx)
+	}
+
+	zn := len(z.data)
+	var n int
+	for len(buf) > 0 {
+		if z.idx == 0 && len(buf) >= zn {
+			// Avoid a copy as there is enough data for a block.
+			if err := z.compressBlock(buf[:zn]); err != nil {
+				return n, err
+			}
+			n += zn
+			buf = buf[zn:]
+			continue
+		}
+		// Accumulate the data to be compressed.
+		m := copy(z.data[z.idx:], buf)
+		n += m
+		z.idx += m
+		buf = buf[m:]
+		if debugFlag {
+			debug("%d bytes copied to buf, current index %d", n, z.idx)
+		}
+
+		if z.idx < len(z.data) {
+			// Buffer not filled.
+			if debugFlag {
+				debug("need more data for compression")
+			}
+			return n, nil
+		}
+
+		// Buffer full.
+		if err := z.compressBlock(z.data); err != nil {
+			return n, err
+		}
+		z.idx = 0
+	}
+
+	return n, nil
+}
+
+// compressBlock compresses a block.
+func (z *Writer) compressBlock(data []byte) error {
+	if !z.NoChecksum {
+		z.checksum.Write(data)
+	}
+
+	// The compressed block size cannot exceed the input's.
+	var zn int
+	var err error
+
+	if level := z.Header.CompressionLevel; level != 0 {
+		zn, err = CompressBlockHC(data, z.zdata, level)
+	} else {
+		zn, err = CompressBlock(data, z.zdata, z.hashtable[:])
+	}
+
+	var zdata []byte
+	var bLen uint32
+	if debugFlag {
+		debug("block compression %d => %d", len(data), zn)
+	}
+	if err == nil && zn > 0 && zn < len(data) {
+		// Compressible and compressed size smaller than uncompressed: ok!
+		bLen = uint32(zn)
+		zdata = z.zdata[:zn]
+	} else {
+		// Uncompressed block.
+		bLen = uint32(len(data)) | compressedBlockFlag
+		zdata = data
+	}
+	if debugFlag {
+		debug("block compression to be written len=%d data len=%d", bLen, len(zdata))
+	}
+
+	// Write the block.
+	if err := z.writeUint32(bLen); err != nil {
+		return err
+	}
+	written, err := z.dst.Write(zdata)
+	if err != nil {
+		return err
+	}
+	if h := z.OnBlockDone; h != nil {
+		h(written)
+	}
+
+	if !z.BlockChecksum {
+		if debugFlag {
+			debug("current frame checksum %x", z.checksum.Sum32())
+		}
+		return nil
+	}
+	checksum := xxh32.ChecksumZero(zdata)
+	if debugFlag {
+		debug("block checksum %x", checksum)
+		defer func() { debug("current frame checksum %x", z.checksum.Sum32()) }()
+	}
+	return z.writeUint32(checksum)
+}
+
+// Flush flushes any pending compressed data to the underlying writer.
+// Flush does not return until the data has been written.
+// If the underlying writer returns an error, Flush returns that error.
+func (z *Writer) Flush() error {
+	if debugFlag {
+		debug("flush with index %d", z.idx)
+	}
+	if z.idx == 0 {
+		return nil
+	}
+
+	if err := z.compressBlock(z.data[:z.idx]); err != nil {
+		return err
+	}
+	z.idx = 0
+	return nil
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer.
+func (z *Writer) Close() error {
+	if !z.Header.done {
+		if err := z.writeHeader(); err != nil {
+			return err
+		}
+	}
+	if err := z.Flush(); err != nil {
+		return err
+	}
+
+	if debugFlag {
+		debug("writing last empty block")
+	}
+	if err := z.writeUint32(0); err != nil {
+		return err
+	}
+	if z.NoChecksum {
+		return nil
+	}
+	checksum := z.checksum.Sum32()
+	if debugFlag {
+		debug("stream checksum %x", checksum)
+	}
+	return z.writeUint32(checksum)
+}
+
+// Reset clears the state of the Writer z such that it is equivalent to its
+// initial state from NewWriter, but instead writing to w.
+// No access to the underlying io.Writer is performed.
+func (z *Writer) Reset(w io.Writer) {
+	z.Header = Header{}
+	z.dst = w
+	z.checksum.Reset()
+	z.zdata = z.zdata[:0]
+	z.data = z.data[:0]
+	z.idx = 0
+}
+
+// writeUint32 writes a uint32 to the underlying writer.
+func (z *Writer) writeUint32(x uint32) error {
+	buf := z.buf[:4]
+	binary.LittleEndian.PutUint32(buf, x)
+	_, err := z.dst.Write(buf)
+	return err
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/.gitignore b/vendor/github.com/rcrowley/go-metrics/.gitignore
new file mode 100644
index 0000000..83c8f82
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/.gitignore
@@ -0,0 +1,9 @@
+*.[68]
+*.a
+*.out
+*.swp
+_obj
+_testmain.go
+cmd/metrics-bench/metrics-bench
+cmd/metrics-example/metrics-example
+cmd/never-read/never-read
diff --git a/vendor/github.com/rcrowley/go-metrics/.travis.yml b/vendor/github.com/rcrowley/go-metrics/.travis.yml
new file mode 100644
index 0000000..aead076
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/.travis.yml
@@ -0,0 +1,20 @@
+language: go
+
+go:
+    - "1.3"
+    - "1.4"
+    - "1.5"
+    - "1.6"
+    - "1.7"
+    - "1.8"
+    - "1.9"
+    - "1.10"
+    - "1.11"
+    - "1.12"
+
+script:
+    - ./validate.sh
+
+# this should give us faster builds according to 
+# http://docs.travis-ci.com/user/migrating-from-legacy/
+sudo: false
diff --git a/vendor/github.com/rcrowley/go-metrics/LICENSE b/vendor/github.com/rcrowley/go-metrics/LICENSE
new file mode 100644
index 0000000..363fa9e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/LICENSE
@@ -0,0 +1,29 @@
+Copyright 2012 Richard Crowley. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    1.  Redistributions of source code must retain the above copyright
+        notice, this list of conditions and the following disclaimer.
+
+    2.  Redistributions in binary form must reproduce the above
+        copyright notice, this list of conditions and the following
+        disclaimer in the documentation and/or other materials provided
+        with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation
+are those of the authors and should not be interpreted as representing
+official policies, either expressed or implied, of Richard Crowley.
diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md
new file mode 100644
index 0000000..27ddfee
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/README.md
@@ -0,0 +1,171 @@
+go-metrics
+==========
+
+![travis build status](https://travis-ci.org/rcrowley/go-metrics.svg?branch=master)
+
+Go port of Coda Hale's Metrics library: <https://github.com/dropwizard/metrics>.
+
+Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
+
+Usage
+-----
+
+Create and update metrics:
+
+```go
+c := metrics.NewCounter()
+metrics.Register("foo", c)
+c.Inc(47)
+
+g := metrics.NewGauge()
+metrics.Register("bar", g)
+g.Update(47)
+
+r := metrics.NewRegistry()
+fg := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() })
+
+s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
+h := metrics.NewHistogram(s)
+metrics.Register("baz", h)
+h.Update(47)
+
+m := metrics.NewMeter()
+metrics.Register("quux", m)
+m.Mark(47)
+
+t := metrics.NewTimer()
+metrics.Register("bang", t)
+t.Time(func() {})
+t.Update(47)
+```
+
+Register() is not thread-safe. For thread-safe metric registration use
+GetOrRegister:
+
+```go
+t := metrics.GetOrRegisterTimer("account.create.latency", nil)
+t.Time(func() {})
+t.Update(47)
+```
+
+**NOTE:** Be sure to unregister short-lived meters and timers, otherwise they will
+leak memory:
+
+```go
+// Will call Stop() on the Meter to allow for garbage collection
+metrics.Unregister("quux")
+// Or similarly for a Timer that embeds a Meter
+metrics.Unregister("bang")
+```
+
+Periodically log every metric in human-readable form to standard error:
+
+```go
+go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
+```
+
+Periodically log every metric in slightly-more-parseable form to syslog:
+
+```go
+w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
+go metrics.Syslog(metrics.DefaultRegistry, 60e9, w)
+```
+
+Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite):
+
+```go
+
+import "github.com/cyberdelia/go-metrics-graphite"
+
+addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
+go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
+```
+
+Periodically emit every metric into InfluxDB:
+
+**NOTE:** This has been pulled out of the library due to constant fluctuations
+in the InfluxDB API. In fact, all client libraries are on their way out. See
+issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and
+[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details.
+
+```go
+import "github.com/vrischmann/go-metrics-influxdb"
+
+go influxdb.InfluxDB(metrics.DefaultRegistry,
+  10e9,
+  "127.0.0.1:8086",
+  "database-name",
+  "username",
+  "password",
+)
+```
+
+Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato):
+
+**Note**: the client included with this repository under the `librato` package
+has been deprecated and moved to the repository linked above.
+
+```go
+import "github.com/mihasya/go-metrics-librato"
+
+go librato.Librato(metrics.DefaultRegistry,
+    10e9,                  // interval
+    "example@example.com", // account owner email address
+    "token",               // Librato API token
+    "hostname",            // source
+    []float64{0.95},       // percentiles to send
+    time.Millisecond,      // time unit
+)
+```
+
+Periodically emit every metric to StatHat:
+
+```go
+import "github.com/rcrowley/go-metrics/stathat"
+
+go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com")
+```
+
+Maintain all metrics along with expvars at `/debug/metrics`:
+
+This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/)
+but exposes them under `/debug/metrics`, which shows a JSON representation of
+all your usual expvars as well as all your go-metrics.
+
+```go
+import "github.com/rcrowley/go-metrics/exp"
+
+exp.Exp(metrics.DefaultRegistry)
+```
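+
+`exp.Exp` only registers the handler; an HTTP server must also be listening
+for `/debug/metrics` to be reachable. A minimal sketch (the listen address is
+illustrative, and the handler is assumed to be mounted on the default mux, as
+with expvar):
+
+```go
+import "net/http"
+
+exp.Exp(metrics.DefaultRegistry)
+go http.ListenAndServe("127.0.0.1:8080", nil)
+```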
+
+Installation
+------------
+
+```sh
+go get github.com/rcrowley/go-metrics
+```
+
+StatHat support additionally requires their Go client:
+
+```sh
+go get github.com/stathat/go
+```
+
+Publishing Metrics
+------------------
+
+Clients are available for the following destinations:
+
+* AppOptics - https://github.com/ysamlan/go-metrics-appoptics
+* Librato - https://github.com/mihasya/go-metrics-librato
+* Graphite - https://github.com/cyberdelia/go-metrics-graphite
+* InfluxDB - https://github.com/vrischmann/go-metrics-influxdb
+* Ganglia - https://github.com/appscode/metlia
+* Prometheus - https://github.com/deathowl/go-metrics-prometheus
+* DataDog - https://github.com/syntaqx/go-metrics-datadog
+* SignalFX - https://github.com/pascallouisperez/go-metrics-signalfx
+* Honeycomb - https://github.com/getspine/go-metrics-honeycomb
+* Wavefront - https://github.com/wavefrontHQ/go-metrics-wavefront
+* Open-Falcon - https://github.com/g4zhuj/go-metrics-falcon
+* AWS CloudWatch - [https://github.com/savaki/cloudmetrics](https://github.com/savaki/cloudmetrics)
diff --git a/vendor/github.com/rcrowley/go-metrics/counter.go b/vendor/github.com/rcrowley/go-metrics/counter.go
new file mode 100644
index 0000000..bb7b039
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/counter.go
@@ -0,0 +1,112 @@
+package metrics
+
+import "sync/atomic"
+
+// Counters hold an int64 value that can be incremented and decremented.
+type Counter interface {
+	Clear()
+	Count() int64
+	Dec(int64)
+	Inc(int64)
+	Snapshot() Counter
+}
+
+// GetOrRegisterCounter returns an existing Counter or constructs and registers
+// a new StandardCounter.
+func GetOrRegisterCounter(name string, r Registry) Counter {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewCounter).(Counter)
+}
+
+// NewCounter constructs a new StandardCounter.
+func NewCounter() Counter {
+	if UseNilMetrics {
+		return NilCounter{}
+	}
+	return &StandardCounter{0}
+}
+
+// NewRegisteredCounter constructs and registers a new StandardCounter.
+func NewRegisteredCounter(name string, r Registry) Counter {
+	c := NewCounter()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// CounterSnapshot is a read-only copy of another Counter.
+type CounterSnapshot int64
+
+// Clear panics.
+func (CounterSnapshot) Clear() {
+	panic("Clear called on a CounterSnapshot")
+}
+
+// Count returns the count at the time the snapshot was taken.
+func (c CounterSnapshot) Count() int64 { return int64(c) }
+
+// Dec panics.
+func (CounterSnapshot) Dec(int64) {
+	panic("Dec called on a CounterSnapshot")
+}
+
+// Inc panics.
+func (CounterSnapshot) Inc(int64) {
+	panic("Inc called on a CounterSnapshot")
+}
+
+// Snapshot returns the snapshot.
+func (c CounterSnapshot) Snapshot() Counter { return c }
+
+// NilCounter is a no-op Counter.
+type NilCounter struct{}
+
+// Clear is a no-op.
+func (NilCounter) Clear() {}
+
+// Count is a no-op.
+func (NilCounter) Count() int64 { return 0 }
+
+// Dec is a no-op.
+func (NilCounter) Dec(i int64) {}
+
+// Inc is a no-op.
+func (NilCounter) Inc(i int64) {}
+
+// Snapshot is a no-op.
+func (NilCounter) Snapshot() Counter { return NilCounter{} }
+
+// StandardCounter is the standard implementation of a Counter and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardCounter struct {
+	count int64
+}
+
+// Clear sets the counter to zero.
+func (c *StandardCounter) Clear() {
+	atomic.StoreInt64(&c.count, 0)
+}
+
+// Count returns the current count.
+func (c *StandardCounter) Count() int64 {
+	return atomic.LoadInt64(&c.count)
+}
+
+// Dec decrements the counter by the given amount.
+func (c *StandardCounter) Dec(i int64) {
+	atomic.AddInt64(&c.count, -i)
+}
+
+// Inc increments the counter by the given amount.
+func (c *StandardCounter) Inc(i int64) {
+	atomic.AddInt64(&c.count, i)
+}
+
+// Snapshot returns a read-only copy of the counter.
+func (c *StandardCounter) Snapshot() Counter {
+	return CounterSnapshot(c.Count())
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go
new file mode 100644
index 0000000..179e5aa
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/debug.go
@@ -0,0 +1,80 @@
+package metrics
+
+import (
+	"runtime/debug"
+	"sync"
+	"time"
+)
+
+var (
+	debugMetrics struct {
+		GCStats struct {
+			LastGC Gauge
+			NumGC  Gauge
+			Pause  Histogram
+			//PauseQuantiles Histogram
+			PauseTotal Gauge
+		}
+		ReadGCStats Timer
+	}
+	gcStats                  debug.GCStats
+	registerDebugMetricsOnce = sync.Once{}
+)
+
+// CaptureDebugGCStats captures new values for the Go garbage collector
+// statistics exported in debug.GCStats.  It is designed to be called in a
+// background goroutine.
+func CaptureDebugGCStats(r Registry, d time.Duration) {
+	for range time.Tick(d) {
+		CaptureDebugGCStatsOnce(r)
+	}
+}
+
+// CaptureDebugGCStatsOnce captures a single set of values for the Go garbage
+// collector statistics exported in debug.GCStats.  Calling this before
+// RegisterDebugGCStats has been called will panic.
+//
+// Be careful (but much less so) with this because debug.ReadGCStats calls
+// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world
+// operation, isn't something you want to be doing all the time.
+func CaptureDebugGCStatsOnce(r Registry) {
+	lastGC := gcStats.LastGC
+	t := time.Now()
+	debug.ReadGCStats(&gcStats)
+	debugMetrics.ReadGCStats.UpdateSince(t)
+
+	debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano()))
+	debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC))
+	if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) {
+		debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0]))
+	}
+	//debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles)
+	debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal))
+}
+
+// RegisterDebugGCStats registers metrics for the Go garbage collector
+// statistics exported in debug.GCStats.  The metrics are named by their
+// fully-qualified Go symbols, i.e. debug.GCStats.PauseTotal.
+func RegisterDebugGCStats(r Registry) {
+	registerDebugMetricsOnce.Do(func() {
+		debugMetrics.GCStats.LastGC = NewGauge()
+		debugMetrics.GCStats.NumGC = NewGauge()
+		debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015))
+		//debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015))
+		debugMetrics.GCStats.PauseTotal = NewGauge()
+		debugMetrics.ReadGCStats = NewTimer()
+
+		r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC)
+		r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC)
+		r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause)
+		//r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles)
+		r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal)
+		r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats)
+	})
+}
+
+// Allocate an initial slice for gcStats.Pause to avoid allocations during
+// normal operation.
+func init() {
+	gcStats.Pause = make([]time.Duration, 11)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go
new file mode 100644
index 0000000..a8183dd
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/ewma.go
@@ -0,0 +1,138 @@
+package metrics
+
+import (
+	"math"
+	"sync"
+	"sync/atomic"
+)
+
+// EWMAs continuously calculate an exponentially-weighted moving average
+// based on an outside source of clock ticks.
+type EWMA interface {
+	Rate() float64
+	Snapshot() EWMA
+	Tick()
+	Update(int64)
+}
+
+// NewEWMA constructs a new EWMA with the given alpha.
+func NewEWMA(alpha float64) EWMA {
+	if UseNilMetrics {
+		return NilEWMA{}
+	}
+	return &StandardEWMA{alpha: alpha}
+}
+
+// NewEWMA1 constructs a new EWMA for a one-minute moving average.
+func NewEWMA1() EWMA {
+	return NewEWMA(1 - math.Exp(-5.0/60.0/1))
+}
+
+// NewEWMA5 constructs a new EWMA for a five-minute moving average.
+func NewEWMA5() EWMA {
+	return NewEWMA(1 - math.Exp(-5.0/60.0/5))
+}
+
+// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
+func NewEWMA15() EWMA {
+	return NewEWMA(1 - math.Exp(-5.0/60.0/15))
+}
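+
+// The alpha constants above are the standard EWMA smoothing factor for a
+// five-second tick interval and an averaging window of m minutes:
+//
+//	alpha = 1 - exp(-5.0 / 60.0 / m)
+//
+// which is why StandardEWMA.Tick (below) must be called every five seconds
+// for the one-, five- and fifteen-minute rates to be meaningful.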
+
+// EWMASnapshot is a read-only copy of another EWMA.
+type EWMASnapshot float64
+
+// Rate returns the rate of events per second at the time the snapshot was
+// taken.
+func (a EWMASnapshot) Rate() float64 { return float64(a) }
+
+// Snapshot returns the snapshot.
+func (a EWMASnapshot) Snapshot() EWMA { return a }
+
+// Tick panics.
+func (EWMASnapshot) Tick() {
+	panic("Tick called on an EWMASnapshot")
+}
+
+// Update panics.
+func (EWMASnapshot) Update(int64) {
+	panic("Update called on an EWMASnapshot")
+}
+
+// NilEWMA is a no-op EWMA.
+type NilEWMA struct{}
+
+// Rate is a no-op.
+func (NilEWMA) Rate() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
+
+// Tick is a no-op.
+func (NilEWMA) Tick() {}
+
+// Update is a no-op.
+func (NilEWMA) Update(n int64) {}
+
+// StandardEWMA is the standard implementation of an EWMA and tracks the number
+// of uncounted events and processes them on each tick.  It uses the
+// sync/atomic package to manage uncounted events.
+type StandardEWMA struct {
+	uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
+	alpha     float64
+	rate      uint64
+	init      uint32
+	mutex     sync.Mutex
+}
+
+// Rate returns the moving average rate of events per second.
+func (a *StandardEWMA) Rate() float64 {
+	currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate)) * float64(1e9)
+	return currentRate
+}
+
+// Snapshot returns a read-only copy of the EWMA.
+func (a *StandardEWMA) Snapshot() EWMA {
+	return EWMASnapshot(a.Rate())
+}
+
+// Tick ticks the clock to update the moving average.  It assumes it is called
+// every five seconds.
+func (a *StandardEWMA) Tick() {
+	// Optimization to avoid mutex locking in the hot-path.
+	if atomic.LoadUint32(&a.init) == 1 {
+		a.updateRate(a.fetchInstantRate())
+	} else {
+		// Slow-path: this is only needed on the first Tick() and preserves transactional updating
+		// of init and rate in the else block. The first conditional is needed below because
+		// a different thread could have set a.init = 1 between the time of the first atomic load and when
+		// the lock was acquired.
+		a.mutex.Lock()
+		if atomic.LoadUint32(&a.init) == 1 {
+			// The fetchInstantRate() call uses atomic loads, which are unnecessary in
+			// this critical section, but again, this section is only invoked on the
+			// first successful Tick() operation.
+			a.updateRate(a.fetchInstantRate())
+		} else {
+			atomic.StoreUint32(&a.init, 1)
+			atomic.StoreUint64(&a.rate, math.Float64bits(a.fetchInstantRate()))
+		}
+		a.mutex.Unlock()
+	}
+}
+
+func (a *StandardEWMA) fetchInstantRate() float64 {
+	count := atomic.LoadInt64(&a.uncounted)
+	atomic.AddInt64(&a.uncounted, -count)
+	instantRate := float64(count) / float64(5e9)
+	return instantRate
+}
+
+func (a *StandardEWMA) updateRate(instantRate float64) {
+	currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate))
+	currentRate += a.alpha * (instantRate - currentRate)
+	atomic.StoreUint64(&a.rate, math.Float64bits(currentRate))
+}
+
+// Update adds n uncounted events.
+func (a *StandardEWMA) Update(n int64) {
+	atomic.AddInt64(&a.uncounted, n)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/vendor/github.com/rcrowley/go-metrics/gauge.go
new file mode 100644
index 0000000..cb57a93
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge.go
@@ -0,0 +1,120 @@
+package metrics
+
+import "sync/atomic"
+
+// Gauges hold an int64 value that can be set arbitrarily.
+type Gauge interface {
+	Snapshot() Gauge
+	Update(int64)
+	Value() int64
+}
+
+// GetOrRegisterGauge returns an existing Gauge or constructs and registers a
+// new StandardGauge.
+func GetOrRegisterGauge(name string, r Registry) Gauge {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewGauge).(Gauge)
+}
+
+// NewGauge constructs a new StandardGauge.
+func NewGauge() Gauge {
+	if UseNilMetrics {
+		return NilGauge{}
+	}
+	return &StandardGauge{0}
+}
+
+// NewRegisteredGauge constructs and registers a new StandardGauge.
+func NewRegisteredGauge(name string, r Registry) Gauge {
+	c := NewGauge()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// NewFunctionalGauge constructs a new FunctionalGauge.
+func NewFunctionalGauge(f func() int64) Gauge {
+	if UseNilMetrics {
+		return NilGauge{}
+	}
+	return &FunctionalGauge{value: f}
+}
+
+// NewRegisteredFunctionalGauge constructs and registers a new FunctionalGauge.
+func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge {
+	c := NewFunctionalGauge(f)
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// GaugeSnapshot is a read-only copy of another Gauge.
+type GaugeSnapshot int64
+
+// Snapshot returns the snapshot.
+func (g GaugeSnapshot) Snapshot() Gauge { return g }
+
+// Update panics.
+func (GaugeSnapshot) Update(int64) {
+	panic("Update called on a GaugeSnapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeSnapshot) Value() int64 { return int64(g) }
+
+// NilGauge is a no-op Gauge.
+type NilGauge struct{}
+
+// Snapshot is a no-op.
+func (NilGauge) Snapshot() Gauge { return NilGauge{} }
+
+// Update is a no-op.
+func (NilGauge) Update(v int64) {}
+
+// Value is a no-op.
+func (NilGauge) Value() int64 { return 0 }
+
+// StandardGauge is the standard implementation of a Gauge and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardGauge struct {
+	value int64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGauge) Snapshot() Gauge {
+	return GaugeSnapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGauge) Update(v int64) {
+	atomic.StoreInt64(&g.value, v)
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGauge) Value() int64 {
+	return atomic.LoadInt64(&g.value)
+}
+
+// FunctionalGauge returns the value from a given function.
+type FunctionalGauge struct {
+	value func() int64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGauge) Value() int64 {
+	return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGauge) Update(int64) {
+	panic("Update called on a FunctionalGauge")
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
new file mode 100644
index 0000000..3962e6d
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
@@ -0,0 +1,125 @@
+package metrics
+
+import (
+	"math"
+	"sync/atomic"
+)
+
+// GaugeFloat64s hold a float64 value that can be set arbitrarily.
+type GaugeFloat64 interface {
+	Snapshot() GaugeFloat64
+	Update(float64)
+	Value() float64
+}
+
+// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
+// new StandardGaugeFloat64.
+func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	// Pass the constructor rather than an instance so the gauge is only
+	// allocated when the name is not already registered.
+	return r.GetOrRegister(name, NewGaugeFloat64).(GaugeFloat64)
+}
+
+// NewGaugeFloat64 constructs a new StandardGaugeFloat64.
+func NewGaugeFloat64() GaugeFloat64 {
+	if UseNilMetrics {
+		return NilGaugeFloat64{}
+	}
+	return &StandardGaugeFloat64{
+		value: 0.0,
+	}
+}
+
+// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
+func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
+	c := NewGaugeFloat64()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// NewFunctionalGaugeFloat64 constructs a new FunctionalGaugeFloat64.
+func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
+	if UseNilMetrics {
+		return NilGaugeFloat64{}
+	}
+	return &FunctionalGaugeFloat64{value: f}
+}
+
+// NewRegisteredFunctionalGaugeFloat64 constructs and registers a new
+// FunctionalGaugeFloat64.
+func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 {
+	c := NewFunctionalGaugeFloat64(f)
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
+type GaugeFloat64Snapshot float64
+
+// Snapshot returns the snapshot.
+func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
+
+// Update panics.
+func (GaugeFloat64Snapshot) Update(float64) {
+	panic("Update called on a GaugeFloat64Snapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
+
+// NilGaugeFloat64 is a no-op GaugeFloat64.
+type NilGaugeFloat64 struct{}
+
+// Snapshot is a no-op.
+func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
+
+// Update is a no-op.
+func (NilGaugeFloat64) Update(v float64) {}
+
+// Value is a no-op.
+func (NilGaugeFloat64) Value() float64 { return 0.0 }
+
+// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and
+// uses the sync/atomic package to manage a single float64 value, stored as
+// its IEEE-754 bit pattern in a uint64.
+type StandardGaugeFloat64 struct {
+	value uint64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
+	return GaugeFloat64Snapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGaugeFloat64) Update(v float64) {
+	atomic.StoreUint64(&g.value, math.Float64bits(v))
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGaugeFloat64) Value() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&g.value))
+}
+
+// FunctionalGaugeFloat64 returns the value from a given function.
+type FunctionalGaugeFloat64 struct {
+	value func() float64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGaugeFloat64) Value() float64 {
+	return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGaugeFloat64) Update(float64) {
+	panic("Update called on a FunctionalGaugeFloat64")
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/graphite.go b/vendor/github.com/rcrowley/go-metrics/graphite.go
new file mode 100644
index 0000000..abd0a7d
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/graphite.go
@@ -0,0 +1,113 @@
+package metrics
+
+import (
+	"bufio"
+	"fmt"
+	"log"
+	"net"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// GraphiteConfig provides a container with configuration parameters for
+// the Graphite exporter
+type GraphiteConfig struct {
+	Addr          *net.TCPAddr  // Network address to connect to
+	Registry      Registry      // Registry to be exported
+	FlushInterval time.Duration // Flush interval
+	DurationUnit  time.Duration // Time conversion unit for durations
+	Prefix        string        // Prefix to be prepended to metric names
+	Percentiles   []float64     // Percentiles to export from timers and histograms
+}
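+
+// Each line written by this exporter follows the Graphite plaintext protocol:
+//
+//	<prefix>.<name>.<stat> <value> <unix-timestamp>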
+
+// Graphite is a blocking exporter function which reports metrics in r
+// to a graphite server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+	GraphiteWithConfig(GraphiteConfig{
+		Addr:          addr,
+		Registry:      r,
+		FlushInterval: d,
+		DurationUnit:  time.Nanosecond,
+		Prefix:        prefix,
+		Percentiles:   []float64{0.5, 0.75, 0.95, 0.99, 0.999},
+	})
+}
+
+// GraphiteWithConfig is a blocking exporter function just like Graphite,
+// but it takes a GraphiteConfig instead.
+func GraphiteWithConfig(c GraphiteConfig) {
+	log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
+	for range time.Tick(c.FlushInterval) {
+		if err := graphite(&c); nil != err {
+			log.Println(err)
+		}
+	}
+}
+
+// GraphiteOnce performs a single submission to Graphite, returning a
+// non-nil error on failed connections. This can be used in a loop
+// similar to GraphiteWithConfig for custom error handling.
+func GraphiteOnce(c GraphiteConfig) error {
+	log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
+	return graphite(&c)
+}
+
+func graphite(c *GraphiteConfig) error {
+	now := time.Now().Unix()
+	du := float64(c.DurationUnit)
+	conn, err := net.DialTCP("tcp", nil, c.Addr)
+	if nil != err {
+		return err
+	}
+	defer conn.Close()
+	w := bufio.NewWriter(conn)
+	c.Registry.Each(func(name string, i interface{}) {
+		switch metric := i.(type) {
+		case Counter:
+			fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now)
+		case Gauge:
+			fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
+		case GaugeFloat64:
+			fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now)
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles(c.Percentiles)
+			fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now)
+			fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now)
+			fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now)
+			fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now)
+			fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now)
+			for psIdx, psKey := range c.Percentiles {
+				key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
+				fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
+			}
+		case Meter:
+			m := metric.Snapshot()
+			fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now)
+			fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now)
+			fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now)
+			fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now)
+			fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now)
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles(c.Percentiles)
+			fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now)
+			fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now)
+			fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now)
+			fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now)
+			fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now)
+			for psIdx, psKey := range c.Percentiles {
+				key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
+				fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
+			}
+			fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now)
+			fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now)
+			fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now)
+			fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now)
+		}
+		w.Flush()
+	})
+	return nil
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/healthcheck.go b/vendor/github.com/rcrowley/go-metrics/healthcheck.go
new file mode 100644
index 0000000..445131c
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/healthcheck.go
@@ -0,0 +1,61 @@
+package metrics
+
+// Healthchecks hold an error value describing an arbitrary up/down status.
+type Healthcheck interface {
+	Check()
+	Error() error
+	Healthy()
+	Unhealthy(error)
+}
+
+// NewHealthcheck constructs a new Healthcheck which will use the given
+// function to update its status.
+func NewHealthcheck(f func(Healthcheck)) Healthcheck {
+	if UseNilMetrics {
+		return NilHealthcheck{}
+	}
+	return &StandardHealthcheck{nil, f}
+}
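+
+// An illustrative use (pingDatabase stands in for any dependency probe and is
+// not part of this package):
+//
+//	hc := NewHealthcheck(func(h Healthcheck) {
+//		if err := pingDatabase(); err != nil {
+//			h.Unhealthy(err)
+//		} else {
+//			h.Healthy()
+//		}
+//	})
+//	hc.Check()        // runs the probe and records the status
+//	err := hc.Error() // nil while healthy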
+
+// NilHealthcheck is a no-op.
+type NilHealthcheck struct{}
+
+// Check is a no-op.
+func (NilHealthcheck) Check() {}
+
+// Error is a no-op.
+func (NilHealthcheck) Error() error { return nil }
+
+// Healthy is a no-op.
+func (NilHealthcheck) Healthy() {}
+
+// Unhealthy is a no-op.
+func (NilHealthcheck) Unhealthy(error) {}
+
+// StandardHealthcheck is the standard implementation of a Healthcheck and
+// stores the status and a function to call to update the status.
+type StandardHealthcheck struct {
+	err error
+	f   func(Healthcheck)
+}
+
+// Check runs the healthcheck function to update the healthcheck's status.
+func (h *StandardHealthcheck) Check() {
+	h.f(h)
+}
+
+// Error returns the healthcheck's status, which will be nil if it is healthy.
+func (h *StandardHealthcheck) Error() error {
+	return h.err
+}
+
+// Healthy marks the healthcheck as healthy.
+func (h *StandardHealthcheck) Healthy() {
+	h.err = nil
+}
+
+// Unhealthy marks the healthcheck as unhealthy.  The error is stored and
+// may be retrieved by the Error method.
+func (h *StandardHealthcheck) Unhealthy(err error) {
+	h.err = err
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/histogram.go b/vendor/github.com/rcrowley/go-metrics/histogram.go
new file mode 100644
index 0000000..dbc837f
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/histogram.go
@@ -0,0 +1,202 @@
+package metrics
+
+// Histograms calculate distribution statistics from a series of int64 values.
+type Histogram interface {
+	Clear()
+	Count() int64
+	Max() int64
+	Mean() float64
+	Min() int64
+	Percentile(float64) float64
+	Percentiles([]float64) []float64
+	Sample() Sample
+	Snapshot() Histogram
+	StdDev() float64
+	Sum() int64
+	Update(int64)
+	Variance() float64
+}
+
+// GetOrRegisterHistogram returns an existing Histogram or constructs and
+// registers a new StandardHistogram.
+func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram)
+}
+
+// NewHistogram constructs a new StandardHistogram from a Sample.
+func NewHistogram(s Sample) Histogram {
+	if UseNilMetrics {
+		return NilHistogram{}
+	}
+	return &StandardHistogram{sample: s}
+}
+
+// NewRegisteredHistogram constructs and registers a new StandardHistogram from
+// a Sample.
+func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
+	c := NewHistogram(s)
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// HistogramSnapshot is a read-only copy of another Histogram.
+type HistogramSnapshot struct {
+	sample *SampleSnapshot
+}
+
+// Clear panics.
+func (*HistogramSnapshot) Clear() {
+	panic("Clear called on a HistogramSnapshot")
+}
+
+// Count returns the number of samples recorded at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample at the time the snapshot
+// was taken.
+func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) Percentile(p float64) float64 {
+	return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the sample
+// at the time the snapshot was taken.
+func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
+	return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *HistogramSnapshot) Sample() Sample { return h.sample }
+
+// Snapshot returns the snapshot.
+func (h *HistogramSnapshot) Snapshot() Histogram { return h }
+
+// StdDev returns the standard deviation of the values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
+
+// Sum returns the sum in the sample at the time the snapshot was taken.
+func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
+
+// Update panics.
+func (*HistogramSnapshot) Update(int64) {
+	panic("Update called on a HistogramSnapshot")
+}
+
+// Variance returns the variance of inputs at the time the snapshot was taken.
+func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
+
+// NilHistogram is a no-op Histogram.
+type NilHistogram struct{}
+
+// Clear is a no-op.
+func (NilHistogram) Clear() {}
+
+// Count is a no-op.
+func (NilHistogram) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilHistogram) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilHistogram) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilHistogram) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilHistogram) Percentiles(ps []float64) []float64 {
+	return make([]float64, len(ps))
+}
+
+// Sample is a no-op.
+func (NilHistogram) Sample() Sample { return NilSample{} }
+
+// Snapshot is a no-op.
+func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
+
+// StdDev is a no-op.
+func (NilHistogram) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilHistogram) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilHistogram) Update(v int64) {}
+
+// Variance is a no-op.
+func (NilHistogram) Variance() float64 { return 0.0 }
+
+// StandardHistogram is the standard implementation of a Histogram and uses a
+// Sample to bound its memory use.
+type StandardHistogram struct {
+	sample Sample
+}
+
+// Clear clears the histogram and its sample.
+func (h *StandardHistogram) Clear() { h.sample.Clear() }
+
+// Count returns the number of samples recorded since the histogram was last
+// cleared.
+func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample.
+func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample.
+func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample.
+func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of the values in the sample.
+func (h *StandardHistogram) Percentile(p float64) float64 {
+	return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
+func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
+	return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *StandardHistogram) Sample() Sample { return h.sample }
+
+// Snapshot returns a read-only copy of the histogram.
+func (h *StandardHistogram) Snapshot() Histogram {
+	return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
+
+// Sum returns the sum in the sample.
+func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
+
+// Update samples a new value.
+func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
+
+// Variance returns the variance of the values in the sample.
+func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go
new file mode 100644
index 0000000..174b947
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/json.go
@@ -0,0 +1,31 @@
+package metrics
+
+import (
+	"encoding/json"
+	"io"
+	"time"
+)
+
+// MarshalJSON returns a byte slice containing a JSON representation of all
+// the metrics in the Registry.
+func (r *StandardRegistry) MarshalJSON() ([]byte, error) {
+	return json.Marshal(r.GetAll())
+}
+
+// WriteJSON periodically writes metrics from the given registry to the
+// specified io.Writer as JSON.
+func WriteJSON(r Registry, d time.Duration, w io.Writer) {
+	for range time.Tick(d) {
+		WriteJSONOnce(r, w)
+	}
+}
+
+// WriteJSONOnce writes metrics from the given registry to the specified
+// io.Writer as JSON.
+func WriteJSONOnce(r Registry, w io.Writer) {
+	json.NewEncoder(w).Encode(r)
+}
+
+// MarshalJSON returns a byte slice containing a JSON representation of all
+// the metrics in the PrefixedRegistry.
+func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) {
+	return json.Marshal(p.GetAll())
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go
new file mode 100644
index 0000000..2614a0a
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/log.go
@@ -0,0 +1,100 @@
+package metrics
+
+import (
+	"time"
+)
+
+// Logger is the minimal logging interface required by Log and friends;
+// *log.Logger satisfies it.
+type Logger interface {
+	Printf(format string, v ...interface{})
+}
+
+// Log outputs each metric in the given registry periodically using the given logger.
+func Log(r Registry, freq time.Duration, l Logger) {
+	LogScaled(r, freq, time.Nanosecond, l)
+}
+
+// LogOnCue outputs each metric in the given registry on demand through the
+// channel using the given logger.
+func LogOnCue(r Registry, ch chan interface{}, l Logger) {
+	LogScaledOnCue(r, ch, time.Nanosecond, l)
+}
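+
+// For example (illustrative; logger is any Logger implementation), a single
+// logging pass can be triggered on demand by sending on the channel:
+//
+//	cue := make(chan interface{})
+//	go LogOnCue(DefaultRegistry, cue, logger)
+//	cue <- struct{}{} // logs every registered metric once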
+
+// LogScaled outputs each metric in the given registry periodically using the given
+// logger. Print timings in `scale` units (e.g. time.Millisecond) rather than nanos.
+func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
+	ch := make(chan interface{})
+	go func(channel chan interface{}) {
+		for range time.Tick(freq) {
+			channel <- struct{}{}
+		}
+	}(ch)
+	LogScaledOnCue(r, ch, scale, l)
+}
+
+// LogScaledOnCue outputs each metric in the given registry on demand through the channel
+// using the given logger. Print timings in `scale` units (e.g. time.Millisecond) rather
+// than nanos.
+func LogScaledOnCue(r Registry, ch chan interface{}, scale time.Duration, l Logger) {
+	du := float64(scale)
+	duSuffix := scale.String()[1:]
+
+	for range ch {
+		r.Each(func(name string, i interface{}) {
+			switch metric := i.(type) {
+			case Counter:
+				l.Printf("counter %s\n", name)
+				l.Printf("  count:       %9d\n", metric.Count())
+			case Gauge:
+				l.Printf("gauge %s\n", name)
+				l.Printf("  value:       %9d\n", metric.Value())
+			case GaugeFloat64:
+				l.Printf("gauge %s\n", name)
+				l.Printf("  value:       %f\n", metric.Value())
+			case Healthcheck:
+				metric.Check()
+				l.Printf("healthcheck %s\n", name)
+				l.Printf("  error:       %v\n", metric.Error())
+			case Histogram:
+				h := metric.Snapshot()
+				ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				l.Printf("histogram %s\n", name)
+				l.Printf("  count:       %9d\n", h.Count())
+				l.Printf("  min:         %9d\n", h.Min())
+				l.Printf("  max:         %9d\n", h.Max())
+				l.Printf("  mean:        %12.2f\n", h.Mean())
+				l.Printf("  stddev:      %12.2f\n", h.StdDev())
+				l.Printf("  median:      %12.2f\n", ps[0])
+				l.Printf("  75%%:         %12.2f\n", ps[1])
+				l.Printf("  95%%:         %12.2f\n", ps[2])
+				l.Printf("  99%%:         %12.2f\n", ps[3])
+				l.Printf("  99.9%%:       %12.2f\n", ps[4])
+			case Meter:
+				m := metric.Snapshot()
+				l.Printf("meter %s\n", name)
+				l.Printf("  count:       %9d\n", m.Count())
+				l.Printf("  1-min rate:  %12.2f\n", m.Rate1())
+				l.Printf("  5-min rate:  %12.2f\n", m.Rate5())
+				l.Printf("  15-min rate: %12.2f\n", m.Rate15())
+				l.Printf("  mean rate:   %12.2f\n", m.RateMean())
+			case Timer:
+				t := metric.Snapshot()
+				ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				l.Printf("timer %s\n", name)
+				l.Printf("  count:       %9d\n", t.Count())
+				l.Printf("  min:         %12.2f%s\n", float64(t.Min())/du, duSuffix)
+				l.Printf("  max:         %12.2f%s\n", float64(t.Max())/du, duSuffix)
+				l.Printf("  mean:        %12.2f%s\n", t.Mean()/du, duSuffix)
+				l.Printf("  stddev:      %12.2f%s\n", t.StdDev()/du, duSuffix)
+				l.Printf("  median:      %12.2f%s\n", ps[0]/du, duSuffix)
+				l.Printf("  75%%:         %12.2f%s\n", ps[1]/du, duSuffix)
+				l.Printf("  95%%:         %12.2f%s\n", ps[2]/du, duSuffix)
+				l.Printf("  99%%:         %12.2f%s\n", ps[3]/du, duSuffix)
+				l.Printf("  99.9%%:       %12.2f%s\n", ps[4]/du, duSuffix)
+				l.Printf("  1-min rate:  %12.2f\n", t.Rate1())
+				l.Printf("  5-min rate:  %12.2f\n", t.Rate5())
+				l.Printf("  15-min rate: %12.2f\n", t.Rate15())
+				l.Printf("  mean rate:   %12.2f\n", t.RateMean())
+			}
+		})
+	}
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/memory.md b/vendor/github.com/rcrowley/go-metrics/memory.md
new file mode 100644
index 0000000..47454f5
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/memory.md
@@ -0,0 +1,285 @@
+Memory usage
+============
+
+(Highly unscientific.)
+
+Command used to gather static memory usage:
+
+```sh
+grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status"
+```
+
+Program used to gather baseline memory usage:
+
+```go
+package main
+
+import "time"
+
+func main() {
+	time.Sleep(600e9)
+}
+```
+
+Baseline
+--------
+
+```
+VmPeak:    42604 kB
+VmSize:    42604 kB
+VmLck:         0 kB
+VmHWM:      1120 kB
+VmRSS:      1120 kB
+VmData:    35460 kB
+VmStk:       136 kB
+VmExe:      1020 kB
+VmLib:      1848 kB
+VmPTE:        36 kB
+VmSwap:        0 kB
+```
+
+Program used to gather metric memory usage (with other metrics being similar):
+
+```go
+package main
+
+import (
+	"fmt"
+	"metrics"
+	"time"
+)
+
+func main() {
+	fmt.Sprintf("foo")
+	metrics.NewRegistry()
+	time.Sleep(600e9)
+}
+```
+
+1000 counters registered
+------------------------
+
+```
+VmPeak:    44016 kB
+VmSize:    44016 kB
+VmLck:         0 kB
+VmHWM:      1928 kB
+VmRSS:      1928 kB
+VmData:    36868 kB
+VmStk:       136 kB
+VmExe:      1024 kB
+VmLib:      1848 kB
+VmPTE:        40 kB
+VmSwap:        0 kB
+```
+
+**1.412 kB virtual, TODO 0.808 kB resident per counter.**
+
+100000 counters registered
+--------------------------
+
+```
+VmPeak:    55024 kB
+VmSize:    55024 kB
+VmLck:         0 kB
+VmHWM:     12440 kB
+VmRSS:     12440 kB
+VmData:    47876 kB
+VmStk:       136 kB
+VmExe:      1024 kB
+VmLib:      1848 kB
+VmPTE:        64 kB
+VmSwap:        0 kB
+```
+
+**0.1242 kB virtual, 0.1132 kB resident per counter.**
+
+1000 gauges registered
+----------------------
+
+```
+VmPeak:    44012 kB
+VmSize:    44012 kB
+VmLck:         0 kB
+VmHWM:      1928 kB
+VmRSS:      1928 kB
+VmData:    36868 kB
+VmStk:       136 kB
+VmExe:      1020 kB
+VmLib:      1848 kB
+VmPTE:        40 kB
+VmSwap:        0 kB
+```
+
+**1.408 kB virtual, 0.808 kB resident per gauge.**
+
+100000 gauges registered
+------------------------
+
+```
+VmPeak:    55020 kB
+VmSize:    55020 kB
+VmLck:         0 kB
+VmHWM:     12432 kB
+VmRSS:     12432 kB
+VmData:    47876 kB
+VmStk:       136 kB
+VmExe:      1020 kB
+VmLib:      1848 kB
+VmPTE:        60 kB
+VmSwap:        0 kB
+```
+
+**0.12416 kB virtual, 0.11312 kB resident per gauge.**
+
+1000 histograms with a uniform sample size of 1028
+--------------------------------------------------
+
+```
+VmPeak:    72272 kB
+VmSize:    72272 kB
+VmLck:         0 kB
+VmHWM:     16204 kB
+VmRSS:     16204 kB
+VmData:    65100 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:        80 kB
+VmSwap:        0 kB
+```
+
+**29.668 kB virtual, TODO 15.084 kB resident per histogram.**
+
+10000 histograms with a uniform sample size of 1028
+---------------------------------------------------
+
+```
+VmPeak:   256912 kB
+VmSize:   256912 kB
+VmLck:         0 kB
+VmHWM:    146204 kB
+VmRSS:    146204 kB
+VmData:   249740 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:       448 kB
+VmSwap:        0 kB
+```
+
+**21.4308 kB virtual, 14.5084 kB resident per histogram.**
+
+50000 histograms with a uniform sample size of 1028
+---------------------------------------------------
+
+```
+VmPeak:   908112 kB
+VmSize:   908112 kB
+VmLck:         0 kB
+VmHWM:    645832 kB
+VmRSS:    645588 kB
+VmData:   900940 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:      1716 kB
+VmSwap:     1544 kB
+```
+
+**17.31016 kB virtual, 12.88936 kB resident per histogram.**
+
+1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+-------------------------------------------------------------------------------------
+
+```
+VmPeak:    62480 kB
+VmSize:    62480 kB
+VmLck:         0 kB
+VmHWM:     11572 kB
+VmRSS:     11572 kB
+VmData:    55308 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:        64 kB
+VmSwap:        0 kB
+```
+
+**19.876 kB virtual, 10.452 kB resident per histogram.**
+
+10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak:   153296 kB
+VmSize:   153296 kB
+VmLck:         0 kB
+VmHWM:    101176 kB
+VmRSS:    101176 kB
+VmData:   146124 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:       240 kB
+VmSwap:        0 kB
+```
+
+**11.0692 kB virtual, 10.0056 kB resident per histogram.**
+
+50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak:   557264 kB
+VmSize:   557264 kB
+VmLck:         0 kB
+VmHWM:    501056 kB
+VmRSS:    501056 kB
+VmData:   550092 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:      1032 kB
+VmSwap:        0 kB
+```
+
+**10.2932 kB virtual, 9.99872 kB resident per histogram.**
+
+1000 meters
+-----------
+
+```
+VmPeak:    74504 kB
+VmSize:    74504 kB
+VmLck:         0 kB
+VmHWM:     24124 kB
+VmRSS:     24124 kB
+VmData:    67340 kB
+VmStk:       136 kB
+VmExe:      1040 kB
+VmLib:      1848 kB
+VmPTE:        92 kB
+VmSwap:        0 kB
+```
+
+**31.9 kB virtual, 23.004 kB resident per meter.**
+
+10000 meters
+------------
+
+```
+VmPeak:   278920 kB
+VmSize:   278920 kB
+VmLck:         0 kB
+VmHWM:    227300 kB
+VmRSS:    227300 kB
+VmData:   271756 kB
+VmStk:       136 kB
+VmExe:      1040 kB
+VmLib:      1848 kB
+VmPTE:       488 kB
+VmSwap:        0 kB
+```
+
+**23.6316 kB virtual, 22.618 kB resident per meter.**
diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go
new file mode 100644
index 0000000..223669b
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/meter.go
@@ -0,0 +1,251 @@
+package metrics
+
+import (
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// Meters count events to produce exponentially-weighted moving average rates
+// at one-, five-, and fifteen-minutes and a mean rate.
+type Meter interface {
+	Count() int64
+	Mark(int64)
+	Rate1() float64
+	Rate5() float64
+	Rate15() float64
+	RateMean() float64
+	Snapshot() Meter
+	Stop()
+}
+
+// GetOrRegisterMeter returns an existing Meter or constructs and registers a
+// new StandardMeter.
+// Be sure to unregister the meter from the registry once it is of no use to
+// allow for garbage collection.
+func GetOrRegisterMeter(name string, r Registry) Meter {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewMeter).(Meter)
+}
+
+// NewMeter constructs a new StandardMeter and launches a goroutine.
+// Be sure to call Stop() once the meter is of no use to allow for garbage collection.
+func NewMeter() Meter {
+	if UseNilMetrics {
+		return NilMeter{}
+	}
+	m := newStandardMeter()
+	arbiter.Lock()
+	defer arbiter.Unlock()
+	arbiter.meters[m] = struct{}{}
+	if !arbiter.started {
+		arbiter.started = true
+		go arbiter.tick()
+	}
+	return m
+}
+
+// NewRegisteredMeter constructs and registers a new StandardMeter and
+// launches a goroutine.
+// Be sure to unregister the meter from the registry once it is of no use to
+// allow for garbage collection.
+func NewRegisteredMeter(name string, r Registry) Meter {
+	c := NewMeter()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// MeterSnapshot is a read-only copy of another Meter.
+type MeterSnapshot struct {
+	count                          int64
+	rate1, rate5, rate15, rateMean uint64
+}
+
+// Count returns the count of events at the time the snapshot was taken.
+func (m *MeterSnapshot) Count() int64 { return m.count }
+
+// Mark panics.
+func (*MeterSnapshot) Mark(n int64) {
+	panic("Mark called on a MeterSnapshot")
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (m *MeterSnapshot) Rate1() float64 { return math.Float64frombits(m.rate1) }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (m *MeterSnapshot) Rate5() float64 { return math.Float64frombits(m.rate5) }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (m *MeterSnapshot) Rate15() float64 { return math.Float64frombits(m.rate15) }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (m *MeterSnapshot) RateMean() float64 { return math.Float64frombits(m.rateMean) }
+
+// Snapshot returns the snapshot.
+func (m *MeterSnapshot) Snapshot() Meter { return m }
+
+// Stop is a no-op.
+func (m *MeterSnapshot) Stop() {}
+
+// NilMeter is a no-op Meter.
+type NilMeter struct{}
+
+// Count is a no-op.
+func (NilMeter) Count() int64 { return 0 }
+
+// Mark is a no-op.
+func (NilMeter) Mark(n int64) {}
+
+// Rate1 is a no-op.
+func (NilMeter) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilMeter) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilMeter) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilMeter) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilMeter) Snapshot() Meter { return NilMeter{} }
+
+// Stop is a no-op.
+func (NilMeter) Stop() {}
+
+// StandardMeter is the standard implementation of a Meter.
+type StandardMeter struct {
+	snapshot    *MeterSnapshot
+	a1, a5, a15 EWMA
+	startTime   time.Time
+	stopped     uint32
+}
+
+func newStandardMeter() *StandardMeter {
+	return &StandardMeter{
+		snapshot:  &MeterSnapshot{},
+		a1:        NewEWMA1(),
+		a5:        NewEWMA5(),
+		a15:       NewEWMA15(),
+		startTime: time.Now(),
+	}
+}
+
+// Stop stops the meter; Mark() becomes a no-op after the meter has been stopped.
+func (m *StandardMeter) Stop() {
+	if atomic.CompareAndSwapUint32(&m.stopped, 0, 1) {
+		arbiter.Lock()
+		delete(arbiter.meters, m)
+		arbiter.Unlock()
+	}
+}
+
+// Count returns the number of events recorded.
+func (m *StandardMeter) Count() int64 {
+	return atomic.LoadInt64(&m.snapshot.count)
+}
+
+// Mark records the occurrence of n events.
+func (m *StandardMeter) Mark(n int64) {
+	if atomic.LoadUint32(&m.stopped) == 1 {
+		return
+	}
+
+	atomic.AddInt64(&m.snapshot.count, n)
+
+	m.a1.Update(n)
+	m.a5.Update(n)
+	m.a15.Update(n)
+	m.updateSnapshot()
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (m *StandardMeter) Rate1() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate1))
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (m *StandardMeter) Rate5() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate5))
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (m *StandardMeter) Rate15() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate15))
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (m *StandardMeter) RateMean() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rateMean))
+}
+
+// Snapshot returns a read-only copy of the meter.
+func (m *StandardMeter) Snapshot() Meter {
+	copiedSnapshot := MeterSnapshot{
+		count:    atomic.LoadInt64(&m.snapshot.count),
+		rate1:    atomic.LoadUint64(&m.snapshot.rate1),
+		rate5:    atomic.LoadUint64(&m.snapshot.rate5),
+		rate15:   atomic.LoadUint64(&m.snapshot.rate15),
+		rateMean: atomic.LoadUint64(&m.snapshot.rateMean),
+	}
+	return &copiedSnapshot
+}
+
+func (m *StandardMeter) updateSnapshot() {
+	rate1 := math.Float64bits(m.a1.Rate())
+	rate5 := math.Float64bits(m.a5.Rate())
+	rate15 := math.Float64bits(m.a15.Rate())
+	rateMean := math.Float64bits(float64(m.Count()) / time.Since(m.startTime).Seconds())
+
+	atomic.StoreUint64(&m.snapshot.rate1, rate1)
+	atomic.StoreUint64(&m.snapshot.rate5, rate5)
+	atomic.StoreUint64(&m.snapshot.rate15, rate15)
+	atomic.StoreUint64(&m.snapshot.rateMean, rateMean)
+}
+
+func (m *StandardMeter) tick() {
+	m.a1.Tick()
+	m.a5.Tick()
+	m.a15.Tick()
+	m.updateSnapshot()
+}
+
+// meterArbiter ticks meters every 5s from a single goroutine.
+// Meters are referenced in a set so they can be stopped later.
+type meterArbiter struct {
+	sync.RWMutex
+	started bool
+	meters  map[*StandardMeter]struct{}
+	ticker  *time.Ticker
+}
+
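+// The 5e9 ns (five-second) ticker interval matches the assumption baked into
+// StandardEWMA.Tick, which expects to be ticked every five seconds.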
+var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})}
+
+// tick ticks all registered meters on the scheduled interval.
+func (ma *meterArbiter) tick() {
+	for range ma.ticker.C {
+		ma.tickMeters()
+	}
+}
+
+func (ma *meterArbiter) tickMeters() {
+	ma.RLock()
+	defer ma.RUnlock()
+	for meter := range ma.meters {
+		meter.tick()
+	}
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/metrics.go b/vendor/github.com/rcrowley/go-metrics/metrics.go
new file mode 100644
index 0000000..b97a49e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/metrics.go
@@ -0,0 +1,13 @@
+// Go port of Coda Hale's Metrics library
+//
+// <https://github.com/rcrowley/go-metrics>
+//
+// Coda Hale's original work: <https://github.com/codahale/metrics>
+package metrics
+
+// UseNilMetrics is checked by the constructor functions for all of the
+// standard metrics.  If it is true, the metric returned is a stub.
+//
+// This global kill-switch helps quantify the observer effect and makes
+// for less cluttered pprof profiles.
+var UseNilMetrics bool = false
diff --git a/vendor/github.com/rcrowley/go-metrics/opentsdb.go b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
new file mode 100644
index 0000000..266b6c9
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
@@ -0,0 +1,119 @@
+package metrics
+
+import (
+	"bufio"
+	"fmt"
+	"log"
+	"net"
+	"os"
+	"strings"
+	"time"
+)
+
+var shortHostName string
+
+// OpenTSDBConfig provides a container with configuration parameters for
+// the OpenTSDB exporter
+type OpenTSDBConfig struct {
+	Addr          *net.TCPAddr  // Network address to connect to
+	Registry      Registry      // Registry to be exported
+	FlushInterval time.Duration // Flush interval
+	DurationUnit  time.Duration // Time conversion unit for durations
+	Prefix        string        // Prefix to be prepended to metric names
+}
+
+// OpenTSDB is a blocking exporter function which reports metrics in r
+// to a TSDB server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+	OpenTSDBWithConfig(OpenTSDBConfig{
+		Addr:          addr,
+		Registry:      r,
+		FlushInterval: d,
+		DurationUnit:  time.Nanosecond,
+		Prefix:        prefix,
+	})
+}
+
+// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
+// but it takes an OpenTSDBConfig instead.
+func OpenTSDBWithConfig(c OpenTSDBConfig) {
+	for range time.Tick(c.FlushInterval) {
+		if err := openTSDB(&c); nil != err {
+			log.Println(err)
+		}
+	}
+}
+
+func getShortHostname() string {
+	if shortHostName == "" {
+		host, _ := os.Hostname()
+		if index := strings.Index(host, "."); index > 0 {
+			shortHostName = host[:index]
+		} else {
+			shortHostName = host
+		}
+	}
+	return shortHostName
+}
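+
+// Each line written below follows the OpenTSDB telnet-style line protocol,
+// with the short hostname attached as a host tag:
+//
+//	put <metric> <timestamp> <value> host=<hostname>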
+
+func openTSDB(c *OpenTSDBConfig) error {
+	shortHostname := getShortHostname()
+	now := time.Now().Unix()
+	du := float64(c.DurationUnit)
+	conn, err := net.DialTCP("tcp", nil, c.Addr)
+	if nil != err {
+		return err
+	}
+	defer conn.Close()
+	w := bufio.NewWriter(conn)
+	c.Registry.Each(func(name string, i interface{}) {
+		switch metric := i.(type) {
+		case Counter:
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
+		case Gauge:
+			fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+		case GaugeFloat64:
+			fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname)
+		case Meter:
+			m := metric.Snapshot()
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname)
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname)
+		}
+		w.Flush()
+	})
+	return nil
+}
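
For reviewers, a minimal sketch of how the exporter above can be wired up. It uses only names visible in this file plus `GetOrRegisterCounter`, a helper defined elsewhere in this vendored package; the endpoint address and flush settings are placeholders.

```go
package main

import (
	"net"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// Placeholder OpenTSDB telnet-put endpoint.
	addr, err := net.ResolveTCPAddr("tcp", "localhost:4242")
	if err != nil {
		panic(err)
	}

	r := metrics.NewRegistry()
	metrics.GetOrRegisterCounter("requests", r).Inc(1)

	// OpenTSDBWithConfig blocks on time.Tick, so it gets its own goroutine.
	go metrics.OpenTSDBWithConfig(metrics.OpenTSDBConfig{
		Addr:          addr,
		Registry:      r,
		FlushInterval: 10 * time.Second,
		DurationUnit:  time.Millisecond,
		Prefix:        "myapp",
	})

	select {} // keep the demo process alive
}
```
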
diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go
new file mode 100644
index 0000000..a8e6722
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/registry.go
@@ -0,0 +1,373 @@
+package metrics
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// DuplicateMetric is the error returned by Registry.Register when a metric
+// already exists.  If you mean to Register that metric you must first
+// Unregister the existing metric.
+type DuplicateMetric string
+
+func (err DuplicateMetric) Error() string {
+	return fmt.Sprintf("duplicate metric: %s", string(err))
+}
+
+// A Registry holds references to a set of metrics by name and can iterate
+// over them, calling callback functions provided by the user.
+//
+// This is an interface so as to encourage other structs to implement
+// the Registry API as appropriate.
+type Registry interface {
+
+	// Call the given function for each registered metric.
+	Each(func(string, interface{}))
+
+	// Get the metric by the given name or nil if none is registered.
+	Get(string) interface{}
+
+	// GetAll metrics in the Registry.
+	GetAll() map[string]map[string]interface{}
+
+	// Gets an existing metric or registers the given one.
+	// The interface can be the metric to register if not found in registry,
+	// or a function returning the metric for lazy instantiation.
+	GetOrRegister(string, interface{}) interface{}
+
+	// Register the given metric under the given name.
+	Register(string, interface{}) error
+
+	// Run all registered healthchecks.
+	RunHealthchecks()
+
+	// Unregister the metric with the given name.
+	Unregister(string)
+
+	// Unregister all metrics.  (Mostly for testing.)
+	UnregisterAll()
+}
+
+// The standard implementation of a Registry is a mutex-protected map
+// of names to metrics.
+type StandardRegistry struct {
+	metrics map[string]interface{}
+	mutex   sync.RWMutex
+}
+
+// Create a new registry.
+func NewRegistry() Registry {
+	return &StandardRegistry{metrics: make(map[string]interface{})}
+}
+
+// Call the given function for each registered metric.
+func (r *StandardRegistry) Each(f func(string, interface{})) {
+	metrics := r.registered()
+	for i := range metrics {
+		kv := &metrics[i]
+		f(kv.name, kv.value)
+	}
+}
+
+// Get the metric by the given name or nil if none is registered.
+func (r *StandardRegistry) Get(name string) interface{} {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	return r.metrics[name]
+}
+
+// Gets an existing metric or creates and registers a new one. Threadsafe
+// alternative to calling Get and Register on failure.
+// The interface can be the metric to register if not found in registry,
+// or a function returning the metric for lazy instantiation.
+func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} {
+	// check under the read lock first; multiple readers may hold it concurrently
+	r.mutex.RLock()
+	metric, ok := r.metrics[name]
+	r.mutex.RUnlock()
+	if ok {
+		return metric
+	}
+
+	// only take the write lock if we'll be modifying the metrics map
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	if metric, ok := r.metrics[name]; ok {
+		return metric
+	}
+	if v := reflect.ValueOf(i); v.Kind() == reflect.Func {
+		i = v.Call(nil)[0].Interface()
+	}
+	r.register(name, i)
+	return i
+}
+
+// Register the given metric under the given name.  Returns a DuplicateMetric
+// if a metric by the given name is already registered.
+func (r *StandardRegistry) Register(name string, i interface{}) error {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	return r.register(name, i)
+}
+
+// Run all registered healthchecks.
+func (r *StandardRegistry) RunHealthchecks() {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	for _, i := range r.metrics {
+		if h, ok := i.(Healthcheck); ok {
+			h.Check()
+		}
+	}
+}
+
+// GetAll metrics in the Registry.
+func (r *StandardRegistry) GetAll() map[string]map[string]interface{} {
+	data := make(map[string]map[string]interface{})
+	r.Each(func(name string, i interface{}) {
+		values := make(map[string]interface{})
+		switch metric := i.(type) {
+		case Counter:
+			values["count"] = metric.Count()
+		case Gauge:
+			values["value"] = metric.Value()
+		case GaugeFloat64:
+			values["value"] = metric.Value()
+		case Healthcheck:
+			values["error"] = nil
+			metric.Check()
+			if err := metric.Error(); nil != err {
+				values["error"] = metric.Error().Error()
+			}
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			values["count"] = h.Count()
+			values["min"] = h.Min()
+			values["max"] = h.Max()
+			values["mean"] = h.Mean()
+			values["stddev"] = h.StdDev()
+			values["median"] = ps[0]
+			values["75%"] = ps[1]
+			values["95%"] = ps[2]
+			values["99%"] = ps[3]
+			values["99.9%"] = ps[4]
+		case Meter:
+			m := metric.Snapshot()
+			values["count"] = m.Count()
+			values["1m.rate"] = m.Rate1()
+			values["5m.rate"] = m.Rate5()
+			values["15m.rate"] = m.Rate15()
+			values["mean.rate"] = m.RateMean()
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			values["count"] = t.Count()
+			values["min"] = t.Min()
+			values["max"] = t.Max()
+			values["mean"] = t.Mean()
+			values["stddev"] = t.StdDev()
+			values["median"] = ps[0]
+			values["75%"] = ps[1]
+			values["95%"] = ps[2]
+			values["99%"] = ps[3]
+			values["99.9%"] = ps[4]
+			values["1m.rate"] = t.Rate1()
+			values["5m.rate"] = t.Rate5()
+			values["15m.rate"] = t.Rate15()
+			values["mean.rate"] = t.RateMean()
+		}
+		data[name] = values
+	})
+	return data
+}
+
+// Unregister the metric with the given name.
+func (r *StandardRegistry) Unregister(name string) {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	r.stop(name)
+	delete(r.metrics, name)
+}
+
+// Unregister all metrics.  (Mostly for testing.)
+func (r *StandardRegistry) UnregisterAll() {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	for name := range r.metrics {
+		r.stop(name)
+		delete(r.metrics, name)
+	}
+}
+
+func (r *StandardRegistry) register(name string, i interface{}) error {
+	if _, ok := r.metrics[name]; ok {
+		return DuplicateMetric(name)
+	}
+	switch i.(type) {
+	case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer:
+		r.metrics[name] = i
+	}
+	return nil
+}
+
+type metricKV struct {
+	name  string
+	value interface{}
+}
+
+func (r *StandardRegistry) registered() []metricKV {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	metrics := make([]metricKV, 0, len(r.metrics))
+	for name, i := range r.metrics {
+		metrics = append(metrics, metricKV{
+			name:  name,
+			value: i,
+		})
+	}
+	return metrics
+}
+
+func (r *StandardRegistry) stop(name string) {
+	if i, ok := r.metrics[name]; ok {
+		if s, ok := i.(Stoppable); ok {
+			s.Stop()
+		}
+	}
+}
+
+// Stoppable is the interface implemented by metrics that have to be stopped.
+type Stoppable interface {
+	Stop()
+}
+
+type PrefixedRegistry struct {
+	underlying Registry
+	prefix     string
+}
+
+func NewPrefixedRegistry(prefix string) Registry {
+	return &PrefixedRegistry{
+		underlying: NewRegistry(),
+		prefix:     prefix,
+	}
+}
+
+func NewPrefixedChildRegistry(parent Registry, prefix string) Registry {
+	return &PrefixedRegistry{
+		underlying: parent,
+		prefix:     prefix,
+	}
+}
+
+// Call the given function for each registered metric.
+func (r *PrefixedRegistry) Each(fn func(string, interface{})) {
+	wrappedFn := func(prefix string) func(string, interface{}) {
+		return func(name string, iface interface{}) {
+			if strings.HasPrefix(name, prefix) {
+				fn(name, iface)
+			}
+		}
+	}
+
+	baseRegistry, prefix := findPrefix(r, "")
+	baseRegistry.Each(wrappedFn(prefix))
+}
+
+func findPrefix(registry Registry, prefix string) (Registry, string) {
+	switch r := registry.(type) {
+	case *PrefixedRegistry:
+		return findPrefix(r.underlying, r.prefix+prefix)
+	case *StandardRegistry:
+		return r, prefix
+	}
+	return nil, ""
+}
+
+// Get the metric by the given name or nil if none is registered.
+func (r *PrefixedRegistry) Get(name string) interface{} {
+	realName := r.prefix + name
+	return r.underlying.Get(realName)
+}
+
+// Gets an existing metric or registers the given one.
+// The interface can be the metric to register if not found in registry,
+// or a function returning the metric for lazy instantiation.
+func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} {
+	realName := r.prefix + name
+	return r.underlying.GetOrRegister(realName, metric)
+}
+
+// Register the given metric under the given name. The name will be prefixed.
+func (r *PrefixedRegistry) Register(name string, metric interface{}) error {
+	realName := r.prefix + name
+	return r.underlying.Register(realName, metric)
+}
+
+// Run all registered healthchecks.
+func (r *PrefixedRegistry) RunHealthchecks() {
+	r.underlying.RunHealthchecks()
+}
+
+// GetAll metrics in the Registry.
+func (r *PrefixedRegistry) GetAll() map[string]map[string]interface{} {
+	return r.underlying.GetAll()
+}
+
+// Unregister the metric with the given name. The name will be prefixed.
+func (r *PrefixedRegistry) Unregister(name string) {
+	realName := r.prefix + name
+	r.underlying.Unregister(realName)
+}
+
+// Unregister all metrics.  (Mostly for testing.)
+func (r *PrefixedRegistry) UnregisterAll() {
+	r.underlying.UnregisterAll()
+}
+
+var DefaultRegistry Registry = NewRegistry()
+
+// Call the given function for each registered metric.
+func Each(f func(string, interface{})) {
+	DefaultRegistry.Each(f)
+}
+
+// Get the metric by the given name or nil if none is registered.
+func Get(name string) interface{} {
+	return DefaultRegistry.Get(name)
+}
+
+// Gets an existing metric or creates and registers a new one. Threadsafe
+// alternative to calling Get and Register on failure.
+func GetOrRegister(name string, i interface{}) interface{} {
+	return DefaultRegistry.GetOrRegister(name, i)
+}
+
+// Register the given metric under the given name.  Returns a DuplicateMetric
+// if a metric by the given name is already registered.
+func Register(name string, i interface{}) error {
+	return DefaultRegistry.Register(name, i)
+}
+
+// Register the given metric under the given name.  Panics if a metric by the
+// given name is already registered.
+func MustRegister(name string, i interface{}) {
+	if err := Register(name, i); err != nil {
+		panic(err)
+	}
+}
+
+// Run all registered healthchecks.
+func RunHealthchecks() {
+	DefaultRegistry.RunHealthchecks()
+}
+
+// Unregister the metric with the given name.
+func Unregister(name string) {
+	DefaultRegistry.Unregister(name)
+}
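
A short sketch of the registry API defined above, covering lazy instantiation through GetOrRegister and name prefixing. NewCounter and the Counter type come from the package's counter.go, which is not part of this hunk.

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	parent := metrics.NewRegistry()
	kafka := metrics.NewPrefixedChildRegistry(parent, "kafka.")

	// GetOrRegister accepts either a metric or a constructor function;
	// NewCounter is only invoked if "kafka.messages" is not yet registered.
	c := kafka.GetOrRegister("messages", metrics.NewCounter).(metrics.Counter)
	c.Inc(42)

	// Each sees the fully prefixed name in the parent registry.
	parent.Each(func(name string, i interface{}) {
		fmt.Println(name) // kafka.messages
	})
}
```
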
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go
new file mode 100644
index 0000000..4047ab3
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime.go
@@ -0,0 +1,216 @@
+package metrics
+
+import (
+	"runtime"
+	"runtime/pprof"
+	"sync"
+	"time"
+)
+
+var (
+	memStats       runtime.MemStats
+	runtimeMetrics struct {
+		MemStats struct {
+			Alloc         Gauge
+			BuckHashSys   Gauge
+			DebugGC       Gauge
+			EnableGC      Gauge
+			Frees         Gauge
+			HeapAlloc     Gauge
+			HeapIdle      Gauge
+			HeapInuse     Gauge
+			HeapObjects   Gauge
+			HeapReleased  Gauge
+			HeapSys       Gauge
+			LastGC        Gauge
+			Lookups       Gauge
+			Mallocs       Gauge
+			MCacheInuse   Gauge
+			MCacheSys     Gauge
+			MSpanInuse    Gauge
+			MSpanSys      Gauge
+			NextGC        Gauge
+			NumGC         Gauge
+			GCCPUFraction GaugeFloat64
+			PauseNs       Histogram
+			PauseTotalNs  Gauge
+			StackInuse    Gauge
+			StackSys      Gauge
+			Sys           Gauge
+			TotalAlloc    Gauge
+		}
+		NumCgoCall   Gauge
+		NumGoroutine Gauge
+		NumThread    Gauge
+		ReadMemStats Timer
+	}
+	frees       uint64
+	lookups     uint64
+	mallocs     uint64
+	numGC       uint32
+	numCgoCalls int64
+
+	threadCreateProfile        = pprof.Lookup("threadcreate")
+	registerRuntimeMetricsOnce = sync.Once{}
+)
+
+// Capture new values for the Go runtime statistics exported in
+// runtime.MemStats.  This is designed to be called as a goroutine.
+func CaptureRuntimeMemStats(r Registry, d time.Duration) {
+	for range time.Tick(d) {
+		CaptureRuntimeMemStatsOnce(r)
+	}
+}
+
+// Capture new values for the Go runtime statistics exported in
+// runtime.MemStats.  This is designed to be called in a background
+// goroutine.  Giving a registry which has not been given to
+// RegisterRuntimeMemStats will panic.
+//
+// Be very careful with this because runtime.ReadMemStats calls the C
+// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld()
+// and that last one does what it says on the tin.
+func CaptureRuntimeMemStatsOnce(r Registry) {
+	t := time.Now()
+	runtime.ReadMemStats(&memStats) // This takes 50-200us.
+	runtimeMetrics.ReadMemStats.UpdateSince(t)
+
+	runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc))
+	runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys))
+	if memStats.DebugGC {
+		runtimeMetrics.MemStats.DebugGC.Update(1)
+	} else {
+		runtimeMetrics.MemStats.DebugGC.Update(0)
+	}
+	if memStats.EnableGC {
+		runtimeMetrics.MemStats.EnableGC.Update(1)
+	} else {
+		runtimeMetrics.MemStats.EnableGC.Update(0)
+	}
+
+	runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees))
+	runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc))
+	runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle))
+	runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse))
+	runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects))
+	runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased))
+	runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys))
+	runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC))
+	runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups))
+	runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs))
+	runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse))
+	runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys))
+	runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse))
+	runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys))
+	runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC))
+	runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC))
+	runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats))
+
+	// <https://code.google.com/p/go/source/browse/src/pkg/runtime/mgc0.c>
+	i := numGC % uint32(len(memStats.PauseNs))
+	ii := memStats.NumGC % uint32(len(memStats.PauseNs))
+	if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) {
+		for i = 0; i < uint32(len(memStats.PauseNs)); i++ {
+			runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+		}
+	} else {
+		if i > ii {
+			for ; i < uint32(len(memStats.PauseNs)); i++ {
+				runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+			}
+			i = 0
+		}
+		for ; i < ii; i++ {
+			runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+		}
+	}
+	frees = memStats.Frees
+	lookups = memStats.Lookups
+	mallocs = memStats.Mallocs
+	numGC = memStats.NumGC
+
+	runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs))
+	runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse))
+	runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys))
+	runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys))
+	runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc))
+
+	currentNumCgoCalls := numCgoCall()
+	runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls)
+	numCgoCalls = currentNumCgoCalls
+
+	runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine()))
+
+	runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count()))
+}
+
+// Register runtimeMetrics for the Go runtime statistics exported in runtime and
+// specifically runtime.MemStats.  The runtimeMetrics are named by their
+// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc.
+func RegisterRuntimeMemStats(r Registry) {
+	registerRuntimeMetricsOnce.Do(func() {
+		runtimeMetrics.MemStats.Alloc = NewGauge()
+		runtimeMetrics.MemStats.BuckHashSys = NewGauge()
+		runtimeMetrics.MemStats.DebugGC = NewGauge()
+		runtimeMetrics.MemStats.EnableGC = NewGauge()
+		runtimeMetrics.MemStats.Frees = NewGauge()
+		runtimeMetrics.MemStats.HeapAlloc = NewGauge()
+		runtimeMetrics.MemStats.HeapIdle = NewGauge()
+		runtimeMetrics.MemStats.HeapInuse = NewGauge()
+		runtimeMetrics.MemStats.HeapObjects = NewGauge()
+		runtimeMetrics.MemStats.HeapReleased = NewGauge()
+		runtimeMetrics.MemStats.HeapSys = NewGauge()
+		runtimeMetrics.MemStats.LastGC = NewGauge()
+		runtimeMetrics.MemStats.Lookups = NewGauge()
+		runtimeMetrics.MemStats.Mallocs = NewGauge()
+		runtimeMetrics.MemStats.MCacheInuse = NewGauge()
+		runtimeMetrics.MemStats.MCacheSys = NewGauge()
+		runtimeMetrics.MemStats.MSpanInuse = NewGauge()
+		runtimeMetrics.MemStats.MSpanSys = NewGauge()
+		runtimeMetrics.MemStats.NextGC = NewGauge()
+		runtimeMetrics.MemStats.NumGC = NewGauge()
+		runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64()
+		runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
+		runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
+		runtimeMetrics.MemStats.StackInuse = NewGauge()
+		runtimeMetrics.MemStats.StackSys = NewGauge()
+		runtimeMetrics.MemStats.Sys = NewGauge()
+		runtimeMetrics.MemStats.TotalAlloc = NewGauge()
+		runtimeMetrics.NumCgoCall = NewGauge()
+		runtimeMetrics.NumGoroutine = NewGauge()
+		runtimeMetrics.NumThread = NewGauge()
+		runtimeMetrics.ReadMemStats = NewTimer()
+
+		r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
+		r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys)
+		r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC)
+		r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC)
+		r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees)
+		r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc)
+		r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle)
+		r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse)
+		r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects)
+		r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased)
+		r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys)
+		r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC)
+		r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups)
+		r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs)
+		r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse)
+		r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys)
+		r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse)
+		r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
+		r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
+		r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
+		r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction)
+		r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
+		r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
+		r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
+		r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys)
+		r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys)
+		r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
+		r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
+		r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
+		r.Register("runtime.NumThread", runtimeMetrics.NumThread)
+		r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
+	})
+}
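
Illustrative use of the two entry points above; per the doc comments, the registry must be passed to RegisterRuntimeMemStats before capturing, and the capture loop blocks, so it runs as a goroutine.

```go
package main

import (
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.DefaultRegistry

	// Register the runtime.MemStats metrics first; capturing against a
	// registry that was never registered panics (see CaptureRuntimeMemStatsOnce).
	metrics.RegisterRuntimeMemStats(r)
	go metrics.CaptureRuntimeMemStats(r, 5*time.Second)

	time.Sleep(11 * time.Second) // let a couple of captures run
}
```
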
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
new file mode 100644
index 0000000..e3391f4
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
@@ -0,0 +1,10 @@
+// +build cgo
+// +build !appengine
+
+package metrics
+
+import "runtime"
+
+func numCgoCall() int64 {
+	return runtime.NumCgoCall()
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
new file mode 100644
index 0000000..ca12c05
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
@@ -0,0 +1,9 @@
+// +build go1.5
+
+package metrics
+
+import "runtime"
+
+func gcCPUFraction(memStats *runtime.MemStats) float64 {
+	return memStats.GCCPUFraction
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
new file mode 100644
index 0000000..616a3b4
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
@@ -0,0 +1,7 @@
+// +build !cgo appengine
+
+package metrics
+
+func numCgoCall() int64 {
+	return 0
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
new file mode 100644
index 0000000..be96aa6
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
@@ -0,0 +1,9 @@
+// +build !go1.5
+
+package metrics
+
+import "runtime"
+
+func gcCPUFraction(memStats *runtime.MemStats) float64 {
+	return 0
+}
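
The four runtime_* files above pair positive and negated build constraints so that exactly one implementation of numCgoCall and gcCPUFraction compiles on any toolchain. The same idiom in isolation, with hypothetical file and function names:

```go
// feature_on.go
// +build cgo

package metrics

func featureEnabled() bool { return true }
```

```go
// feature_off.go
// +build !cgo

package metrics

func featureEnabled() bool { return false }
```
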
diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/vendor/github.com/rcrowley/go-metrics/sample.go
new file mode 100644
index 0000000..fecee5e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/sample.go
@@ -0,0 +1,616 @@
+package metrics
+
+import (
+	"math"
+	"math/rand"
+	"sort"
+	"sync"
+	"time"
+)
+
+const rescaleThreshold = time.Hour
+
+// Samples maintain a statistically-significant selection of values from
+// a stream.
+type Sample interface {
+	Clear()
+	Count() int64
+	Max() int64
+	Mean() float64
+	Min() int64
+	Percentile(float64) float64
+	Percentiles([]float64) []float64
+	Size() int
+	Snapshot() Sample
+	StdDev() float64
+	Sum() int64
+	Update(int64)
+	Values() []int64
+	Variance() float64
+}
+
+// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
+// priority reservoir.  See Cormode et al's "Forward Decay: A Practical Time
+// Decay Model for Streaming Systems".
+//
+// <http://dimacs.rutgers.edu/~graham/pubs/papers/fwddecay.pdf>
+type ExpDecaySample struct {
+	alpha         float64
+	count         int64
+	mutex         sync.Mutex
+	reservoirSize int
+	t0, t1        time.Time
+	values        *expDecaySampleHeap
+}
+
+// NewExpDecaySample constructs a new exponentially-decaying sample with the
+// given reservoir size and alpha.
+func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
+	if UseNilMetrics {
+		return NilSample{}
+	}
+	s := &ExpDecaySample{
+		alpha:         alpha,
+		reservoirSize: reservoirSize,
+		t0:            time.Now(),
+		values:        newExpDecaySampleHeap(reservoirSize),
+	}
+	s.t1 = s.t0.Add(rescaleThreshold)
+	return s
+}
+
+// Clear clears all samples.
+func (s *ExpDecaySample) Clear() {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count = 0
+	s.t0 = time.Now()
+	s.t1 = s.t0.Add(rescaleThreshold)
+	s.values.Clear()
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *ExpDecaySample) Count() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return s.count
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Max() int64 {
+	return SampleMax(s.Values())
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *ExpDecaySample) Mean() float64 {
+	return SampleMean(s.Values())
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Min() int64 {
+	return SampleMin(s.Values())
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *ExpDecaySample) Percentile(p float64) float64 {
+	return SamplePercentile(s.Values(), p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
+	return SamplePercentiles(s.Values(), ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
+func (s *ExpDecaySample) Size() int {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return s.values.Size()
+}
+
+// Snapshot returns a read-only copy of the sample.
+func (s *ExpDecaySample) Snapshot() Sample {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	vals := s.values.Values()
+	values := make([]int64, len(vals))
+	for i, v := range vals {
+		values[i] = v.v
+	}
+	return &SampleSnapshot{
+		count:  s.count,
+		values: values,
+	}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *ExpDecaySample) StdDev() float64 {
+	return SampleStdDev(s.Values())
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *ExpDecaySample) Sum() int64 {
+	return SampleSum(s.Values())
+}
+
+// Update samples a new value.
+func (s *ExpDecaySample) Update(v int64) {
+	s.update(time.Now(), v)
+}
+
+// Values returns a copy of the values in the sample.
+func (s *ExpDecaySample) Values() []int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	vals := s.values.Values()
+	values := make([]int64, len(vals))
+	for i, v := range vals {
+		values[i] = v.v
+	}
+	return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *ExpDecaySample) Variance() float64 {
+	return SampleVariance(s.Values())
+}
+
+// update samples a new value at a particular timestamp.  This is a method all
+// its own to facilitate testing.
+func (s *ExpDecaySample) update(t time.Time, v int64) {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count++
+	if s.values.Size() == s.reservoirSize {
+		s.values.Pop()
+	}
+	s.values.Push(expDecaySample{
+		k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
+		v: v,
+	})
+	if t.After(s.t1) {
+		values := s.values.Values()
+		t0 := s.t0
+		s.values.Clear()
+		s.t0 = t
+		s.t1 = s.t0.Add(rescaleThreshold)
+		for _, v := range values {
+			v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
+			s.values.Push(v)
+		}
+	}
+}
+
+// NilSample is a no-op Sample.
+type NilSample struct{}
+
+// Clear is a no-op.
+func (NilSample) Clear() {}
+
+// Count is a no-op.
+func (NilSample) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilSample) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilSample) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilSample) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilSample) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilSample) Percentiles(ps []float64) []float64 {
+	return make([]float64, len(ps))
+}
+
+// Size is a no-op.
+func (NilSample) Size() int { return 0 }
+
+// Snapshot is a no-op.
+func (NilSample) Snapshot() Sample { return NilSample{} }
+
+// StdDev is a no-op.
+func (NilSample) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilSample) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilSample) Update(v int64) {}
+
+// Values is a no-op.
+func (NilSample) Values() []int64 { return []int64{} }
+
+// Variance is a no-op.
+func (NilSample) Variance() float64 { return 0.0 }
+
+// SampleMax returns the maximum value of the slice of int64.
+func SampleMax(values []int64) int64 {
+	if 0 == len(values) {
+		return 0
+	}
+	var max int64 = math.MinInt64
+	for _, v := range values {
+		if max < v {
+			max = v
+		}
+	}
+	return max
+}
+
+// SampleMean returns the mean value of the slice of int64.
+func SampleMean(values []int64) float64 {
+	if 0 == len(values) {
+		return 0.0
+	}
+	return float64(SampleSum(values)) / float64(len(values))
+}
+
+// SampleMin returns the minimum value of the slice of int64.
+func SampleMin(values []int64) int64 {
+	if 0 == len(values) {
+		return 0
+	}
+	var min int64 = math.MaxInt64
+	for _, v := range values {
+		if min > v {
+			min = v
+		}
+	}
+	return min
+}
+
+// SamplePercentile returns an arbitrary percentile of the slice of int64.
+func SamplePercentile(values int64Slice, p float64) float64 {
+	return SamplePercentiles(values, []float64{p})[0]
+}
+
+// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
+// int64.
+func SamplePercentiles(values int64Slice, ps []float64) []float64 {
+	scores := make([]float64, len(ps))
+	size := len(values)
+	if size > 0 {
+		sort.Sort(values)
+		for i, p := range ps {
+			pos := p * float64(size+1)
+			if pos < 1.0 {
+				scores[i] = float64(values[0])
+			} else if pos >= float64(size) {
+				scores[i] = float64(values[size-1])
+			} else {
+				lower := float64(values[int(pos)-1])
+				upper := float64(values[int(pos)])
+				scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
+			}
+		}
+	}
+	return scores
+}
+
+// SampleSnapshot is a read-only copy of another Sample.
+type SampleSnapshot struct {
+	count  int64
+	values []int64
+}
+
+func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot {
+	return &SampleSnapshot{
+		count:  count,
+		values: values,
+	}
+}
+
+// Clear panics.
+func (*SampleSnapshot) Clear() {
+	panic("Clear called on a SampleSnapshot")
+}
+
+// Count returns the count of inputs at the time the snapshot was taken.
+func (s *SampleSnapshot) Count() int64 { return s.count }
+
+// Max returns the maximal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
+
+// Min returns the minimal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
+
+// Percentile returns an arbitrary percentile of values at the time the
+// snapshot was taken.
+func (s *SampleSnapshot) Percentile(p float64) float64 {
+	return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values at the time
+// the snapshot was taken.
+func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
+	return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample at the time the snapshot was taken.
+func (s *SampleSnapshot) Size() int { return len(s.values) }
+
+// Snapshot returns the snapshot.
+func (s *SampleSnapshot) Snapshot() Sample { return s }
+
+// StdDev returns the standard deviation of values at the time the snapshot was
+// taken.
+func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
+
+// Sum returns the sum of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
+
+// Update panics.
+func (*SampleSnapshot) Update(int64) {
+	panic("Update called on a SampleSnapshot")
+}
+
+// Values returns a copy of the values in the sample.
+func (s *SampleSnapshot) Values() []int64 {
+	values := make([]int64, len(s.values))
+	copy(values, s.values)
+	return values
+}
+
+// Variance returns the variance of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
+
+// SampleStdDev returns the standard deviation of the slice of int64.
+func SampleStdDev(values []int64) float64 {
+	return math.Sqrt(SampleVariance(values))
+}
+
+// SampleSum returns the sum of the slice of int64.
+func SampleSum(values []int64) int64 {
+	var sum int64
+	for _, v := range values {
+		sum += v
+	}
+	return sum
+}
+
+// SampleVariance returns the variance of the slice of int64.
+func SampleVariance(values []int64) float64 {
+	if 0 == len(values) {
+		return 0.0
+	}
+	m := SampleMean(values)
+	var sum float64
+	for _, v := range values {
+		d := float64(v) - m
+		sum += d * d
+	}
+	return sum / float64(len(values))
+}
+
+// A uniform sample using Vitter's Algorithm R.
+//
+// <http://www.cs.umd.edu/~samir/498/vitter.pdf>
+type UniformSample struct {
+	count         int64
+	mutex         sync.Mutex
+	reservoirSize int
+	values        []int64
+}
+
+// NewUniformSample constructs a new uniform sample with the given reservoir
+// size.
+func NewUniformSample(reservoirSize int) Sample {
+	if UseNilMetrics {
+		return NilSample{}
+	}
+	return &UniformSample{
+		reservoirSize: reservoirSize,
+		values:        make([]int64, 0, reservoirSize),
+	}
+}
+
+// Clear clears all samples.
+func (s *UniformSample) Clear() {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count = 0
+	s.values = make([]int64, 0, s.reservoirSize)
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *UniformSample) Count() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return s.count
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *UniformSample) Max() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleMax(s.values)
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *UniformSample) Mean() float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleMean(s.values)
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *UniformSample) Min() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleMin(s.values)
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *UniformSample) Percentile(p float64) float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *UniformSample) Percentiles(ps []float64) []float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
+func (s *UniformSample) Size() int {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return len(s.values)
+}
+
+// Snapshot returns a read-only copy of the sample.
+func (s *UniformSample) Snapshot() Sample {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	values := make([]int64, len(s.values))
+	copy(values, s.values)
+	return &SampleSnapshot{
+		count:  s.count,
+		values: values,
+	}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *UniformSample) StdDev() float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleStdDev(s.values)
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *UniformSample) Sum() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleSum(s.values)
+}
+
+// Update samples a new value.
+func (s *UniformSample) Update(v int64) {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count++
+	if len(s.values) < s.reservoirSize {
+		s.values = append(s.values, v)
+	} else {
+		r := rand.Int63n(s.count)
+		if r < int64(len(s.values)) {
+			s.values[int(r)] = v
+		}
+	}
+}
+
+// Values returns a copy of the values in the sample.
+func (s *UniformSample) Values() []int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	values := make([]int64, len(s.values))
+	copy(values, s.values)
+	return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *UniformSample) Variance() float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleVariance(s.values)
+}
+
+// expDecaySample represents an individual sample in a heap.
+type expDecaySample struct {
+	k float64
+	v int64
+}
+
+func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap {
+	return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)}
+}
+
+// expDecaySampleHeap is a min-heap of expDecaySamples.
+// The internal implementation is copied from the standard library's container/heap.
+type expDecaySampleHeap struct {
+	s []expDecaySample
+}
+
+func (h *expDecaySampleHeap) Clear() {
+	h.s = h.s[:0]
+}
+
+func (h *expDecaySampleHeap) Push(s expDecaySample) {
+	n := len(h.s)
+	h.s = h.s[0 : n+1]
+	h.s[n] = s
+	h.up(n)
+}
+
+func (h *expDecaySampleHeap) Pop() expDecaySample {
+	n := len(h.s) - 1
+	h.s[0], h.s[n] = h.s[n], h.s[0]
+	h.down(0, n)
+
+	n = len(h.s)
+	s := h.s[n-1]
+	h.s = h.s[0 : n-1]
+	return s
+}
+
+func (h *expDecaySampleHeap) Size() int {
+	return len(h.s)
+}
+
+func (h *expDecaySampleHeap) Values() []expDecaySample {
+	return h.s
+}
+
+func (h *expDecaySampleHeap) up(j int) {
+	for {
+		i := (j - 1) / 2 // parent
+		if i == j || !(h.s[j].k < h.s[i].k) {
+			break
+		}
+		h.s[i], h.s[j] = h.s[j], h.s[i]
+		j = i
+	}
+}
+
+func (h *expDecaySampleHeap) down(i, n int) {
+	for {
+		j1 := 2*i + 1
+		if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
+			break
+		}
+		j := j1 // left child
+		if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) {
+			j = j2 // = 2*i + 2  // right child
+		}
+		if !(h.s[j].k < h.s[i].k) {
+			break
+		}
+		h.s[i], h.s[j] = h.s[j], h.s[i]
+		i = j
+	}
+}
+
+type int64Slice []int64
+
+func (p int64Slice) Len() int           { return len(p) }
+func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
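
A small sketch exercising the sampling API above: a uniform reservoir of 100 slots fed 1000 values, then queried through a read-only snapshot.

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	s := metrics.NewUniformSample(100) // keeps at most 100 values
	for i := int64(1); i <= 1000; i++ {
		s.Update(i)
	}

	snap := s.Snapshot() // SampleSnapshot: safe to query, panics on Update
	ps := snap.Percentiles([]float64{0.5, 0.99})
	fmt.Printf("count=%d size=%d p50=%.1f p99=%.1f\n",
		snap.Count(), snap.Size(), ps[0], ps[1])
}
```
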
diff --git a/vendor/github.com/rcrowley/go-metrics/syslog.go b/vendor/github.com/rcrowley/go-metrics/syslog.go
new file mode 100644
index 0000000..693f190
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/syslog.go
@@ -0,0 +1,78 @@
+// +build !windows
+
+package metrics
+
+import (
+	"fmt"
+	"log/syslog"
+	"time"
+)
+
+// Output each metric in the given registry to syslog periodically using
+// the given syslogger.
+func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
+	for range time.Tick(d) {
+		r.Each(func(name string, i interface{}) {
+			switch metric := i.(type) {
+			case Counter:
+				w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
+			case Gauge:
+				w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
+			case GaugeFloat64:
+				w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value()))
+			case Healthcheck:
+				metric.Check()
+				w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
+			case Histogram:
+				h := metric.Snapshot()
+				ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				w.Info(fmt.Sprintf(
+					"histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f",
+					name,
+					h.Count(),
+					h.Min(),
+					h.Max(),
+					h.Mean(),
+					h.StdDev(),
+					ps[0],
+					ps[1],
+					ps[2],
+					ps[3],
+					ps[4],
+				))
+			case Meter:
+				m := metric.Snapshot()
+				w.Info(fmt.Sprintf(
+					"meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f",
+					name,
+					m.Count(),
+					m.Rate1(),
+					m.Rate5(),
+					m.Rate15(),
+					m.RateMean(),
+				))
+			case Timer:
+				t := metric.Snapshot()
+				ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				w.Info(fmt.Sprintf(
+					"timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f",
+					name,
+					t.Count(),
+					t.Min(),
+					t.Max(),
+					t.Mean(),
+					t.StdDev(),
+					ps[0],
+					ps[1],
+					ps[2],
+					ps[3],
+					ps[4],
+					t.Rate1(),
+					t.Rate5(),
+					t.Rate15(),
+					t.RateMean(),
+				))
+			}
+		})
+	}
+}
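
How the syslog exporter above might be started (non-Windows only, matching the build constraint); syslog.New and LOG_INFO come from the standard library's log/syslog, and the tag string is a placeholder.

```go
package main

import (
	"log/syslog"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	w, err := syslog.New(syslog.LOG_INFO, "myapp")
	if err != nil {
		panic(err)
	}

	// Syslog blocks on time.Tick, so run it in the background.
	go metrics.Syslog(metrics.DefaultRegistry, 30*time.Second, w)

	time.Sleep(time.Minute)
}
```
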
diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go
new file mode 100644
index 0000000..d6ec4c6
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/timer.go
@@ -0,0 +1,329 @@
+package metrics
+
+import (
+	"sync"
+	"time"
+)
+
+// Timers capture the duration and rate of events.
+type Timer interface {
+	Count() int64
+	Max() int64
+	Mean() float64
+	Min() int64
+	Percentile(float64) float64
+	Percentiles([]float64) []float64
+	Rate1() float64
+	Rate5() float64
+	Rate15() float64
+	RateMean() float64
+	Snapshot() Timer
+	StdDev() float64
+	Stop()
+	Sum() int64
+	Time(func())
+	Update(time.Duration)
+	UpdateSince(time.Time)
+	Variance() float64
+}
+
+// GetOrRegisterTimer returns an existing Timer or constructs and registers a
+// new StandardTimer.
+// Be sure to unregister the timer from the registry once it is of no use to
+// allow for garbage collection.
+func GetOrRegisterTimer(name string, r Registry) Timer {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewTimer).(Timer)
+}
+
+// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
+// Be sure to call Stop() once the timer is of no use to allow for garbage collection.
+func NewCustomTimer(h Histogram, m Meter) Timer {
+	if UseNilMetrics {
+		return NilTimer{}
+	}
+	return &StandardTimer{
+		histogram: h,
+		meter:     m,
+	}
+}
+
+// NewRegisteredTimer constructs and registers a new StandardTimer.
+// Be sure to unregister the timer from the registry once it is of no use to
+// allow for garbage collection.
+func NewRegisteredTimer(name string, r Registry) Timer {
+	c := NewTimer()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// NewTimer constructs a new StandardTimer using an exponentially-decaying
+// sample with the same reservoir size and alpha as UNIX load averages.
+// Be sure to call Stop() once the timer is of no use to allow for garbage collection.
+func NewTimer() Timer {
+	if UseNilMetrics {
+		return NilTimer{}
+	}
+	return &StandardTimer{
+		histogram: NewHistogram(NewExpDecaySample(1028, 0.015)),
+		meter:     NewMeter(),
+	}
+}
+
+// NilTimer is a no-op Timer.
+type NilTimer struct {
+	h Histogram
+	m Meter
+}
+
+// Count is a no-op.
+func (NilTimer) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilTimer) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilTimer) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilTimer) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilTimer) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilTimer) Percentiles(ps []float64) []float64 {
+	return make([]float64, len(ps))
+}
+
+// Rate1 is a no-op.
+func (NilTimer) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilTimer) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilTimer) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilTimer) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilTimer) Snapshot() Timer { return NilTimer{} }
+
+// StdDev is a no-op.
+func (NilTimer) StdDev() float64 { return 0.0 }
+
+// Stop is a no-op.
+func (NilTimer) Stop() {}
+
+// Sum is a no-op.
+func (NilTimer) Sum() int64 { return 0 }
+
+// Time is a no-op.
+func (NilTimer) Time(func()) {}
+
+// Update is a no-op.
+func (NilTimer) Update(time.Duration) {}
+
+// UpdateSince is a no-op.
+func (NilTimer) UpdateSince(time.Time) {}
+
+// Variance is a no-op.
+func (NilTimer) Variance() float64 { return 0.0 }
+
+// StandardTimer is the standard implementation of a Timer and uses a Histogram
+// and Meter.
+type StandardTimer struct {
+	histogram Histogram
+	meter     Meter
+	mutex     sync.Mutex
+}
+
+// Count returns the number of events recorded.
+func (t *StandardTimer) Count() int64 {
+	return t.histogram.Count()
+}
+
+// Max returns the maximum value in the sample.
+func (t *StandardTimer) Max() int64 {
+	return t.histogram.Max()
+}
+
+// Mean returns the mean of the values in the sample.
+func (t *StandardTimer) Mean() float64 {
+	return t.histogram.Mean()
+}
+
+// Min returns the minimum value in the sample.
+func (t *StandardTimer) Min() int64 {
+	return t.histogram.Min()
+}
+
+// Percentile returns an arbitrary percentile of the values in the sample.
+func (t *StandardTimer) Percentile(p float64) float64 {
+	return t.histogram.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
+func (t *StandardTimer) Percentiles(ps []float64) []float64 {
+	return t.histogram.Percentiles(ps)
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (t *StandardTimer) Rate1() float64 {
+	return t.meter.Rate1()
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (t *StandardTimer) Rate5() float64 {
+	return t.meter.Rate5()
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (t *StandardTimer) Rate15() float64 {
+	return t.meter.Rate15()
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (t *StandardTimer) RateMean() float64 {
+	return t.meter.RateMean()
+}
+
+// Snapshot returns a read-only copy of the timer.
+func (t *StandardTimer) Snapshot() Timer {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	return &TimerSnapshot{
+		histogram: t.histogram.Snapshot().(*HistogramSnapshot),
+		meter:     t.meter.Snapshot().(*MeterSnapshot),
+	}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (t *StandardTimer) StdDev() float64 {
+	return t.histogram.StdDev()
+}
+
+// Stop stops the meter.
+func (t *StandardTimer) Stop() {
+	t.meter.Stop()
+}
+
+// Sum returns the sum of the values in the sample.
+func (t *StandardTimer) Sum() int64 {
+	return t.histogram.Sum()
+}
+
+// Time records the duration of the execution of the given function.
+func (t *StandardTimer) Time(f func()) {
+	ts := time.Now()
+	f()
+	t.Update(time.Since(ts))
+}
+
+// Update records the duration of an event.
+func (t *StandardTimer) Update(d time.Duration) {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.histogram.Update(int64(d))
+	t.meter.Mark(1)
+}
+
+// UpdateSince records the duration of an event that started at ts and ends now.
+func (t *StandardTimer) UpdateSince(ts time.Time) {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.histogram.Update(int64(time.Since(ts)))
+	t.meter.Mark(1)
+}
+
+// Variance returns the variance of the values in the sample.
+func (t *StandardTimer) Variance() float64 {
+	return t.histogram.Variance()
+}
+
+// TimerSnapshot is a read-only copy of another Timer.
+type TimerSnapshot struct {
+	histogram *HistogramSnapshot
+	meter     *MeterSnapshot
+}
+
+// Count returns the number of events recorded at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
+
+// Max returns the maximum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
+
+// Min returns the minimum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
+
+// Percentile returns an arbitrary percentile of sampled values at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) Percentile(p float64) float64 {
+	return t.histogram.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of sampled values at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
+	return t.histogram.Percentiles(ps)
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
+
+// Snapshot returns the snapshot.
+func (t *TimerSnapshot) Snapshot() Timer { return t }
+
+// StdDev returns the standard deviation of the values at the time the snapshot
+// was taken.
+func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
+
+// Stop is a no-op.
+func (t *TimerSnapshot) Stop() {}
+
+// Sum returns the sum at the time the snapshot was taken.
+func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
+
+// Time panics.
+func (*TimerSnapshot) Time(func()) {
+	panic("Time called on a TimerSnapshot")
+}
+
+// Update panics.
+func (*TimerSnapshot) Update(time.Duration) {
+	panic("Update called on a TimerSnapshot")
+}
+
+// UpdateSince panics.
+func (*TimerSnapshot) UpdateSince(time.Time) {
+	panic("UpdateSince called on a TimerSnapshot")
+}
+
+// Variance returns the variance of the values at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
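
A usage sketch for the Timer above: Time wraps a function call, UpdateSince records against an explicit start, and Stop releases the underlying meter as the doc comments request. The handler and metric name are placeholders.

```go
package main

import (
	"fmt"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func handle() { time.Sleep(5 * time.Millisecond) }

func main() {
	// A nil registry falls back to DefaultRegistry (see GetOrRegisterTimer).
	t := metrics.GetOrRegisterTimer("handler.latency", nil)
	defer t.Stop()

	t.Time(handle) // times one call to handle

	start := time.Now()
	handle()
	t.UpdateSince(start) // equivalent, with an explicit start time

	// Durations are stored as int64 nanoseconds (see Update).
	fmt.Printf("p99 = %.0f ns over %d events\n", t.Percentile(0.99), t.Count())
}
```
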
diff --git a/vendor/github.com/rcrowley/go-metrics/validate.sh b/vendor/github.com/rcrowley/go-metrics/validate.sh
new file mode 100644
index 0000000..c4ae91e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/validate.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -e
+
+# check there are no formatting issues
+GOFMT_LINES=`gofmt -l . | wc -l | xargs`
+test "$GOFMT_LINES" -eq 0 || { echo "gofmt needs to be run, ${GOFMT_LINES} files have issues"; exit 1; }
+
+# run the tests for the root package
+go test -race .
diff --git a/vendor/github.com/rcrowley/go-metrics/writer.go b/vendor/github.com/rcrowley/go-metrics/writer.go
new file mode 100644
index 0000000..091e971
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/writer.go
@@ -0,0 +1,100 @@
+package metrics
+
+import (
+	"fmt"
+	"io"
+	"sort"
+	"time"
+)
+
+// Write sorts and writes each metric in the given registry periodically to
+// the given io.Writer.
+func Write(r Registry, d time.Duration, w io.Writer) {
+	for range time.Tick(d) {
+		WriteOnce(r, w)
+	}
+}
+
+// WriteOnce sorts and writes metrics in the given registry to the given
+// io.Writer.
+func WriteOnce(r Registry, w io.Writer) {
+	var namedMetrics namedMetricSlice
+	r.Each(func(name string, i interface{}) {
+		namedMetrics = append(namedMetrics, namedMetric{name, i})
+	})
+
+	sort.Sort(namedMetrics)
+	for _, namedMetric := range namedMetrics {
+		switch metric := namedMetric.m.(type) {
+		case Counter:
+			fmt.Fprintf(w, "counter %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", metric.Count())
+		case Gauge:
+			fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  value:       %9d\n", metric.Value())
+		case GaugeFloat64:
+			fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  value:       %f\n", metric.Value())
+		case Healthcheck:
+			metric.Check()
+			fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  error:       %v\n", metric.Error())
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "histogram %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", h.Count())
+			fmt.Fprintf(w, "  min:         %9d\n", h.Min())
+			fmt.Fprintf(w, "  max:         %9d\n", h.Max())
+			fmt.Fprintf(w, "  mean:        %12.2f\n", h.Mean())
+			fmt.Fprintf(w, "  stddev:      %12.2f\n", h.StdDev())
+			fmt.Fprintf(w, "  median:      %12.2f\n", ps[0])
+			fmt.Fprintf(w, "  75%%:         %12.2f\n", ps[1])
+			fmt.Fprintf(w, "  95%%:         %12.2f\n", ps[2])
+			fmt.Fprintf(w, "  99%%:         %12.2f\n", ps[3])
+			fmt.Fprintf(w, "  99.9%%:       %12.2f\n", ps[4])
+		case Meter:
+			m := metric.Snapshot()
+			fmt.Fprintf(w, "meter %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", m.Count())
+			fmt.Fprintf(w, "  1-min rate:  %12.2f\n", m.Rate1())
+			fmt.Fprintf(w, "  5-min rate:  %12.2f\n", m.Rate5())
+			fmt.Fprintf(w, "  15-min rate: %12.2f\n", m.Rate15())
+			fmt.Fprintf(w, "  mean rate:   %12.2f\n", m.RateMean())
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "timer %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", t.Count())
+			fmt.Fprintf(w, "  min:         %9d\n", t.Min())
+			fmt.Fprintf(w, "  max:         %9d\n", t.Max())
+			fmt.Fprintf(w, "  mean:        %12.2f\n", t.Mean())
+			fmt.Fprintf(w, "  stddev:      %12.2f\n", t.StdDev())
+			fmt.Fprintf(w, "  median:      %12.2f\n", ps[0])
+			fmt.Fprintf(w, "  75%%:         %12.2f\n", ps[1])
+			fmt.Fprintf(w, "  95%%:         %12.2f\n", ps[2])
+			fmt.Fprintf(w, "  99%%:         %12.2f\n", ps[3])
+			fmt.Fprintf(w, "  99.9%%:       %12.2f\n", ps[4])
+			fmt.Fprintf(w, "  1-min rate:  %12.2f\n", t.Rate1())
+			fmt.Fprintf(w, "  5-min rate:  %12.2f\n", t.Rate5())
+			fmt.Fprintf(w, "  15-min rate: %12.2f\n", t.Rate15())
+			fmt.Fprintf(w, "  mean rate:   %12.2f\n", t.RateMean())
+		}
+	}
+}
+
+type namedMetric struct {
+	name string
+	m    interface{}
+}
+
+// namedMetricSlice is a slice of namedMetrics that implements sort.Interface.
+type namedMetricSlice []namedMetric
+
+func (nms namedMetricSlice) Len() int { return len(nms) }
+
+func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] }
+
+func (nms namedMetricSlice) Less(i, j int) bool {
+	return nms[i].name < nms[j].name
+}
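
Finally, a one-shot dump through the writer above; Write would do the same on a ticker. Output goes to stdout here, and the metric name is a placeholder.

```go
package main

import (
	"os"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	t := metrics.GetOrRegisterTimer("demo.timer", nil)
	t.Update(42 * time.Millisecond)
	defer t.Stop()

	// Sorted, human-readable dump of every metric in the default registry.
	metrics.WriteOnce(metrics.DefaultRegistry, os.Stdout)
}
```
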